author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/test/unit
parent    Initial commit. (diff)
Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test/unit')
-rw-r--r-- src/spdk/test/unit/Makefile | 44
-rw-r--r-- src/spdk/test/unit/include/Makefile | 44
-rw-r--r-- src/spdk/test/unit/include/spdk/Makefile | 44
-rw-r--r-- src/spdk/test/unit/include/spdk/histogram_data.h/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/include/spdk/histogram_data.h/Makefile | 37
-rw-r--r-- src/spdk/test/unit/include/spdk/histogram_data.h/histogram_ut.c | 161
-rw-r--r-- src/spdk/test/unit/lib/Makefile | 51
-rw-r--r-- src/spdk/test/unit/lib/bdev/Makefile | 51
-rw-r--r-- src/spdk/test/unit/lib/bdev/bdev.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/bdev.c/Makefile | 37
-rw-r--r-- src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c | 3417
-rw-r--r-- src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c | 1195
-rw-r--r-- src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c | 429
-rw-r--r-- src/spdk/test/unit/lib/bdev/compress.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/compress.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c | 1140
-rw-r--r-- src/spdk/test/unit/lib/bdev/crypto.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/crypto.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c | 1084
-rw-r--r-- src/spdk/test/unit/lib/bdev/gpt/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c | 363
-rw-r--r-- src/spdk/test/unit/lib/bdev/mt/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c | 1994
-rw-r--r-- src/spdk/test/unit/lib/bdev/part.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/part.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/bdev/part.c/part_ut.c | 173
-rw-r--r-- src/spdk/test/unit/lib/bdev/pmem/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/pmem/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c | 772
-rw-r--r-- src/spdk/test/unit/lib/bdev/raid/Makefile | 46
-rw-r--r-- src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c | 2258
-rw-r--r-- src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c | 214
-rw-r--r-- src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile | 37
-rw-r--r-- src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c | 131
-rw-r--r-- src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c | 1440
-rw-r--r-- src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c | 1502
-rw-r--r-- src/spdk/test/unit/lib/blob/Makefile | 49
-rw-r--r-- src/spdk/test/unit/lib/blob/blob.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/blob/blob.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/blob/blob.c/blob_ut.c | 6693
-rw-r--r-- src/spdk/test/unit/lib/blob/bs_dev_common.c | 395
-rw-r--r-- src/spdk/test/unit/lib/blob/bs_scheduler.c | 87
-rw-r--r-- src/spdk/test/unit/lib/blobfs/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c | 704
-rw-r--r-- src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut.c | 348
-rw-r--r-- src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c | 703
-rw-r--r-- src/spdk/test/unit/lib/blobfs/tree.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/blobfs/tree.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c | 150
-rw-r--r-- src/spdk/test/unit/lib/event/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/event/app.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/event/app.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/event/app.c/app_ut.c | 193
-rw-r--r-- src/spdk/test/unit/lib/event/reactor.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/event/reactor.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/event/reactor.c/reactor_ut.c | 455
-rw-r--r-- src/spdk/test/unit/lib/event/subsystem.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/event/subsystem.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c | 255
-rw-r--r-- src/spdk/test/unit/lib/ftl/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/ftl/common/utils.c | 173
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_band.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_band.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_band.c/ftl_band_ut.c | 307
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_io.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_io.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c | 1068
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_md/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_md/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_md/ftl_md_ut.c | 150
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_ppa/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_ppa/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_ppa/ftl_ppa_ut.c | 226
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_reloc.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_reloc.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_reloc.c/ftl_reloc_ut.c | 508
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_wptr/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_wptr/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/ftl/ftl_wptr/ftl_wptr_ut.c | 223
-rw-r--r-- src/spdk/test/unit/lib/idxd/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/idxd/idxd.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/idxd/idxd.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/idxd/idxd.c/idxd_ut.c | 300
-rw-r--r-- src/spdk/test/unit/lib/ioat/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/ioat/ioat.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/ioat/ioat.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c | 144
-rw-r--r-- src/spdk/test/unit/lib/iscsi/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/iscsi/common.c | 209
-rw-r--r-- src/spdk/test/unit/lib/iscsi/conn.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/iscsi/conn.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c | 927
-rw-r--r-- src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf | 31
-rw-r--r-- src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c | 674
-rw-r--r-- src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile | 46
-rw-r--r-- src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c | 2024
-rw-r--r-- src/spdk/test/unit/lib/iscsi/param.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/iscsi/param.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/iscsi/param.c/param_ut.c | 400
-rw-r--r-- src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile | 40
-rw-r--r-- src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c | 419
-rw-r--r-- src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf | 95
-rw-r--r-- src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c | 832
-rw-r--r-- src/spdk/test/unit/lib/json/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/json/json_parse.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/json/json_parse.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c | 931
-rw-r--r-- src/spdk/test/unit/lib/json/json_util.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/json/json_util.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c | 954
-rw-r--r-- src/spdk/test/unit/lib/json/json_write.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/json/json_write.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c | 736
-rw-r--r-- src/spdk/test/unit/lib/json_mock.c | 81
-rw-r--r-- src/spdk/test/unit/lib/jsonrpc/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c | 410
-rw-r--r-- src/spdk/test/unit/lib/log/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/log/log.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/log/log.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/log/log.c/log_ut.c | 106
-rw-r--r-- src/spdk/test/unit/lib/lvol/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/lvol/lvol.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/lvol/lvol.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c | 2096
-rw-r--r-- src/spdk/test/unit/lib/notify/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/notify/notify.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/notify/notify.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/notify/notify.c/notify_ut.c | 111
-rw-r--r-- src/spdk/test/unit/lib/nvme/Makefile | 47
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c | 1376
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c | 2150
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c | 751
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c | 106
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c | 153
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c | 1739
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c | 650
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c | 498
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c | 484
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c | 625
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c | 92
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c | 406
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c | 459
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c | 165
-rw-r--r-- src/spdk/test/unit/lib/nvmf/Makefile | 48
-rw-r--r-- src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c | 1711
-rw-r--r-- src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c | 415
-rw-r--r-- src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c | 303
-rw-r--r-- src/spdk/test/unit/lib/nvmf/fc.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvmf/fc.c/Makefile | 58
-rw-r--r-- src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c | 505
-rw-r--r-- src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile | 45
-rw-r--r-- src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c | 1070
-rw-r--r-- src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvmf/rdma.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c | 1283
-rw-r--r-- src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c | 1342
-rw-r--r-- src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/nvmf/tcp.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c | 722
-rw-r--r-- src/spdk/test/unit/lib/reduce/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/reduce/reduce.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/reduce/reduce.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/reduce/reduce.c/reduce_ut.c | 1300
-rw-r--r-- src/spdk/test/unit/lib/scsi/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/scsi/dev.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/scsi/dev.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c | 682
-rw-r--r-- src/spdk/test/unit/lib/scsi/lun.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/scsi/lun.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c | 750
-rw-r--r-- src/spdk/test/unit/lib/scsi/scsi.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/scsi/scsi.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c | 69
-rw-r--r-- src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c | 1037
-rw-r--r-- src/spdk/test/unit/lib/scsi/scsi_pr.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/scsi/scsi_pr.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/scsi/scsi_pr.c/scsi_pr_ut.c | 673
-rw-r--r-- src/spdk/test/unit/lib/sock/Makefile | 48
-rw-r--r-- src/spdk/test/unit/lib/sock/posix.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/sock/posix.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/sock/posix.c/posix_ut.c | 174
-rw-r--r-- src/spdk/test/unit/lib/sock/sock.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/sock/sock.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/sock/sock.c/sock_ut.c | 982
-rw-r--r-- src/spdk/test/unit/lib/sock/uring.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/sock/uring.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/sock/uring.c/uring_ut.c | 272
-rw-r--r-- src/spdk/test/unit/lib/thread/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/thread/thread.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/thread/thread.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/thread/thread.c/thread_ut.c | 1270
-rw-r--r-- src/spdk/test/unit/lib/util/Makefile | 45
-rw-r--r-- src/spdk/test/unit/lib/util/base64.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/base64.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/util/base64.c/base64_ut.c | 381
-rw-r--r-- src/spdk/test/unit/lib/util/bit_array.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/bit_array.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c | 376
-rw-r--r-- src/spdk/test/unit/lib/util/cpuset.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/cpuset.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c | 262
-rw-r--r-- src/spdk/test/unit/lib/util/crc16.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/crc16.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c | 104
-rw-r--r-- src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c | 74
-rw-r--r-- src/spdk/test/unit/lib/util/crc32c.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/crc32c.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c | 145
-rw-r--r-- src/spdk/test/unit/lib/util/dif.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/dif.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/util/dif.c/dif_ut.c | 2669
-rw-r--r-- src/spdk/test/unit/lib/util/iov.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/iov.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/util/iov.c/iov_ut.c | 249
-rw-r--r-- src/spdk/test/unit/lib/util/math.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/math.c/Makefile | 39
-rw-r--r-- src/spdk/test/unit/lib/util/math.c/math_ut.c | 81
-rw-r--r-- src/spdk/test/unit/lib/util/pipe.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/pipe.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/util/pipe.c/pipe_ut.c | 653
-rw-r--r-- src/spdk/test/unit/lib/util/string.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/util/string.c/Makefile | 38
-rw-r--r-- src/spdk/test/unit/lib/util/string.c/string_ut.c | 407
-rw-r--r-- src/spdk/test/unit/lib/vhost/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/vhost/vhost.c/.gitignore | 1
-rw-r--r-- src/spdk/test/unit/lib/vhost/vhost.c/Makefile | 44
-rw-r--r-- src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c | 547
-rwxr-xr-x src/spdk/test/unit/unittest.sh | 253
300 files changed, 75558 insertions, 0 deletions
diff --git a/src/spdk/test/unit/Makefile b/src/spdk/test/unit/Makefile
new file mode 100644
index 000000000..dbe663cbb
--- /dev/null
+++ b/src/spdk/test/unit/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = include lib
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/include/Makefile b/src/spdk/test/unit/include/Makefile
new file mode 100644
index 000000000..0ddc15242
--- /dev/null
+++ b/src/spdk/test/unit/include/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = spdk
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/include/spdk/Makefile b/src/spdk/test/unit/include/spdk/Makefile
new file mode 100644
index 000000000..d99c7d632
--- /dev/null
+++ b/src/spdk/test/unit/include/spdk/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = histogram_data.h
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/include/spdk/histogram_data.h/.gitignore b/src/spdk/test/unit/include/spdk/histogram_data.h/.gitignore
new file mode 100644
index 000000000..b2b36ff73
--- /dev/null
+++ b/src/spdk/test/unit/include/spdk/histogram_data.h/.gitignore
@@ -0,0 +1 @@
+histogram_ut
diff --git a/src/spdk/test/unit/include/spdk/histogram_data.h/Makefile b/src/spdk/test/unit/include/spdk/histogram_data.h/Makefile
new file mode 100644
index 000000000..54f7278ae
--- /dev/null
+++ b/src/spdk/test/unit/include/spdk/histogram_data.h/Makefile
@@ -0,0 +1,37 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = histogram_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/include/spdk/histogram_data.h/histogram_ut.c b/src/spdk/test/unit/include/spdk/histogram_data.h/histogram_ut.c
new file mode 100644
index 000000000..45b81d594
--- /dev/null
+++ b/src/spdk/test/unit/include/spdk/histogram_data.h/histogram_ut.c
@@ -0,0 +1,161 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/histogram_data.h"
+#include "spdk/util.h"
+
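+/* Data points to tally, chosen to span the smallest and largest buckets. */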
+uint64_t g_values[] = {
+ 1,
+ 10,
+ 1000,
+ 50000,
+ (1ULL << 63),
+ UINT64_MAX
+};
+
+uint64_t *g_values_end = &g_values[SPDK_COUNTOF(g_values)];
+uint64_t g_total;
+uint64_t g_number_of_merged_histograms;
+
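+/*
+ * spdk_histogram_data_iterate() callback: ctx is a cursor into g_values.
+ * For each non-empty bucket, verify the running total and that every value
+ * consumed from the cursor lies within the bucket's range, counting each
+ * value once per merged histogram.
+ */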
+static void
+check_values(void *ctx, uint64_t start, uint64_t end, uint64_t count,
+ uint64_t total, uint64_t so_far)
+{
+ uint64_t **values = ctx;
+
+ if (count == 0) {
+ return;
+ }
+
+ CU_ASSERT(so_far == (g_total + count));
+
+ /*
+ * The bucket for this iteration does not include end, but
+ * subtract one anyway to account for the last bucket
+ * which will have end = 0x0 (UINT64_MAX + 1).
+ */
+ end--;
+
+ while (1) {
+ CU_ASSERT(**values >= start);
+ /*
+ * We subtracted one from end above, so it's OK here for
+ * **values to equal end.
+ */
+ CU_ASSERT(**values <= end);
+ g_total += g_number_of_merged_histograms;
+ count -= g_number_of_merged_histograms;
+ (*values)++;
+ if (*values == g_values_end || **values > end) {
+ break;
+ }
+ }
+ CU_ASSERT(count == 0);
+}
+
+static void
+histogram_test(void)
+{
+ struct spdk_histogram_data *h;
+ uint64_t *values = g_values;
+ uint32_t i;
+
+ h = spdk_histogram_data_alloc();
+
+ for (i = 0; i < SPDK_COUNTOF(g_values); i++) {
+ spdk_histogram_data_tally(h, g_values[i]);
+ }
+ g_total = 0;
+ g_number_of_merged_histograms = 1;
+ spdk_histogram_data_iterate(h, check_values, &values);
+
+ spdk_histogram_data_free(h);
+}
+
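+/*
+ * Tally the same values into two histograms, merge them, and verify
+ * through check_values() that every bucket count is doubled.
+ */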
+static void
+histogram_merge(void)
+{
+ struct spdk_histogram_data *h1, *h2;
+ uint64_t *values = g_values;
+ uint32_t i;
+
+ h1 = spdk_histogram_data_alloc();
+ h2 = spdk_histogram_data_alloc();
+
+ for (i = 0; i < SPDK_COUNTOF(g_values); i++) {
+ spdk_histogram_data_tally(h1, g_values[i]);
+ spdk_histogram_data_tally(h2, g_values[i]);
+ }
+
+ spdk_histogram_data_merge(h1, h2);
+
+ g_total = 0;
+ g_number_of_merged_histograms = 2;
+ spdk_histogram_data_iterate(h1, check_values, &values);
+
+ spdk_histogram_data_free(h1);
+ spdk_histogram_data_free(h2);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("histogram", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "histogram_test", histogram_test) == NULL ||
+ CU_add_test(suite, "histogram_merge", histogram_merge) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/Makefile b/src/spdk/test/unit/lib/Makefile
new file mode 100644
index 000000000..aa2d707ab
--- /dev/null
+++ b/src/spdk/test/unit/lib/Makefile
@@ -0,0 +1,51 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev blob blobfs event ioat iscsi json jsonrpc log lvol
+DIRS-y += notify nvme nvmf scsi sock thread util
+DIRS-$(CONFIG_IDXD) += idxd
+DIRS-$(CONFIG_REDUCE) += reduce
+ifeq ($(OS),Linux)
+DIRS-$(CONFIG_VHOST) += vhost
+DIRS-y += ftl
+endif
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/Makefile b/src/spdk/test/unit/lib/bdev/Makefile
new file mode 100644
index 000000000..8120b1127
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/Makefile
@@ -0,0 +1,51 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev.c part.c scsi_nvme.c gpt vbdev_lvol.c mt raid bdev_zone.c vbdev_zone_block.c bdev_ocssd.c
+
+DIRS-$(CONFIG_CRYPTO) += crypto.c
+
+# enable once new mocks are added for compressdev
+DIRS-$(CONFIG_REDUCE) += compress.c
+
+DIRS-$(CONFIG_PMDK) += pmem
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore
new file mode 100644
index 000000000..a5a22d0d3
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore
@@ -0,0 +1 @@
+bdev_ut
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev.c/Makefile
new file mode 100644
index 000000000..eb73fafb3
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/Makefile
@@ -0,0 +1,37 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c b/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c
new file mode 100644
index 000000000..36916f4f5
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c
@@ -0,0 +1,3417 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/ut_multithread.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "bdev/bdev.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
+DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
+
+int g_status;
+int g_count;
+enum spdk_bdev_event_type g_event_type1;
+enum spdk_bdev_event_type g_event_type2;
+struct spdk_histogram_data *g_histogram;
+void *g_unregister_arg;
+int g_unregister_rc;
+
+void
+spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+}
+
+static int
+null_init(void)
+{
+ return 0;
+}
+
+static int
+null_clean(void)
+{
+ return 0;
+}
+
+static int
+stub_destruct(void *ctx)
+{
+ return 0;
+}
+
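+/*
+ * Description of an I/O the test expects the stub bdev to receive;
+ * queued per channel and matched in stub_submit_request().
+ */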
+struct ut_expected_io {
+ uint8_t type;
+ uint64_t offset;
+ uint64_t length;
+ int iovcnt;
+ struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
+ void *md_buf;
+ TAILQ_ENTRY(ut_expected_io) link;
+};
+
+struct bdev_ut_channel {
+ TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
+ uint32_t outstanding_io_count;
+ TAILQ_HEAD(, ut_expected_io) expected_io;
+};
+
+static bool g_io_done;
+static struct spdk_bdev_io *g_bdev_io;
+static enum spdk_bdev_io_status g_io_status;
+static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+static uint32_t g_bdev_ut_io_device;
+static struct bdev_ut_channel *g_bdev_ut_channel;
+static void *g_compare_read_buf;
+static uint32_t g_compare_read_buf_len;
+static void *g_compare_write_buf;
+static uint32_t g_compare_write_buf_len;
+static bool g_abort_done;
+static enum spdk_bdev_io_status g_abort_status;
+
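+/* Allocate and populate an expected-I/O record; the caller queues it on the channel. */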
+static struct ut_expected_io *
+ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
+{
+ struct ut_expected_io *expected_io;
+
+ expected_io = calloc(1, sizeof(*expected_io));
+ SPDK_CU_ASSERT_FATAL(expected_io != NULL);
+
+ expected_io->type = type;
+ expected_io->offset = offset;
+ expected_io->length = length;
+ expected_io->iovcnt = iovcnt;
+
+ return expected_io;
+}
+
+static void
+ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
+{
+ expected_io->iov[pos].iov_base = base;
+ expected_io->iov[pos].iov_len = len;
+}
+
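+/*
+ * Stub submit_request handler: emulates compare and abort behavior for the
+ * tests, queues the I/O on the channel's outstanding list, and validates it
+ * against the next queued ut_expected_io record, if one is present.
+ */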
+static void
+stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
+{
+ struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
+ struct ut_expected_io *expected_io;
+ struct iovec *iov, *expected_iov;
+ struct spdk_bdev_io *bio_to_abort;
+ int i;
+
+ g_bdev_io = bdev_io;
+
+ if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
+ uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
+
+ CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
+ CU_ASSERT(g_compare_read_buf_len == len);
+ memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
+ }
+
+ if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
+ uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
+
+ CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
+ CU_ASSERT(g_compare_write_buf_len == len);
+ memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
+ }
+
+ if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
+ uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
+
+ CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
+ CU_ASSERT(g_compare_read_buf_len == len);
+ if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
+ }
+ }
+
+ if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
+ if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
+ TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
+ if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
+ TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
+ ch->outstanding_io_count--;
+ spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
+ break;
+ }
+ }
+ }
+ }
+
+ TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_io_count++;
+
+ expected_io = TAILQ_FIRST(&ch->expected_io);
+ if (expected_io == NULL) {
+ return;
+ }
+ TAILQ_REMOVE(&ch->expected_io, expected_io, link);
+
+ if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
+ CU_ASSERT(bdev_io->type == expected_io->type);
+ }
+
+ if (expected_io->md_buf != NULL) {
+ CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
+ }
+
+ if (expected_io->length == 0) {
+ free(expected_io);
+ return;
+ }
+
+ CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
+ CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
+
+ if (expected_io->iovcnt == 0) {
+ free(expected_io);
+ /* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
+ return;
+ }
+
+ CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
+ for (i = 0; i < expected_io->iovcnt; i++) {
+ iov = &bdev_io->u.bdev.iovs[i];
+ expected_iov = &expected_io->iov[i];
+ CU_ASSERT(iov->iov_len == expected_iov->iov_len);
+ CU_ASSERT(iov->iov_base == expected_iov->iov_base);
+ }
+
+ free(expected_io);
+}
+
+static void
+stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
+ struct spdk_bdev_io *bdev_io, bool success)
+{
+ CU_ASSERT(success == true);
+
+ stub_submit_request(_ch, bdev_io);
+}
+
+static void
+stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
+{
+ spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
+ bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
+}
+
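+/*
+ * Complete up to num_to_complete outstanding I/Os with the expected status
+ * configured in g_io_exp_status; returns the number actually completed.
+ */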
+static uint32_t
+stub_complete_io(uint32_t num_to_complete)
+{
+ struct bdev_ut_channel *ch = g_bdev_ut_channel;
+ struct spdk_bdev_io *bdev_io;
+ static enum spdk_bdev_io_status io_status;
+ uint32_t num_completed = 0;
+
+ while (num_completed < num_to_complete) {
+ if (TAILQ_EMPTY(&ch->outstanding_io)) {
+ break;
+ }
+ bdev_io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_io_count--;
+ io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
+ g_io_exp_status;
+ spdk_bdev_io_complete(bdev_io, io_status);
+ num_completed++;
+ }
+
+ return num_completed;
+}
+
+static struct spdk_io_channel *
+bdev_ut_get_io_channel(void *ctx)
+{
+ return spdk_get_io_channel(&g_bdev_ut_io_device);
+}
+
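+/* I/O types the stub bdev reports as supported; tests flip entries
+ * through ut_enable_io_type().
+ */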
+static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
+ [SPDK_BDEV_IO_TYPE_READ] = true,
+ [SPDK_BDEV_IO_TYPE_WRITE] = true,
+ [SPDK_BDEV_IO_TYPE_COMPARE] = true,
+ [SPDK_BDEV_IO_TYPE_UNMAP] = true,
+ [SPDK_BDEV_IO_TYPE_FLUSH] = true,
+ [SPDK_BDEV_IO_TYPE_RESET] = true,
+ [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
+ [SPDK_BDEV_IO_TYPE_NVME_IO] = true,
+ [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
+ [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
+ [SPDK_BDEV_IO_TYPE_ZCOPY] = true,
+ [SPDK_BDEV_IO_TYPE_ABORT] = true,
+};
+
+static void
+ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
+{
+ g_io_types_supported[io_type] = enable;
+}
+
+static bool
+stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
+{
+ return g_io_types_supported[io_type];
+}
+
+static struct spdk_bdev_fn_table fn_table = {
+ .destruct = stub_destruct,
+ .submit_request = stub_submit_request,
+ .get_io_channel = bdev_ut_get_io_channel,
+ .io_type_supported = stub_io_type_supported,
+};
+
+static int
+bdev_ut_create_ch(void *io_device, void *ctx_buf)
+{
+ struct bdev_ut_channel *ch = ctx_buf;
+
+ CU_ASSERT(g_bdev_ut_channel == NULL);
+ g_bdev_ut_channel = ch;
+
+ TAILQ_INIT(&ch->outstanding_io);
+ ch->outstanding_io_count = 0;
+ TAILQ_INIT(&ch->expected_io);
+ return 0;
+}
+
+static void
+bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(g_bdev_ut_channel != NULL);
+ g_bdev_ut_channel = NULL;
+}
+
+struct spdk_bdev_module bdev_ut_if;
+
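+/*
+ * Module init registers the per-channel I/O device; because async_init is
+ * set below, completion is signaled via spdk_bdev_module_init_done().
+ */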
+static int
+bdev_ut_module_init(void)
+{
+ spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
+ sizeof(struct bdev_ut_channel), NULL);
+ spdk_bdev_module_init_done(&bdev_ut_if);
+ return 0;
+}
+
+static void
+bdev_ut_module_fini(void)
+{
+ spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+ .module_init = bdev_ut_module_init,
+ .module_fini = bdev_ut_module_fini,
+ .async_init = true,
+};
+
+static void vbdev_ut_examine(struct spdk_bdev *bdev);
+
+static int
+vbdev_ut_module_init(void)
+{
+ return 0;
+}
+
+static void
+vbdev_ut_module_fini(void)
+{
+}
+
+struct spdk_bdev_module vbdev_ut_if = {
+ .name = "vbdev_ut",
+ .module_init = vbdev_ut_module_init,
+ .module_fini = vbdev_ut_module_fini,
+ .examine_config = vbdev_ut_examine,
+};
+
+SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
+SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
+
+static void
+vbdev_ut_examine(struct spdk_bdev *bdev)
+{
+ spdk_bdev_module_examine_done(&vbdev_ut_if);
+}
+
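+/* Register a minimal stub bdev of 1024 blocks x 512 bytes backed by fn_table. */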
+static struct spdk_bdev *
+allocate_bdev(char *name)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ bdev = calloc(1, sizeof(*bdev));
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ bdev->name = name;
+ bdev->fn_table = &fn_table;
+ bdev->module = &bdev_ut_if;
+ bdev->blockcnt = 1024;
+ bdev->blocklen = 512;
+
+ rc = spdk_bdev_register(bdev);
+ CU_ASSERT(rc == 0);
+
+ return bdev;
+}
+
+static struct spdk_bdev *
+allocate_vbdev(char *name)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ bdev = calloc(1, sizeof(*bdev));
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ bdev->name = name;
+ bdev->fn_table = &fn_table;
+ bdev->module = &vbdev_ut_if;
+
+ rc = spdk_bdev_register(bdev);
+ CU_ASSERT(rc == 0);
+
+ return bdev;
+}
+
+static void
+free_bdev(struct spdk_bdev *bdev)
+{
+ spdk_bdev_unregister(bdev, NULL, NULL);
+ poll_threads();
+ memset(bdev, 0xFF, sizeof(*bdev));
+ free(bdev);
+}
+
+static void
+free_vbdev(struct spdk_bdev *bdev)
+{
+ spdk_bdev_unregister(bdev, NULL, NULL);
+ poll_threads();
+ memset(bdev, 0xFF, sizeof(*bdev));
+ free(bdev);
+}
+
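+/*
+ * Completion callback for spdk_bdev_get_device_stat(): checks the bdev
+ * name, frees the stat buffer and the bdev, then flags the test as done.
+ */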
+static void
+get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
+{
+ const char *bdev_name;
+
+ CU_ASSERT(bdev != NULL);
+ CU_ASSERT(rc == 0);
+ bdev_name = spdk_bdev_get_name(bdev);
+ CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");
+
+ free(stat);
+ free_bdev(bdev);
+
+ *(bool *)cb_arg = true;
+}
+
+static void
+bdev_unregister_cb(void *cb_arg, int rc)
+{
+ g_unregister_arg = cb_arg;
+ g_unregister_rc = rc;
+}
+
+static void
+bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
+{
+ struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;
+
+ g_event_type1 = type;
+ if (SPDK_BDEV_EVENT_REMOVE == type) {
+ spdk_bdev_close(desc);
+ }
+}
+
+static void
+bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
+{
+ struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;
+
+ g_event_type2 = type;
+ if (SPDK_BDEV_EVENT_REMOVE == type) {
+ spdk_bdev_close(desc);
+ }
+}
+
+static void
+get_device_stat_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_io_stat *stat;
+ bool done;
+
+ bdev = allocate_bdev("bdev0");
+ stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
+ if (stat == NULL) {
+ free_bdev(bdev);
+ return;
+ }
+
+ done = false;
+ spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
+ while (!done) { poll_threads(); }
+
+}
+
+static void
+open_write_test(void)
+{
+ struct spdk_bdev *bdev[9];
+ struct spdk_bdev_desc *desc[9] = {};
+ int rc;
+
+ /*
+ * Create a tree of bdevs to test various open w/ write cases.
+ *
+ * bdev0 through bdev3 are physical block devices, such as NVMe
+ * namespaces or Ceph block devices.
+ *
+ * bdev4 is a virtual bdev with multiple base bdevs. This models
+ * caching or RAID use cases.
+ *
+ * bdev5 through bdev7 are all virtual bdevs with the same base
+ * bdev (except bdev7). This models partitioning or logical volume
+ * use cases.
+ *
+ * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
+ * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
+ * models caching, RAID, partitioning, or logical volume use cases.
+ *
+ * bdev8 is a virtual bdev with multiple base bdevs, but these
+ * base bdevs are themselves virtual bdevs.
+ *
+ * bdev8
+ * |
+ * +----------+
+ * | |
+ * bdev4 bdev5 bdev6 bdev7
+ * | | | |
+ * +---+---+ +---+ + +---+---+
+ * | | \ | / \
+ * bdev0 bdev1 bdev2 bdev3
+ */
+
+ bdev[0] = allocate_bdev("bdev0");
+ rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[1] = allocate_bdev("bdev1");
+ rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[2] = allocate_bdev("bdev2");
+ rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[3] = allocate_bdev("bdev3");
+ rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[4] = allocate_vbdev("bdev4");
+ rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[5] = allocate_vbdev("bdev5");
+ rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[6] = allocate_vbdev("bdev6");
+
+ bdev[7] = allocate_vbdev("bdev7");
+
+ bdev[8] = allocate_vbdev("bdev8");
+
+ /* Open bdev0 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
+ spdk_bdev_close(desc[0]);
+
+ /*
+ * Open bdev1 read/write. This should fail since bdev1 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
+ CU_ASSERT(rc == -EPERM);
+
+ /*
+ * Open bdev4 read/write. This should fail since bdev4 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
+ CU_ASSERT(rc == -EPERM);
+
+ /* Open bdev4 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
+ spdk_bdev_close(desc[4]);
+
+ /*
+ * Open bdev8 read/write. This should succeed since it is a leaf
+ * bdev.
+ */
+ rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
+ spdk_bdev_close(desc[8]);
+
+ /*
+ * Open bdev5 read/write. This should fail since bdev4 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
+ CU_ASSERT(rc == -EPERM);
+
+ /* Open bdev5 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
+ spdk_bdev_close(desc[5]);
+
+ free_vbdev(bdev[8]);
+
+ free_vbdev(bdev[5]);
+ free_vbdev(bdev[6]);
+ free_vbdev(bdev[7]);
+
+ free_vbdev(bdev[4]);
+
+ free_bdev(bdev[0]);
+ free_bdev(bdev[1]);
+ free_bdev(bdev[2]);
+ free_bdev(bdev[3]);
+}
+
+static void
+bytes_to_blocks_test(void)
+{
+ struct spdk_bdev bdev;
+ uint64_t offset_blocks, num_blocks;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.blocklen = 512;
+
+ /* All parameters valid */
+ offset_blocks = 0;
+ num_blocks = 0;
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
+ CU_ASSERT(offset_blocks == 1);
+ CU_ASSERT(num_blocks == 2);
+
+ /* Offset not a block multiple */
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);
+
+ /* Length not a block multiple */
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);
+
+ /* Verify the conversion also works when blocklen is not a power of two */
+ bdev.blocklen = 100;
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
+ CU_ASSERT(offset_blocks == 1);
+ CU_ASSERT(num_blocks == 2);
+
+ /* Offset not a block multiple */
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);
+
+ /* Length not a block multiple */
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
+}
+
+static void
+num_blocks_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_bdev_desc *desc_ext = NULL;
+ int rc;
+
+ memset(&bdev, 0, sizeof(bdev));
+ bdev.name = "num_blocks";
+ bdev.fn_table = &fn_table;
+ bdev.module = &bdev_ut_if;
+ spdk_bdev_register(&bdev);
+ spdk_bdev_notify_blockcnt_change(&bdev, 50);
+
+ /* Growing block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
+ /* Shrinking block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
+
+ /* Now repeat with the bdev open */
+ rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+
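+ /* While a descriptor is open, growing the block count is allowed but
+ * shrinking it is rejected.
+ */
+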
+ /* Growing block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
+ /* Shrinking block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
+
+ /* Now also open the bdev through the extended API, which registers a
+ * resize event callback; &desc_ext doubles as the event context.
+ */
+ rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc_ext != NULL);
+
+ g_event_type1 = 0xFF;
+ /* Growing block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);
+
+ poll_threads();
+ CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);
+
+ g_event_type1 = 0xFF;
+ /* Grow the block count again, then close both descriptors before polling */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);
+
+ spdk_bdev_close(desc);
+ spdk_bdev_close(desc_ext);
+ spdk_bdev_unregister(&bdev, NULL, NULL);
+
+ poll_threads();
+
+ /* Callback is not called for closed device */
+ CU_ASSERT_EQUAL(g_event_type1, 0xFF);
+}
+
+static void
+io_valid_test(void)
+{
+ struct spdk_bdev bdev;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.blocklen = 512;
+ spdk_bdev_notify_blockcnt_change(&bdev, 100);
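+ /* With 100 blocks, the valid LBA range is [0, 99]. */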
+
+ /* All parameters valid */
+ CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);
+
+ /* Last valid block */
+ CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);
+
+ /* Offset past end of bdev */
+ CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);
+
+ /* Offset + length past end of bdev */
+ CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);
+
+ /* Offset near end of uint64_t range (2^64 - 1) */
+ CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
+}
+
+static void
+alias_add_del_test(void)
+{
+ struct spdk_bdev *bdev[3];
+ int rc;
+
+ /* Creating and registering bdevs */
+ bdev[0] = allocate_bdev("bdev0");
+ SPDK_CU_ASSERT_FATAL(bdev[0] != NULL);
+
+ bdev[1] = allocate_bdev("bdev1");
+ SPDK_CU_ASSERT_FATAL(bdev[1] != NULL);
+
+ bdev[2] = allocate_bdev("bdev2");
+ SPDK_CU_ASSERT_FATAL(bdev[2] != NULL);
+
+ poll_threads();
+
+ /*
+ * Try adding an alias identical to the bdev name. The name cannot also
+ * appear on the alias list, so this should fail.
+ */
+ rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Adding a NULL alias should also fail */
+ rc = spdk_bdev_alias_add(bdev[0], NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Try adding the same alias to two different registered bdevs */
+
+ /* The alias is not in use yet, so this should pass */
+ rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
+ CU_ASSERT(rc == 0);
+
+ /* The alias already belongs to another bdev, so this should fail */
+ rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
+ CU_ASSERT(rc == -EEXIST);
+
+ /* This alias is also unused so far, so it should pass */
+ rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
+ CU_ASSERT(rc == 0);
+
+ /* Try removing aliases from registered bdevs */
+
+ /* The alias is not on the bdev's alias list, so this should fail */
+ rc = spdk_bdev_alias_del(bdev[0], "not existing");
+ CU_ASSERT(rc == -ENOENT);
+
+ /* The alias is on the bdev's alias list, so this should pass */
+ rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
+ CU_ASSERT(rc == 0);
+
+ /* Likewise, the second bdev's alias is on its list, so this should pass */
+ rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
+ CU_ASSERT(rc == 0);
+
+ /* Removing the name instead of an alias should fail; the name cannot be changed or removed */
+ rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
+ CU_ASSERT(rc != 0);
+
+ /* Delete all aliases from an empty alias list */
+ spdk_bdev_alias_del_all(bdev[2]);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));
+
+ /* Delete all aliases from a non-empty alias list */
+ rc = spdk_bdev_alias_add(bdev[2], "alias0");
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_alias_add(bdev[2], "alias1");
+ CU_ASSERT(rc == 0);
+ spdk_bdev_alias_del_all(bdev[2]);
+ CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));
+
+ /* Unregister and free bdevs */
+ spdk_bdev_unregister(bdev[0], NULL, NULL);
+ spdk_bdev_unregister(bdev[1], NULL, NULL);
+ spdk_bdev_unregister(bdev[2], NULL, NULL);
+
+ poll_threads();
+
+ free(bdev[0]);
+ free(bdev[1]);
+ free(bdev[2]);
+}
+
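+/* Completion callback shared by the I/O tests: records that the I/O finished
+ * and stashes its status for later assertions, then frees the bdev_io.
+ */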
+static void
+io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_io_done = true;
+ g_io_status = bdev_io->internal.status;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+bdev_init_cb(void *arg, int rc)
+{
+ CU_ASSERT(rc == 0);
+}
+
+static void
+bdev_fini_cb(void *arg)
+{
+}
+
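+/* Tracks a queued I/O-wait request so the test can observe when its retry
+ * callback has resubmitted the I/O.
+ */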
+struct bdev_ut_io_wait_entry {
+ struct spdk_bdev_io_wait_entry entry;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_desc *desc;
+ bool submitted;
+};
+
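+/* I/O-wait callback: invoked once an spdk_bdev_io becomes available again;
+ * resubmits the read and marks the entry as submitted.
+ */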
+static void
+io_wait_cb(void *arg)
+{
+ struct bdev_ut_io_wait_entry *entry = arg;
+ int rc;
+
+ rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ entry->submitted = true;
+}
+
+static void
+bdev_io_types_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 4,
+ .bdev_io_cache_size = 2,
+ };
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+ poll_threads();
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ /* WRITE and WRITE ZEROES are not supported */
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
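+ /* With WRITE_ZEROES unsupported the bdev layer would fall back to plain
+ * WRITEs; with WRITE disabled as well, the request fails with -ENOTSUP.
+ */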
+ rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
+ CU_ASSERT(rc == -ENOTSUP);
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+bdev_io_wait_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 4,
+ .bdev_io_cache_size = 2,
+ };
+ struct bdev_ut_io_wait_entry io_wait_entry;
+ struct bdev_ut_io_wait_entry io_wait_entry2;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+ poll_threads();
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
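+ /* Submit four reads to exhaust the bdev_io pool (bdev_io_pool_size = 4). */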
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+
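+ /* The pool is now empty, so a fifth read fails immediately with -ENOMEM. */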
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == -ENOMEM);
+
+ io_wait_entry.entry.bdev = bdev;
+ io_wait_entry.entry.cb_fn = io_wait_cb;
+ io_wait_entry.entry.cb_arg = &io_wait_entry;
+ io_wait_entry.io_ch = io_ch;
+ io_wait_entry.desc = desc;
+ io_wait_entry.submitted = false;
+ /* Cannot use the same io_wait_entry for two different calls. */
+ memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
+ io_wait_entry2.entry.cb_arg = &io_wait_entry2;
+
+ /* Queue two I/O waits. */
+ rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(io_wait_entry.submitted == false);
+ rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(io_wait_entry2.submitted == false);
+
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+ CU_ASSERT(io_wait_entry.submitted == true);
+ CU_ASSERT(io_wait_entry2.submitted == false);
+
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+ CU_ASSERT(io_wait_entry2.submitted == true);
+
+ stub_complete_io(4);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+bdev_io_spans_boundary_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_bdev_io bdev_io;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.optimal_io_boundary = 0;
+ bdev_io.bdev = &bdev;
+
+ /* bdev has no optimal_io_boundary set - so this should return false. */
+ CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
+
+ bdev.optimal_io_boundary = 32;
+ bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
+
+ /* RESETs are not based on LBAs - so this should return false. */
+ CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
+
+ bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
+ bdev_io.u.bdev.offset_blocks = 0;
+ bdev_io.u.bdev.num_blocks = 32;
+
+ /* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
+ CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
+
+ bdev_io.u.bdev.num_blocks = 33;
+
+ /* This I/O spans a boundary. */
+ CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
+}
+
+static void
+bdev_io_split_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 512,
+ .bdev_io_cache_size = 64,
+ };
+ struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
+ struct ut_expected_io *expected_io;
+ uint64_t i;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = false;
+
+ g_io_done = false;
+
+ /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ bdev->split_on_optimal_io_boundary = true;
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* spdk_bdev_read_blocks will submit the first child immediately. */
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Now set up a more complex, multi-vector command that needs to be split,
+ * including splitting iovecs.
+ */
+ iov[0].iov_base = (void *)0x10000;
+ iov[0].iov_len = 512;
+ iov[1].iov_base = (void *)0x20000;
+ iov[1].iov_len = 20 * 512;
+ iov[2].iov_base = (void *)0x30000;
+ iov[2].iov_len = 11 * 512;
+
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
+ stub_complete_io(3);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test a multi-vector command that needs to be split on the optimal I/O
+ * boundary and then split further due to the capacity of child iovs.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
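+ /* With the boundary equal to the child iov capacity, the 2x-capacity readv
+ * below splits into exactly two full child I/Os.
+ */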
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ ut_expected_io_set_iov(expected_io, i,
+ (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Test a multi-vector command that needs to be split on the optimal I/O
+ * boundary and then split further due to the capacity of child iovs. In this
+ * case, the length of the iovecs remaining within an I/O boundary is a
+ * multiple of the block size.
+ */
+
+ /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
+ * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+ for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 256;
+ }
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
+
+ /* Add an extra iovec to trigger split */
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
+ BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ ut_expected_io_set_iov(expected_io, i,
+ (void *)((i + 1) * 0x10000), 512);
+ }
+ for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ ut_expected_io_set_iov(expected_io, i,
+ (void *)((i + 1) * 0x10000), 256);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
+ 1, 1);
+ ut_expected_io_set_iov(expected_io, 0,
+ (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ 1, 1);
+ ut_expected_io_set_iov(expected_io, 0,
+ (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
+ BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Test a multi-vector command that needs to be split on the optimal I/O
+ * boundary and then split further due to the capacity of child iovs. The
+ * child request offset should be rewound to the last block-aligned offset
+ * so the request succeeds without error.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
+
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
+
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ g_io_status = 0;
+ /* The first expected I/O should span offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
+ BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ ut_expected_io_set_iov(expected_io, i,
+ (void *)((i + 1) * 0x10000), 512);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ /* The second expected I/O should span offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
+ 1, 2);
+ ut_expected_io_set_iov(expected_io, 0,
+ (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
+ ut_expected_io_set_iov(expected_io, 1,
+ (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ /* The third expected I/O should span offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ 1, 1);
+ ut_expected_io_set_iov(expected_io, 0,
+ (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Test a multi-vector command that needs to be split due to the I/O boundary
+ * and the capacity of child iovs. In particular, test the case where the
+ * command is split due to the capacity of child iovs, the tail address is not
+ * aligned with the block size, and the split is rewound to the aligned address.
+ *
+ * The iovecs used in the read request are complex, but they are based on data
+ * collected from a real issue. We change the base addresses but keep the
+ * lengths so as not to lose the credibility of the test.
+ */
+ bdev->optimal_io_boundary = 128;
+ g_io_done = false;
+ g_io_status = 0;
+
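+ /* The 61 iovecs below total 278016 bytes, i.e. exactly 543 blocks of 512
+ * bytes, matching the readv further down.
+ */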
+ for (i = 0; i < 31; i++) {
+ iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
+ iov[i].iov_len = 1024;
+ }
+ iov[31].iov_base = (void *)0xFEED1F00000;
+ iov[31].iov_len = 32768;
+ iov[32].iov_base = (void *)0xFEED2000000;
+ iov[32].iov_len = 160;
+ iov[33].iov_base = (void *)0xFEED2100000;
+ iov[33].iov_len = 4096;
+ iov[34].iov_base = (void *)0xFEED2200000;
+ iov[34].iov_len = 4096;
+ iov[35].iov_base = (void *)0xFEED2300000;
+ iov[35].iov_len = 4096;
+ iov[36].iov_base = (void *)0xFEED2400000;
+ iov[36].iov_len = 4096;
+ iov[37].iov_base = (void *)0xFEED2500000;
+ iov[37].iov_len = 4096;
+ iov[38].iov_base = (void *)0xFEED2600000;
+ iov[38].iov_len = 4096;
+ iov[39].iov_base = (void *)0xFEED2700000;
+ iov[39].iov_len = 4096;
+ iov[40].iov_base = (void *)0xFEED2800000;
+ iov[40].iov_len = 4096;
+ iov[41].iov_base = (void *)0xFEED2900000;
+ iov[41].iov_len = 4096;
+ iov[42].iov_base = (void *)0xFEED2A00000;
+ iov[42].iov_len = 4096;
+ iov[43].iov_base = (void *)0xFEED2B00000;
+ iov[43].iov_len = 12288;
+ iov[44].iov_base = (void *)0xFEED2C00000;
+ iov[44].iov_len = 8192;
+ iov[45].iov_base = (void *)0xFEED2F00000;
+ iov[45].iov_len = 4096;
+ iov[46].iov_base = (void *)0xFEED3000000;
+ iov[46].iov_len = 4096;
+ iov[47].iov_base = (void *)0xFEED3100000;
+ iov[47].iov_len = 4096;
+ iov[48].iov_base = (void *)0xFEED3200000;
+ iov[48].iov_len = 24576;
+ iov[49].iov_base = (void *)0xFEED3300000;
+ iov[49].iov_len = 16384;
+ iov[50].iov_base = (void *)0xFEED3400000;
+ iov[50].iov_len = 12288;
+ iov[51].iov_base = (void *)0xFEED3500000;
+ iov[51].iov_len = 4096;
+ iov[52].iov_base = (void *)0xFEED3600000;
+ iov[52].iov_len = 4096;
+ iov[53].iov_base = (void *)0xFEED3700000;
+ iov[53].iov_len = 4096;
+ iov[54].iov_base = (void *)0xFEED3800000;
+ iov[54].iov_len = 28672;
+ iov[55].iov_base = (void *)0xFEED3900000;
+ iov[55].iov_len = 20480;
+ iov[56].iov_base = (void *)0xFEED3A00000;
+ iov[56].iov_len = 4096;
+ iov[57].iov_base = (void *)0xFEED3B00000;
+ iov[57].iov_len = 12288;
+ iov[58].iov_base = (void *)0xFEED3C00000;
+ iov[58].iov_len = 4096;
+ iov[59].iov_base = (void *)0xFEED3D00000;
+ iov[59].iov_len = 4096;
+ iov[60].iov_base = (void *)0xFEED3E00000;
+ iov[60].iov_len = 352;
+
+ /* The 1st child IO must cover iov[0] through iov[31], limited by the
+ * capacity of child iovs.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
+ for (i = 0; i < 32; i++) {
+ ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33]
+ * split by the IO boundary requirement.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
+ ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
+ ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
+ * the first 864 bytes of iov[46] split by the IO boundary requirement.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
+ ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
+ iov[33].iov_len - 864);
+ ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
+ ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
+ ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
+ ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
+ ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
+ ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
+ ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
+ ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
+ ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
+ ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
+ ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
+ ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
+ ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
+ * first 864 bytes of iov[52] split by the IO boundary requirement.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
+ ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
+ iov[46].iov_len - 864);
+ ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
+ ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
+ ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
+ ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
+ ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
+ ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
+ * the first 4960 bytes of iov[57] split by the IO boundary requirement.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
+ ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
+ iov[52].iov_len - 864);
+ ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
+ ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
+ ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
+ ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
+ ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
+ * to the first 3936 bytes of iov[59] split by the capacity of child iovs.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
+ ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
+ iov[57].iov_len - 4960);
+ ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
+ ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
+ iov[59].iov_len - 3936);
+ ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 61, 0, 543, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
+ stub_complete_io(5);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
+ * split, so test that.
+ */
+ bdev->optimal_io_boundary = 15;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test an UNMAP. This should also not be split. */
+ bdev->optimal_io_boundary = 16;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test a FLUSH. This should also not be split. */
+ bdev->optimal_io_boundary = 16;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
+
+ /* Child requests return an error status */
+ bdev->optimal_io_boundary = 16;
+ iov[0].iov_base = (void *)0x10000;
+ iov[0].iov_len = 512 * 64;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_io_done = false;
+ g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
+ stub_complete_io(4);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /* Test that a multi-vector command is terminated with failure, instead of
+ * continuing the splitting process, when one of the child I/Os fails.
+ * The multi-vector command is the same as above: it needs to be split on the
+ * optimal I/O boundary and then split further due to the capacity of child iovs.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
+
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
+
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_io_done = false;
+ g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ /* For this test we will create the following conditions to hit the code path
+ * where we are trying to send an I/O following a split that has no iovs because
+ * we had to trim them for alignment reasons.
+ *
+ * - 16K boundary, our I/O will start at offset 0 with a length of 0x4200
+ * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
+ * position 30 and overshoot by 0x2e.
+ * - That means we'll send the I/O and loop back to pick up the remaining bytes at
+ * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e,
+ * which eliminates that vector, so we just send the first split I/O with 30 vectors
+ * and let the completion pick up the last 2 vectors.
+ */
+ bdev->optimal_io_boundary = 32;
+ bdev->split_on_optimal_io_boundary = true;
+ g_io_done = false;
+
+ /* Init all parent IOVs to 0x212 */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 0x212;
+ }
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV - 1);
+ /* expect 0-29 to be 1:1 with the parent iov */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
+ }
+
+ /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
+ * where 0x2e is the amount we overshot the 16K boundary
+ */
+ ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
+ (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
+ * shortened that take it to the next boundary and then a final one to get us to
+ * 0x4200 bytes for the IO.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV, 2);
+ /* position 30 picked up the remaining bytes to the next boundary */
+ ut_expected_io_set_iov(expected_io, 0,
+ (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
+
+ /* position 31 picked up the rest of the transfer to get us to 0x4200 */
+ ut_expected_io_set_iov(expected_io, 1,
+ (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
+ BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+bdev_io_split_with_io_wait(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct spdk_bdev_mgmt_channel *mgmt_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 2,
+ .bdev_io_cache_size = 1,
+ };
+ struct iovec iov[3];
+ struct ut_expected_io *expected_io;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+ mgmt_ch = channel->shared_resource->mgmt_ch;
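+
+ /* With bdev_io_pool_size = 2 and one I/O already outstanding, a split
+ * parent consumes the last spdk_bdev_io, forcing its children onto the
+ * mgmt channel's io_wait queue.
+ */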
+
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = true;
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The following children will be submitted sequentially due to the capacity of
+ * spdk_bdev_io.
+ */
+
+ /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Completing the first read I/O will submit the first child */
+ stub_complete_io(1);
+ CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Completing the first child will submit the second child */
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Complete the second child I/O. This should result in our callback getting
+ * invoked since the parent I/O is now complete.
+ */
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Now set up a more complex, multi-vector command that needs to be split,
+ * including splitting iovecs.
+ */
+ iov[0].iov_base = (void *)0x10000;
+ iov[0].iov_len = 512;
+ iov[1].iov_base = (void *)0x20000;
+ iov[1].iov_len = 20 * 512;
+ iov[2].iov_base = (void *)0x30000;
+ iov[2].iov_len = 11 * 512;
+
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ /* The following children will be submitted sequentially due to the capacity of
+ * spdk_bdev_io.
+ */
+
+ /* Completing the first child will submit the second child */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ /* Completing the second child will submit the third child */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ /* Completing the third child will result in our callback getting invoked
+ * since the parent I/O is now complete.
+ */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+bdev_io_alignment(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 20,
+ .bdev_io_cache_size = 2,
+ };
+ int rc;
+ void *buf;
+ struct iovec iovs[2];
+ int iovcnt;
+ uint64_t alignment;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ fn_table.submit_request = stub_submit_request_get_buf;
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ /* Create aligned buffer */
+ rc = posix_memalign(&buf, 4096, 8192);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
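+ /* required_alignment stores log2 of the required buffer alignment, hence
+ * the spdk_u32log2() conversions below.
+ */
+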
+ /* Pass aligned single buffer with no alignment required */
+ alignment = 1;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ stub_complete_io(1);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ stub_complete_io(1);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+
+ /* Pass unaligned single buffer with no alignment required */
+ alignment = 1;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
+ stub_complete_io(1);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
+ stub_complete_io(1);
+
+ /* Pass unaligned single buffer with 512 alignment required */
+ alignment = 512;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ /* Pass unaligned single buffer with 4096 alignment required */
+ alignment = 4096;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ /* Pass aligned iovs with no alignment required */
+ alignment = 1;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ iovcnt = 1;
+ iovs[0].iov_base = buf;
+ iovs[0].iov_len = 512;
+
+ rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
+
+ rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
+
+ /* Pass unaligned iovs with no alignment required */
+ alignment = 1;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ iovcnt = 2;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 256;
+ iovs[1].iov_base = buf + 16 + 256 + 32;
+ iovs[1].iov_len = 256;
+
+ rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
+
+ rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
+
+ /* Pass unaligned iov with 2048 alignment required */
+ alignment = 2048;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ iovcnt = 2;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 256;
+ iovs[1].iov_base = buf + 16 + 256 + 32;
+ iovs[1].iov_len = 256;
+
+ rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ /* Pass an iov without an allocated buffer, with no alignment required */
+ alignment = 1;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ iovcnt = 1;
+ iovs[0].iov_base = NULL;
+ iovs[0].iov_len = 0;
+
+ rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+
+ /* Pass an iov without an allocated buffer, with 1024 alignment required */
+ alignment = 1024;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ iovcnt = 1;
+ iovs[0].iov_base = NULL;
+ iovs[0].iov_len = 0;
+
+ rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ fn_table.submit_request = stub_submit_request;
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+
+ free(buf);
+}
+
+static void
+bdev_io_alignment_with_boundary(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 20,
+ .bdev_io_cache_size = 2,
+ };
+ int rc;
+ void *buf;
+ struct iovec iovs[2];
+ int iovcnt;
+ uint64_t alignment;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ fn_table.submit_request = stub_submit_request_get_buf;
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ /* Create aligned buffer */
+ rc = posix_memalign(&buf, 4096, 131072);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
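+ /* Each case below combines a buffer that needs bounce or allocation with
+ * an LBA range that crosses the optimal I/O boundary, exercising alignment
+ * handling and splitting together.
+ */
+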
+ /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
+ alignment = 512;
+ bdev->required_alignment = spdk_u32log2(alignment);
+ bdev->optimal_io_boundary = 2;
+ bdev->split_on_optimal_io_boundary = true;
+
+ iovcnt = 1;
+ iovs[0].iov_base = NULL;
+ iovs[0].iov_len = 512 * 3;
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+
+ /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
+ alignment = 512;
+ bdev->required_alignment = spdk_u32log2(alignment);
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = true;
+
+ iovcnt = 1;
+ iovs[0].iov_base = NULL;
+ iovs[0].iov_len = 512 * 16;
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+
+ /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */
+ alignment = 512;
+ bdev->required_alignment = spdk_u32log2(alignment);
+ bdev->optimal_io_boundary = 128;
+ bdev->split_on_optimal_io_boundary = true;
+
+ iovcnt = 1;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 512 * 160;
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+
+ /* 512 * 3 with 2 IO boundary */
+ alignment = 512;
+ bdev->required_alignment = spdk_u32log2(alignment);
+ bdev->optimal_io_boundary = 2;
+ bdev->split_on_optimal_io_boundary = true;
+
+ iovcnt = 2;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 512;
+ iovs[1].iov_base = buf + 16 + 512 + 32;
+ iovs[1].iov_len = 1024;
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+
+ /* 512 * 64 with 32 IO boundary */
+ bdev->optimal_io_boundary = 32;
+ iovcnt = 2;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 16384;
+ iovs[1].iov_base = buf + 16 + 16384 + 32;
+ iovs[1].iov_len = 16384;
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
+ stub_complete_io(3);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
+ stub_complete_io(3);
+
+ /* 512 * 160 with 32 IO boundary */
+ iovcnt = 1;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 16384 + 65536;
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
+ stub_complete_io(6);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ fn_table.submit_request = stub_submit_request;
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+
+ free(buf);
+}
+
+static void
+histogram_status_cb(void *cb_arg, int status)
+{
+ g_status = status;
+}
+
+static void
+histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
+{
+ g_status = status;
+ g_histogram = histogram;
+}
+
+static void
+histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
+ uint64_t total, uint64_t so_far)
+{
+ g_count += count;
+}
+
+static void
+bdev_histograms(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *ch;
+ struct spdk_histogram_data *histogram;
+ uint8_t buf[4096];
+ int rc;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+
+ ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(ch != NULL);
+
+ /* Enable histogram */
+ g_status = -1;
+ spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(bdev->internal.histogram_enabled == true);
+
+ /* Allocate histogram */
+ histogram = spdk_histogram_data_alloc();
+ SPDK_CU_ASSERT_FATAL(histogram != NULL);
+
+ /* Check if histogram is zeroed */
+ spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
+
+ g_count = 0;
+ spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
+
+ CU_ASSERT(g_count == 0);
+
+ rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+
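+ /* Advance the mocked clock so the completed I/O records a nonzero latency. */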
+ spdk_delay_us(10);
+ stub_complete_io(1);
+ poll_threads();
+
+ rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+
+ spdk_delay_us(10);
+ stub_complete_io(1);
+ poll_threads();
+
+ /* Check if histogram gathered data from all I/O channels */
+ g_histogram = NULL;
+ spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(bdev->internal.histogram_enabled == true);
+ SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
+
+ g_count = 0;
+ spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
+ CU_ASSERT(g_count == 2);
+
+ /* Disable histogram */
+ spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(bdev->internal.histogram_enabled == false);
+
+ /* Try to run histogram commands on disabled bdev */
+ spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
+ poll_threads();
+ CU_ASSERT(g_status == -EFAULT);
+
+ spdk_histogram_data_free(histogram);
+ spdk_put_io_channel(ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+_bdev_compare(bool emulated)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *ioch;
+ struct ut_expected_io *expected_io;
+ uint64_t offset, num_blocks;
+ uint32_t num_completed;
+ char aa_buf[512];
+ char bb_buf[512];
+ struct iovec compare_iov;
+ uint8_t io_type;
+ int rc;
+
+ if (emulated) {
+ io_type = SPDK_BDEV_IO_TYPE_READ;
+ } else {
+ io_type = SPDK_BDEV_IO_TYPE_COMPARE;
+ }
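+
+ /* In emulated mode the stub bdev reports no COMPARE support, so the bdev
+ * layer emulates the compare with a READ plus a buffer comparison and the
+ * module sees a READ instead of a COMPARE.
+ */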
+
+ memset(aa_buf, 0xaa, sizeof(aa_buf));
+ memset(bb_buf, 0xbb, sizeof(bb_buf));
+
+ g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+ fn_table.submit_request = stub_submit_request_get_buf;
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ ioch = spdk_bdev_get_io_channel(desc);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+
+ fn_table.submit_request = stub_submit_request_get_buf;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ offset = 50;
+ num_blocks = 1;
+ compare_iov.iov_base = aa_buf;
+ compare_iov.iov_len = sizeof(aa_buf);
+
+ expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ g_io_done = false;
+ g_compare_read_buf = aa_buf;
+ g_compare_read_buf_len = sizeof(aa_buf);
+ rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(1);
+ CU_ASSERT_EQUAL(num_completed, 1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ g_io_done = false;
+ g_compare_read_buf = bb_buf;
+ g_compare_read_buf_len = sizeof(bb_buf);
+ rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(1);
+ CU_ASSERT_EQUAL(num_completed, 1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
+
+ spdk_put_io_channel(ioch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ fn_table.submit_request = stub_submit_request;
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+
+ g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;
+
+ g_compare_read_buf = NULL;
+}
+
+static void
+bdev_compare(void)
+{
+ _bdev_compare(true);
+ _bdev_compare(false);
+}
+
+static void
+bdev_compare_and_write(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *ioch;
+ struct ut_expected_io *expected_io;
+ uint64_t offset, num_blocks;
+ uint32_t num_completed;
+ char aa_buf[512];
+ char bb_buf[512];
+ char cc_buf[512];
+ char write_buf[512];
+ struct iovec compare_iov;
+ struct iovec write_iov;
+ int rc;
+
+ memset(aa_buf, 0xaa, sizeof(aa_buf));
+ memset(bb_buf, 0xbb, sizeof(bb_buf));
+ memset(cc_buf, 0xcc, sizeof(cc_buf));
+
+ g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+ fn_table.submit_request = stub_submit_request_get_buf;
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ ioch = spdk_bdev_get_io_channel(desc);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+
+ fn_table.submit_request = stub_submit_request_get_buf;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ offset = 50;
+ num_blocks = 1;
+ compare_iov.iov_base = aa_buf;
+ compare_iov.iov_len = sizeof(aa_buf);
+ write_iov.iov_base = bb_buf;
+ write_iov.iov_len = sizeof(bb_buf);
+
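+	/* Without native COMPARE support, compare-and-write is emulated: the LBA
+	 * range is locked, a READ is issued for the comparison and, on a match, the
+	 * WRITE follows, so both I/O types are expected here.
+	 */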
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ g_io_done = false;
+ g_compare_read_buf = aa_buf;
+ g_compare_read_buf_len = sizeof(aa_buf);
+ memset(write_buf, 0, sizeof(write_buf));
+ g_compare_write_buf = write_buf;
+ g_compare_write_buf_len = sizeof(write_buf);
+ rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
+ offset, num_blocks, io_done, NULL);
+ /* Trigger range locking */
+ poll_threads();
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(1);
+ CU_ASSERT_EQUAL(num_completed, 1);
+ CU_ASSERT(g_io_done == false);
+ num_completed = stub_complete_io(1);
+ /* Trigger range unlocking */
+ poll_threads();
+ CU_ASSERT_EQUAL(num_completed, 1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ g_io_done = false;
+ g_compare_read_buf = cc_buf;
+ g_compare_read_buf_len = sizeof(cc_buf);
+ memset(write_buf, 0, sizeof(write_buf));
+ g_compare_write_buf = write_buf;
+ g_compare_write_buf_len = sizeof(write_buf);
+ rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
+ offset, num_blocks, io_done, NULL);
+ /* Trigger range locking */
+ poll_threads();
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(1);
+	/* Trigger range unlocking earlier because we expect an error here */
+ poll_threads();
+ CU_ASSERT_EQUAL(num_completed, 1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
+ num_completed = stub_complete_io(1);
+ CU_ASSERT_EQUAL(num_completed, 0);
+
+ spdk_put_io_channel(ioch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ fn_table.submit_request = stub_submit_request;
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+
+ g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;
+
+ g_compare_read_buf = NULL;
+ g_compare_write_buf = NULL;
+}
+
+static void
+bdev_write_zeroes(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *ioch;
+ struct ut_expected_io *expected_io;
+ uint64_t offset, num_io_blocks, num_blocks;
+ uint32_t num_completed, num_requests;
+ int rc;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ ioch = spdk_bdev_get_io_channel(desc);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+
+ fn_table.submit_request = stub_submit_request;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ /* First test that if the bdev supports write_zeroes, the request won't be split */
+ bdev->md_len = 0;
+ bdev->blocklen = 4096;
+ num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(1);
+ CU_ASSERT_EQUAL(num_completed, 1);
+
+ /* Check that if write zeroes is not supported it'll be replaced by regular writes */
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
+ num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
+ num_requests = 2;
+ num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;
+
+ for (offset = 0; offset < num_requests; ++offset) {
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
+ offset * num_io_blocks, num_io_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ }
+
+ rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(num_requests);
+ CU_ASSERT_EQUAL(num_completed, num_requests);
+
+ /* Check that the splitting is correct if bdev has interleaved metadata */
+ bdev->md_interleave = true;
+ bdev->md_len = 64;
+ bdev->blocklen = 4096 + 64;
+ num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
+
+ num_requests = offset = 0;
+ while (offset < num_blocks) {
+ num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
+ offset, num_io_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ offset += num_io_blocks;
+ num_requests++;
+ }
+
+ rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(num_requests);
+ CU_ASSERT_EQUAL(num_completed, num_requests);
+ num_completed = stub_complete_io(num_requests);
+	CU_ASSERT_EQUAL(num_completed, 0);
+
+	/* Check the same for a separate metadata buffer */
+ bdev->md_interleave = false;
+ bdev->md_len = 64;
+ bdev->blocklen = 4096;
+
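+	/* With a separate metadata buffer, each block consumes blocklen + md_len
+	 * bytes of the zero buffer, which shrinks the per-request block count
+	 * accordingly.
+	 */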
+ num_requests = offset = 0;
+ while (offset < num_blocks) {
+ num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
+ offset, num_io_blocks, 0);
+ expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ offset += num_io_blocks;
+ num_requests++;
+ }
+
+ rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(num_requests);
+ CU_ASSERT_EQUAL(num_completed, num_requests);
+
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
+ spdk_put_io_channel(ioch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+bdev_open_while_hotremove(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc[2] = {};
+ int rc;
+
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[0]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
+
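+	/* Hot-remove the bdev; any open attempt from now on must fail with -ENODEV. */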
+ spdk_bdev_unregister(bdev, NULL, NULL);
+
+ rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[1]);
+ CU_ASSERT(rc == -ENODEV);
+ SPDK_CU_ASSERT_FATAL(desc[1] == NULL);
+
+ spdk_bdev_close(desc[0]);
+ free_bdev(bdev);
+}
+
+static void
+bdev_close_while_hotremove(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ int rc = 0;
+
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Simulate hot-unplug by unregistering bdev */
+ g_event_type1 = 0xFF;
+ g_unregister_arg = NULL;
+ g_unregister_rc = -1;
+ spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
+ /* Close device while remove event is in flight */
+ spdk_bdev_close(desc);
+
+ /* Ensure that unregister callback is delayed */
+ CU_ASSERT_EQUAL(g_unregister_arg, NULL);
+ CU_ASSERT_EQUAL(g_unregister_rc, -1);
+
+ poll_threads();
+
+ /* Event callback shall not be issued because device was closed */
+ CU_ASSERT_EQUAL(g_event_type1, 0xFF);
+ /* Unregister callback is issued */
+ CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
+ CU_ASSERT_EQUAL(g_unregister_rc, 0);
+
+ free_bdev(bdev);
+}
+
+static void
+bdev_open_ext(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc1 = NULL;
+ struct spdk_bdev_desc *desc2 = NULL;
+ int rc = 0;
+
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
+ CU_ASSERT_EQUAL(rc, -EINVAL);
+
+ rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ g_event_type1 = 0xFF;
+ g_event_type2 = 0xFF;
+
+ /* Simulate hot-unplug by unregistering bdev */
+ spdk_bdev_unregister(bdev, NULL, NULL);
+ poll_threads();
+
+ /* Check if correct events have been triggered in event callback fn */
+ CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
+ CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
+
+ free_bdev(bdev);
+ poll_threads();
+}
+
+struct timeout_io_cb_arg {
+ struct iovec iov;
+ uint8_t type;
+};
+
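+/* Walk the channel's io_submitted list and return the number of I/Os still
+ * outstanding on it (or -1 if the channel is NULL).
+ */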
+static int
+bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
+{
+ struct spdk_bdev_io *bdev_io;
+ int n = 0;
+
+ if (!ch) {
+ return -1;
+ }
+
+ TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
+ n++;
+ }
+
+ return n;
+}
+
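+/* Timeout callback: record the type and payload of the timed-out I/O so the
+ * test can check which I/O was caught.
+ */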
+static void
+bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
+{
+ struct timeout_io_cb_arg *ctx = cb_arg;
+
+ ctx->type = bdev_io->type;
+ ctx->iov.iov_base = bdev_io->iov.iov_base;
+ ctx->iov.iov_len = bdev_io->iov.iov_len;
+}
+
+static void
+bdev_set_io_timeout(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch = NULL;
+ struct spdk_bdev_channel *bdev_ch = NULL;
+ struct timeout_io_cb_arg cb_arg;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev");
+
+ CU_ASSERT(spdk_bdev_open(bdev, true, NULL, NULL, &desc) == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
+
+	/* Part 1.
+	 * Check the bdev_ch->io_submitted list to make sure it links the I/Os
+	 * submitted on this channel.
+	 */
+ CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
+ CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
+ stub_complete_io(1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
+ stub_complete_io(1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
+
+ /* Split IO */
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = true;
+
+	/* Now test that a single-vector command is split correctly.
+	 * Offset 14, length 8, payload 0xF000
+	 *  Child - Offset 14, length 2, payload 0xF000
+	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+	 */
+ CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
+	/* All submitted I/Os are counted, including the children generated by splitting. */
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
+ stub_complete_io(1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
+ stub_complete_io(1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
+
+ /* Also include the reset IO */
+ CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
+ poll_threads();
+ stub_complete_io(1);
+ poll_threads();
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
+
+	/* Part 2.
+	 * Test registration of the descriptor's I/O timeout poller.
+	 */
+
+ /* Successfully set the timeout */
+ CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
+ CU_ASSERT(desc->io_timeout_poller != NULL);
+ CU_ASSERT(desc->timeout_in_sec == 30);
+ CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
+ CU_ASSERT(desc->cb_arg == &cb_arg);
+
+ /* Change the timeout limit */
+ CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
+ CU_ASSERT(desc->io_timeout_poller != NULL);
+ CU_ASSERT(desc->timeout_in_sec == 20);
+ CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
+ CU_ASSERT(desc->cb_arg == &cb_arg);
+
+ /* Disable the timeout */
+ CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
+ CU_ASSERT(desc->io_timeout_poller == NULL);
+
+	/* Part 3.
+	 * Catch a timed-out I/O and check that it is the one we submitted.
+	 */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
+ CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);
+
+	/* Advance 15s; the 30s limit is not reached yet */
+ spdk_delay_us(15 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+	/* Advance another 15s; 15 + 15 = 30s reaches the limit */
+ spdk_delay_us(15 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
+ CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
+ stub_complete_io(1);
+
+ /* Use the same split IO above and check the IO */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
+
+	/* The first child completes in time */
+ spdk_delay_us(15 * spdk_get_ticks_hz());
+ poll_threads();
+ stub_complete_io(1);
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+	/* The second child reaches the limit */
+ spdk_delay_us(15 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
+ CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
+ stub_complete_io(1);
+
+ /* Also include the reset IO */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
+ spdk_delay_us(30 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
+ stub_complete_io(1);
+ poll_threads();
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+lba_range_overlap(void)
+{
+ struct lba_range r1, r2;
+
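+	/* r1 covers LBAs [100, 149]. bdev_lba_range_overlapped() is expected to
+	 * treat the ranges as overlapping iff
+	 * r1.offset < r2.offset + r2.length && r2.offset < r1.offset + r1.length,
+	 * with zero-length ranges never overlapping any range.
+	 */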
+ r1.offset = 100;
+ r1.length = 50;
+
+ r2.offset = 0;
+ r2.length = 1;
+ CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 0;
+ r2.length = 100;
+ CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 0;
+ r2.length = 110;
+ CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 100;
+ r2.length = 10;
+ CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 110;
+ r2.length = 20;
+ CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 140;
+ r2.length = 150;
+ CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 130;
+ r2.length = 200;
+ CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 150;
+ r2.length = 100;
+ CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 110;
+ r2.length = 0;
+ CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
+}
+
+static bool g_lock_lba_range_done;
+static bool g_unlock_lba_range_done;
+
+static void
+lock_lba_range_done(void *ctx, int status)
+{
+ g_lock_lba_range_done = true;
+}
+
+static void
+unlock_lba_range_done(void *ctx, int status)
+{
+ g_unlock_lba_range_done = true;
+}
+
+static void
+lock_lba_range_check_ranges(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct lba_range *range;
+ int ctx1;
+ int rc;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_lock_lba_range_done == true);
+ range = TAILQ_FIRST(&channel->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 20);
+ CU_ASSERT(range->length == 10);
+ CU_ASSERT(range->owner_ch == channel);
+
+ /* Unlocks must exactly match a lock. */
+ g_unlock_lba_range_done = false;
+ rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(g_unlock_lba_range_done == false);
+
+ rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ spdk_delay_us(100);
+ poll_threads();
+
+ CU_ASSERT(g_unlock_lba_range_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+lock_lba_range_with_io_outstanding(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct lba_range *range;
+ char buf[4096];
+ int ctx1;
+ int rc;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+
+ g_io_done = false;
+ rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
+ CU_ASSERT(rc == 0);
+
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ /* The lock should immediately become valid, since there are no outstanding
+ * write I/O.
+ */
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_lock_lba_range_done == true);
+ range = TAILQ_FIRST(&channel->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 20);
+ CU_ASSERT(range->length == 10);
+ CU_ASSERT(range->owner_ch == channel);
+ CU_ASSERT(range->locked_ctx == &ctx1);
+
+	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ stub_complete_io(1);
+ spdk_delay_us(100);
+ poll_threads();
+
+ CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
+
+ /* Now try again, but with a write I/O. */
+ g_io_done = false;
+ rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
+ CU_ASSERT(rc == 0);
+
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+	/* The lock should not be fully valid yet, since a write I/O is outstanding.
+	 * But note that the range should already be on the channel's locked_ranges
+	 * list, to make sure no new write I/O is started.
+	 */
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_lock_lba_range_done == false);
+ range = TAILQ_FIRST(&channel->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 20);
+ CU_ASSERT(range->length == 10);
+
+ /* Complete the write I/O. This should make the lock valid (checked by confirming
+ * our callback was invoked).
+ */
+ stub_complete_io(1);
+ spdk_delay_us(100);
+ poll_threads();
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_lock_lba_range_done == true);
+
+ rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+lock_lba_range_overlapped(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct lba_range *range;
+ int ctx1;
+ int rc;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+
+ /* Lock range 20-29. */
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_lock_lba_range_done == true);
+ range = TAILQ_FIRST(&channel->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 20);
+ CU_ASSERT(range->length == 10);
+
+ /* Try to lock range 25-39. It should not lock immediately, since it overlaps with
+ * 20-29.
+ */
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_lock_lba_range_done == false);
+ range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 25);
+ CU_ASSERT(range->length == 15);
+
+ /* Unlock 20-29. This should result in range 25-39 now getting locked since it
+ * no longer overlaps with an active lock.
+ */
+ g_unlock_lba_range_done = false;
+ rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_unlock_lba_range_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
+ range = TAILQ_FIRST(&channel->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 25);
+ CU_ASSERT(range->length == 15);
+
+ /* Lock 40-59. This should immediately lock since it does not overlap with the
+ * currently active 25-39 lock.
+ */
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_lock_lba_range_done == true);
+ range = TAILQ_FIRST(&bdev->internal.locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ range = TAILQ_NEXT(range, tailq);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 40);
+ CU_ASSERT(range->length == 20);
+
+ /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_lock_lba_range_done == false);
+ range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 35);
+ CU_ASSERT(range->length == 10);
+
+ /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
+ * the 40-59 lock is still active.
+ */
+ g_unlock_lba_range_done = false;
+ rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_unlock_lba_range_done == true);
+ CU_ASSERT(g_lock_lba_range_done == false);
+ range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 35);
+ CU_ASSERT(range->length == 10);
+
+ /* Unlock 40-59. This should result in 35-44 now getting locked, since there are
+ * no longer any active overlapping locks.
+ */
+ g_unlock_lba_range_done = false;
+ rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_unlock_lba_range_done == true);
+ CU_ASSERT(g_lock_lba_range_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
+ range = TAILQ_FIRST(&bdev->internal.locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 35);
+ CU_ASSERT(range->length == 10);
+
+ /* Finally, unlock 35-44. */
+ g_unlock_lba_range_done = false;
+ rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_unlock_lba_range_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_abort_done = true;
+ g_abort_status = bdev_io->internal.status;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+bdev_io_abort(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct spdk_bdev_mgmt_channel *mgmt_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 7,
+ .bdev_io_cache_size = 2,
+ };
+ struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
+ uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
+ int rc;
+
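+	/* The deliberately small bdev_io pool (7 entries) lets the last case below
+	 * exhaust spdk_bdev_io, forcing the child abort requests onto the wait
+	 * queue so that they are submitted sequentially.
+	 */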
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+ mgmt_ch = channel->shared_resource->mgmt_ch;
+
+ g_abort_done = false;
+
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == -ENOTSUP);
+
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /* Test the case that the target I/O was successfully aborted. */
+ g_io_done = false;
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ g_abort_done = false;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+ stub_complete_io(1);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Test the case that the target I/O was not aborted because it completed
+ * in the middle of execution of the abort.
+ */
+ g_io_done = false;
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ g_abort_done = false;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
+ stub_complete_io(1);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = true;
+
+ /* Test that a single-vector command which is split is aborted correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ */
+ g_io_done = false;
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+ stub_complete_io(2);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+	/* Test that a multi-vector command that needs to be split on the optimal
+	 * I/O boundary, and would then need to be split again due to the iovec
+	 * capacity of a child I/O, is aborted correctly. The abort is requested
+	 * before the second child I/O is submitted, so the parent I/O should
+	 * complete with failure without the second child I/O ever being submitted.
+	 */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+ stub_complete_io(1);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ bdev->optimal_io_boundary = 16;
+ g_io_done = false;
+
+	/* Test that a single-vector command which is split is aborted correctly.
+	 * Unlike the case above, the child abort requests are submitted
+	 * sequentially because the spdk_bdev_io pool cannot hold them all at once.
+	 */
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+
+ g_abort_done = false;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+ stub_complete_io(3);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("bdev", null_init, null_clean);
+
+ CU_ADD_TEST(suite, bytes_to_blocks_test);
+ CU_ADD_TEST(suite, num_blocks_test);
+ CU_ADD_TEST(suite, io_valid_test);
+ CU_ADD_TEST(suite, open_write_test);
+ CU_ADD_TEST(suite, alias_add_del_test);
+ CU_ADD_TEST(suite, get_device_stat_test);
+ CU_ADD_TEST(suite, bdev_io_types_test);
+ CU_ADD_TEST(suite, bdev_io_wait_test);
+ CU_ADD_TEST(suite, bdev_io_spans_boundary_test);
+ CU_ADD_TEST(suite, bdev_io_split_test);
+ CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
+ CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
+ CU_ADD_TEST(suite, bdev_io_alignment);
+ CU_ADD_TEST(suite, bdev_histograms);
+ CU_ADD_TEST(suite, bdev_write_zeroes);
+ CU_ADD_TEST(suite, bdev_compare_and_write);
+ CU_ADD_TEST(suite, bdev_compare);
+ CU_ADD_TEST(suite, bdev_open_while_hotremove);
+ CU_ADD_TEST(suite, bdev_close_while_hotremove);
+ CU_ADD_TEST(suite, bdev_open_ext);
+ CU_ADD_TEST(suite, bdev_set_io_timeout);
+ CU_ADD_TEST(suite, lba_range_overlap);
+ CU_ADD_TEST(suite, lock_lba_range_check_ranges);
+ CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
+ CU_ADD_TEST(suite, lock_lba_range_overlapped);
+ CU_ADD_TEST(suite, bdev_io_abort);
+
+ allocate_cores(1);
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+ free_cores();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore
new file mode 100644
index 000000000..906b8067c
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore
@@ -0,0 +1 @@
+bdev_ocssd_ut
diff --git a/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile
new file mode 100644
index 000000000..7106d46fc
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = bdev_ocssd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c
new file mode 100644
index 000000000..a2f8e7f71
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c
@@ -0,0 +1,1195 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/nvme_ocssd_spec.h"
+#include "spdk/thread.h"
+#include "spdk/bdev_module.h"
+#include "spdk/util.h"
+#include "spdk_internal/mock.h"
+
+#include "bdev/nvme/bdev_ocssd.c"
+#include "bdev/nvme/common.c"
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_ns, bool, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid),
+ true);
+DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 4096);
+DEFINE_STUB(spdk_nvme_ns_is_active, bool, (struct spdk_nvme_ns *ns), true);
+DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));
+DEFINE_STUB_V(spdk_bdev_io_complete_nvme_status, (struct spdk_bdev_io *bdev_io, uint32_t cdw0,
+ int sct, int sc));
+DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io),
+ NULL);
+DEFINE_STUB(spdk_bdev_push_media_events, int, (struct spdk_bdev *bdev,
+ const struct spdk_bdev_media_event *events,
+ size_t num_events), 0);
+DEFINE_STUB_V(spdk_bdev_notify_media_management, (struct spdk_bdev *bdev));
+DEFINE_STUB_V(nvme_ctrlr_depopulate_namespace_done, (struct nvme_bdev_ctrlr *ctrlr));
+DEFINE_STUB_V(spdk_bdev_module_finish_done, (void));
+
+struct nvme_request {
+ spdk_nvme_cmd_cb cb_fn;
+ void *cb_arg;
+ TAILQ_ENTRY(nvme_request) tailq;
+};
+
+struct spdk_nvme_qpair {
+ TAILQ_HEAD(, nvme_request) requests;
+};
+
+struct spdk_nvme_ns {
+ uint32_t nsid;
+};
+
+struct spdk_nvme_ctrlr {
+ struct spdk_nvme_transport_id trid;
+ struct spdk_ocssd_geometry_data geometry;
+ struct spdk_nvme_qpair *admin_qpair;
+ struct spdk_nvme_ns *ns;
+ uint32_t ns_count;
+ struct spdk_ocssd_chunk_information_entry *chunk_info;
+ uint64_t num_chunks;
+
+ LIST_ENTRY(spdk_nvme_ctrlr) list;
+};
+
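+/* Registries of the mocked NVMe controllers and registered bdevs backing the
+ * stubs below.
+ */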
+static LIST_HEAD(, spdk_nvme_ctrlr) g_ctrlr_list = LIST_HEAD_INITIALIZER(g_ctrlr_list);
+static TAILQ_HEAD(, spdk_bdev) g_bdev_list = TAILQ_HEAD_INITIALIZER(g_bdev_list);
+static struct spdk_thread *g_thread;
+
+static struct spdk_nvme_ctrlr *
+find_controller(const struct spdk_nvme_transport_id *trid)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+
+ LIST_FOREACH(ctrlr, &g_ctrlr_list, list) {
+ if (!spdk_nvme_transport_id_compare(trid, &ctrlr->trid)) {
+ return ctrlr;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+free_controller(struct spdk_nvme_ctrlr *ctrlr)
+{
+ CU_ASSERT(!nvme_bdev_ctrlr_get(&ctrlr->trid));
+ LIST_REMOVE(ctrlr, list);
+ spdk_nvme_ctrlr_free_io_qpair(ctrlr->admin_qpair);
+ free(ctrlr->chunk_info);
+ free(ctrlr->ns);
+ free(ctrlr);
+}
+
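+/* Convert a flat chunk index into an OCSSD LBA by packing the chunk, parallel
+ * unit and group numbers into the bit fields described by the geometry.
+ */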
+static uint64_t
+chunk_offset_to_lba(struct spdk_ocssd_geometry_data *geo, uint64_t offset)
+{
+ uint64_t chk, pu, grp;
+ uint64_t chk_off, pu_off, grp_off;
+
+ chk_off = geo->lbaf.lbk_len;
+ pu_off = geo->lbaf.chk_len + chk_off;
+ grp_off = geo->lbaf.pu_len + pu_off;
+
+ chk = offset % geo->num_chk;
+ pu = (offset / geo->num_chk) % geo->num_pu;
+ grp = (offset / (geo->num_chk * geo->num_pu)) % geo->num_grp;
+
+ return chk << chk_off |
+ pu << pu_off |
+ grp << grp_off;
+}
+
+static struct spdk_nvme_ctrlr *
+create_controller(const struct spdk_nvme_transport_id *trid, uint32_t ns_count,
+ const struct spdk_ocssd_geometry_data *geo)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ uint32_t nsid, offset;
+
+ SPDK_CU_ASSERT_FATAL(!find_controller(trid));
+
+ ctrlr = calloc(1, sizeof(*ctrlr));
+ SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
+
+ ctrlr->ns = calloc(ns_count, sizeof(*ctrlr->ns));
+ SPDK_CU_ASSERT_FATAL(ctrlr->ns != NULL);
+
+ ctrlr->num_chunks = geo->num_grp * geo->num_pu * geo->num_chk;
+ ctrlr->chunk_info = calloc(ctrlr->num_chunks, sizeof(*ctrlr->chunk_info));
+ SPDK_CU_ASSERT_FATAL(ctrlr->chunk_info != NULL);
+
+ for (nsid = 0; nsid < ns_count; ++nsid) {
+ ctrlr->ns[nsid].nsid = nsid + 1;
+ }
+
+ ctrlr->geometry = *geo;
+ ctrlr->trid = *trid;
+ ctrlr->ns_count = ns_count;
+ ctrlr->admin_qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);
+
+ for (offset = 0; offset < ctrlr->num_chunks; ++offset) {
+ ctrlr->chunk_info[offset].cs.free = 1;
+ ctrlr->chunk_info[offset].slba = chunk_offset_to_lba(&ctrlr->geometry, offset);
+ ctrlr->chunk_info[offset].wp = ctrlr->chunk_info[offset].slba;
+ }
+
+ SPDK_CU_ASSERT_FATAL(ctrlr->admin_qpair != NULL);
+
+ LIST_INSERT_HEAD(&g_ctrlr_list, ctrlr, list);
+
+ return ctrlr;
+}
+
+static int
+io_channel_create_cb(void *io_device, void *ctx_buf)
+{
+ return 0;
+}
+
+static void
+io_channel_destroy_cb(void *io_device, void *ctx_buf)
+{}
+
+void
+nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx,
+ struct nvme_bdev_ns *ns, int rc)
+{
+ CU_ASSERT_EQUAL(rc, 0);
+}
+
+static struct nvme_bdev_ctrlr *
+create_nvme_bdev_controller(const struct spdk_nvme_transport_id *trid, const char *name)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ uint32_t nsid;
+
+ ctrlr = find_controller(trid);
+
+ SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
+ SPDK_CU_ASSERT_FATAL(!nvme_bdev_ctrlr_get(trid));
+
+ nvme_bdev_ctrlr = calloc(1, sizeof(*nvme_bdev_ctrlr));
+ SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
+
+ nvme_bdev_ctrlr->namespaces = calloc(ctrlr->ns_count, sizeof(struct nvme_bdev_ns *));
+ SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces != NULL);
+
+ nvme_bdev_ctrlr->trid = calloc(1, sizeof(struct spdk_nvme_transport_id));
+ SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->trid != NULL);
+
+ nvme_bdev_ctrlr->ctrlr = ctrlr;
+ nvme_bdev_ctrlr->num_ns = ctrlr->ns_count;
+ nvme_bdev_ctrlr->ref = 0;
+ *nvme_bdev_ctrlr->trid = *trid;
+ nvme_bdev_ctrlr->name = strdup(name);
+
+ for (nsid = 0; nsid < ctrlr->ns_count; ++nsid) {
+ nvme_bdev_ctrlr->namespaces[nsid] = calloc(1, sizeof(struct nvme_bdev_ns));
+ SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces[nsid] != NULL);
+
+ nvme_bdev_ctrlr->namespaces[nsid]->id = nsid + 1;
+ nvme_bdev_ctrlr->namespaces[nsid]->ctrlr = nvme_bdev_ctrlr;
+ nvme_bdev_ctrlr->namespaces[nsid]->type = NVME_BDEV_NS_OCSSD;
+ TAILQ_INIT(&nvme_bdev_ctrlr->namespaces[nsid]->bdevs);
+
+ bdev_ocssd_populate_namespace(nvme_bdev_ctrlr, nvme_bdev_ctrlr->namespaces[nsid], NULL);
+ }
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+
+ spdk_io_device_register(nvme_bdev_ctrlr, io_channel_create_cb,
+ io_channel_destroy_cb, 0, name);
+
+ TAILQ_INSERT_TAIL(&g_nvme_bdev_ctrlrs, nvme_bdev_ctrlr, tailq);
+
+ return nvme_bdev_ctrlr;
+}
+
+static struct nvme_request *
+alloc_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ struct nvme_request *ctx;
+
+ ctx = calloc(1, sizeof(*ctx));
+ SPDK_CU_ASSERT_FATAL(ctx != NULL);
+
+ ctx->cb_fn = cb_fn;
+ ctx->cb_arg = cb_arg;
+
+ return ctx;
+}
+
+uint32_t
+spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return ctrlr->ns_count;
+}
+
+uint32_t
+spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
+{
+ return ns->nsid;
+}
+
+struct spdk_nvme_ns *
+spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
+{
+ if (nsid == 0 || nsid > ctrlr->ns_count) {
+ return NULL;
+ }
+
+ return &ctrlr->ns[nsid - 1];
+}
+
+struct spdk_nvme_ctrlr *
+spdk_nvme_connect(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ size_t opts_size)
+{
+ return find_controller(trid);
+}
+
+int
+spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ struct spdk_bdev *bdev;
+
+ SPDK_CU_ASSERT_FATAL(bdev_name != NULL);
+
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ if (!strcmp(bdev->name, bdev_name)) {
+ return bdev;
+ }
+ }
+
+ return NULL;
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return bdev->name;
+}
+
+int
+spdk_bdev_register(struct spdk_bdev *bdev)
+{
+ CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(bdev->name));
+ TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link);
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ int rc;
+
+ CU_ASSERT_EQUAL(spdk_bdev_get_by_name(bdev->name), bdev);
+ TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
+
+ rc = bdev->fn_table->destruct(bdev->ctxt);
+ if (rc <= 0 && cb_fn != NULL) {
+ cb_fn(cb_arg, 0);
+ }
+}
+
+size_t
+spdk_bdev_get_zone_size(const struct spdk_bdev *bdev)
+{
+ return bdev->zone_size;
+}
+
+int
+spdk_nvme_ocssd_ctrlr_cmd_geometry(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
+ void *payload, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ struct spdk_nvme_cpl cpl = {};
+
+ CU_ASSERT_EQUAL(payload_size, sizeof(ctrlr->geometry));
+ memcpy(payload, &ctrlr->geometry, sizeof(ctrlr->geometry));
+
+ cb_fn(cb_arg, &cpl);
+
+ return 0;
+}
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return memcmp(trid1, trid2, sizeof(*trid1));
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+}
+
+int32_t
+spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return spdk_nvme_qpair_process_completions(ctrlr->admin_qpair, 0);
+}
+
+struct spdk_nvme_qpair *
+spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
+ const struct spdk_nvme_io_qpair_opts *opts,
+ size_t opts_size)
+{
+ struct spdk_nvme_qpair *qpair;
+
+ qpair = calloc(1, sizeof(*qpair));
+ SPDK_CU_ASSERT_FATAL(qpair != NULL);
+
+ TAILQ_INIT(&qpair->requests);
+ return qpair;
+}
+
+int
+spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
+{
+ CU_ASSERT(TAILQ_EMPTY(&qpair->requests));
+ free(qpair);
+
+ return 0;
+}
+
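+/* Drain the mock qpair: complete every queued request with a successful
+ * completion and return the number of requests processed.
+ */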
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ struct nvme_request *req;
+ struct spdk_nvme_cpl cpl = {};
+ int32_t num_requests = 0;
+
+ while ((req = TAILQ_FIRST(&qpair->requests))) {
+ TAILQ_REMOVE(&qpair->requests, req, tailq);
+
+ req->cb_fn(req->cb_arg, &cpl);
+ free(req);
+
+ num_requests++;
+ }
+
+ return num_requests;
+}
+
+int
+spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+ uint64_t lba, uint32_t lba_count,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
+ spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
+ spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
+ uint16_t apptag_mask, uint16_t apptag)
+{
+ struct nvme_request *req;
+
+ req = alloc_request(cb_fn, cb_arg);
+ TAILQ_INSERT_TAIL(&qpair->requests, req, tailq);
+
+ return 0;
+}
+
+int
+spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+ uint64_t lba, uint32_t lba_count,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
+ spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
+ spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
+ uint16_t apptag_mask, uint16_t apptag)
+{
+ struct nvme_request *req;
+
+ req = alloc_request(cb_fn, cb_arg);
+ TAILQ_INSERT_TAIL(&qpair->requests, req, tailq);
+
+ return 0;
+}
+
+int
+spdk_nvme_ocssd_ns_cmd_vector_reset(struct spdk_nvme_ns *ns,
+ struct spdk_nvme_qpair *qpair,
+ uint64_t *lba_list, uint32_t num_lbas,
+ struct spdk_ocssd_chunk_information_entry *chunk_info,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = alloc_request(cb_fn, cb_arg);
+ TAILQ_INSERT_TAIL(&qpair->requests, req, tailq);
+
+ return 0;
+}
+
+static struct spdk_nvme_cpl g_chunk_info_cpl;
+static bool g_zone_info_status = true;
+
+int
+spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
+ uint8_t log_page, uint32_t nsid,
+ void *payload, uint32_t payload_size,
+ uint64_t offset,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ SPDK_CU_ASSERT_FATAL(offset + payload_size <= sizeof(*ctrlr->chunk_info) * ctrlr->num_chunks);
+ memcpy(payload, ((char *)ctrlr->chunk_info) + offset, payload_size);
+
+ cb_fn(cb_arg, &g_chunk_info_cpl);
+
+ return 0;
+}
+
+static void
+create_bdev_cb(const char *bdev_name, int status, void *ctx)
+{
+ *(int *)ctx = status;
+}
+
+static int
+create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t nsid,
+ const struct bdev_ocssd_range *range)
+{
+ int status = EFAULT;
+
+ bdev_ocssd_create_bdev(ctrlr_name, bdev_name, nsid, range, create_bdev_cb, &status);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+
+ return status;
+}
+
+static void
+delete_nvme_bdev_controller(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
+{
+ struct nvme_bdev *nvme_bdev, *tmp;
+ struct nvme_bdev_ns *nvme_ns;
+ bool empty = true;
+ uint32_t nsid;
+
+ nvme_bdev_ctrlr->destruct = true;
+
+ for (nsid = 0; nsid < nvme_bdev_ctrlr->num_ns; ++nsid) {
+ nvme_ns = nvme_bdev_ctrlr->namespaces[nsid];
+
+ if (!TAILQ_EMPTY(&nvme_ns->bdevs)) {
+ TAILQ_FOREACH_SAFE(nvme_bdev, &nvme_ns->bdevs, tailq, tmp) {
+ spdk_bdev_unregister(&nvme_bdev->disk, NULL, NULL);
+ }
+
+ empty = false;
+ }
+
+ bdev_ocssd_depopulate_namespace(nvme_bdev_ctrlr->namespaces[nsid]);
+ }
+
+ if (empty) {
+ nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr);
+ }
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_bdev_ctrlrs));
+}
+
+static void
+test_create_controller(void)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
+ struct spdk_ocssd_geometry_data geometry = {};
+ struct spdk_bdev *bdev;
+ struct bdev_ocssd_range range;
+ const char *controller_name = "nvme0";
+ const size_t ns_count = 16;
+ char namebuf[128];
+ uint32_t nsid;
+ int rc;
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 512,
+ .num_chk = 64,
+ .num_pu = 8,
+ .num_grp = 4,
+ .maxoc = 69,
+ .maxocpu = 68,
+ .ws_opt = 86,
+ .lbaf = {
+ .lbk_len = 9,
+ .chk_len = 6,
+ .pu_len = 3,
+ .grp_len = 2,
+ }
+ };
+
+ ctrlr = create_controller(&trid, ns_count, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ for (nsid = 1; nsid <= ns_count; ++nsid) {
+ snprintf(namebuf, sizeof(namebuf), "%sn%"PRIu32, controller_name, nsid);
+ rc = create_bdev(controller_name, namebuf, nsid, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(namebuf);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ CU_ASSERT_TRUE(bdev->zoned);
+ }
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ /* Verify that after deletion the bdevs can still be created */
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ for (nsid = 1; nsid <= ns_count; ++nsid) {
+ snprintf(namebuf, sizeof(namebuf), "%sn%"PRIu32, controller_name, nsid);
+ rc = create_bdev(controller_name, namebuf, nsid, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(namebuf);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ CU_ASSERT_TRUE(bdev->zoned);
+ }
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+	/* Verify it's not possible to create a bdev on a non-existent namespace */
+ rc = create_bdev(controller_name, "invalid", ns_count + 1, NULL);
+ CU_ASSERT_EQUAL(rc, -ENODEV);
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ /* Verify the correctness of parallel unit range validation */
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ range.begin = 0;
+ range.end = geometry.num_grp * geometry.num_pu;
+
+ rc = create_bdev(controller_name, "invalid", 1, &range);
+ CU_ASSERT_EQUAL(rc, -EINVAL);
+
+ /* Verify it's not possible for the bdevs to overlap */
+ range.begin = 0;
+ range.end = 16;
+ rc = create_bdev(controller_name, "valid", 1, &range);
+ CU_ASSERT_EQUAL(rc, 0);
+ bdev = spdk_bdev_get_by_name("valid");
+ CU_ASSERT_PTR_NOT_NULL(bdev);
+
+ range.begin = 16;
+ range.end = 31;
+ rc = create_bdev(controller_name, "invalid", 1, &range);
+ CU_ASSERT_EQUAL(rc, -EINVAL);
+
+ /* But it is possible to create them without overlap */
+ range.begin = 17;
+ range.end = 31;
+ rc = create_bdev(controller_name, "valid2", 1, &range);
+ CU_ASSERT_EQUAL(rc, 0);
+ bdev = spdk_bdev_get_by_name("valid2");
+ CU_ASSERT_PTR_NOT_NULL(bdev);
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ free_controller(ctrlr);
+}
+
+static void
+test_device_geometry(void)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
+ const char *controller_name = "nvme0";
+ const char *bdev_name = "nvme0n1";
+ struct spdk_ocssd_geometry_data geometry;
+ struct spdk_bdev *bdev;
+ int rc;
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 512,
+ .num_chk = 64,
+ .num_pu = 8,
+ .num_grp = 4,
+ .maxoc = 69,
+ .maxocpu = 68,
+ .ws_opt = 86,
+ .lbaf = {
+ .lbk_len = 9,
+ .chk_len = 6,
+ .pu_len = 3,
+ .grp_len = 2,
+ }
+ };
+
+ ctrlr = create_controller(&trid, 1, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ rc = create_bdev(controller_name, bdev_name, 1, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(bdev_name);
+ CU_ASSERT_EQUAL(bdev->blockcnt, geometry.clba *
+ geometry.num_chk *
+ geometry.num_pu *
+ geometry.num_grp);
+ CU_ASSERT_EQUAL(bdev->zone_size, geometry.clba);
+ CU_ASSERT_EQUAL(bdev->optimal_open_zones, geometry.num_pu * geometry.num_grp);
+ CU_ASSERT_EQUAL(bdev->max_open_zones, geometry.maxocpu);
+ CU_ASSERT_EQUAL(bdev->write_unit_size, geometry.ws_opt);
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ free_controller(ctrlr);
+}
+
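+/* Build an OCSSD LBA from its (lbk, chk, pu, grp) components, asserting that
+ * the components fit within their geometry-defined field widths.
+ */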
+static uint64_t
+generate_lba(const struct spdk_ocssd_geometry_data *geo, uint64_t lbk,
+ uint64_t chk, uint64_t pu, uint64_t grp)
+{
+ uint64_t lba, len;
+
+ lba = lbk;
+ len = geo->lbaf.lbk_len;
+ CU_ASSERT(lbk < (1ull << geo->lbaf.lbk_len));
+
+ lba |= chk << len;
+ len += geo->lbaf.chk_len;
+ CU_ASSERT(chk < (1ull << geo->lbaf.chk_len));
+
+ lba |= pu << len;
+ len += geo->lbaf.pu_len;
+ CU_ASSERT(pu < (1ull << geo->lbaf.pu_len));
+
+ lba |= grp << len;
+
+ return lba;
+}
+
+static void
+test_lba_translation(void)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
+ const char *controller_name = "nvme0";
+ const char *bdev_name = "nvme0n1";
+ struct spdk_ocssd_geometry_data geometry = {};
+ struct ocssd_bdev *ocssd_bdev;
+ struct spdk_bdev *bdev;
+ uint64_t lba;
+ int rc;
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 512,
+ .num_chk = 64,
+ .num_pu = 8,
+ .num_grp = 4,
+ .lbaf = {
+ .lbk_len = 9,
+ .chk_len = 6,
+ .pu_len = 3,
+ .grp_len = 2,
+ }
+ };
+
+ ctrlr = create_controller(&trid, 1, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ rc = create_bdev(controller_name, bdev_name, 1, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(bdev_name);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ ocssd_bdev = SPDK_CONTAINEROF(bdev, struct ocssd_bdev, nvme_bdev.disk);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, 0);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), 0);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size - 1);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, bdev->zone_size - 1, 0, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size - 1);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 1, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 1));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * geometry.num_pu);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu + 68);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, 0, 1));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * geometry.num_pu + 68);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size + 68);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, 1, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size + 68);
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+ free_controller(ctrlr);
+
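+	/* Repeat with a geometry whose address fields are wider than strictly
+	 * necessary (e.g. pu_len = 4 allows 16 parallel units, but only 9 are
+	 * present), so the raw device address space contains gaps.
+	 */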
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 5120,
+ .num_chk = 501,
+ .num_pu = 9,
+ .num_grp = 1,
+ .lbaf = {
+ .lbk_len = 13,
+ .chk_len = 9,
+ .pu_len = 4,
+ .grp_len = 1,
+ }
+ };
+
+ ctrlr = create_controller(&trid, 1, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ rc = create_bdev(controller_name, bdev_name, 1, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(bdev_name);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ ocssd_bdev = SPDK_CONTAINEROF(bdev, struct ocssd_bdev, nvme_bdev.disk);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, 0);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), 0);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size - 1);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, bdev->zone_size - 1, 0, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size - 1);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 1, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * (geometry.num_pu - 1));
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, geometry.num_pu - 1, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * (geometry.num_pu - 1));
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu * geometry.num_grp);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 1, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba),
+ bdev->zone_size * geometry.num_pu * geometry.num_grp);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu * geometry.num_grp + 68);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 1, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba),
+ bdev->zone_size * geometry.num_pu * geometry.num_grp + 68);
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ free_controller(ctrlr);
+}
+
+static void
+punit_range_to_addr(const struct spdk_nvme_ctrlr *ctrlr, uint64_t punit,
+ uint64_t *grp, uint64_t *pu)
+{
+ const struct spdk_ocssd_geometry_data *geo = &ctrlr->geometry;
+
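+	/* Parallel units are numbered sequentially across groups, so a flat
+	 * punit index decomposes as grp = punit / num_pu, pu = punit % num_pu.
+	 */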
+ *grp = punit / geo->num_pu;
+ *pu = punit % geo->num_pu;
+
+ CU_ASSERT(*grp < geo->num_grp);
+}
+
+static void
+test_parallel_unit_range(void)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
+ const char *controller_name = "nvme0";
+ const char *bdev_name[] = { "nvme0n1", "nvme0n2", "nvme0n3" };
+ const struct bdev_ocssd_range range[3] = { { 0, 5 }, { 6, 18 }, { 19, 23 } };
+ struct ocssd_bdev *ocssd_bdev[3];
+ struct spdk_ocssd_geometry_data geometry = {};
+ struct spdk_bdev *bdev[3];
+ uint64_t lba, i, offset, grp, pu, zone_size;
+ int rc;
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 500,
+ .num_chk = 60,
+ .num_pu = 8,
+ .num_grp = 3,
+ .lbaf = {
+ .lbk_len = 9,
+ .chk_len = 6,
+ .pu_len = 3,
+ .grp_len = 2,
+ }
+ };
+
+ ctrlr = create_controller(&trid, 1, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ for (i = 0; i < SPDK_COUNTOF(range); ++i) {
+ rc = create_bdev(controller_name, bdev_name[i], 1, &range[i]);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev[i] = spdk_bdev_get_by_name(bdev_name[i]);
+ SPDK_CU_ASSERT_FATAL(bdev[i] != NULL);
+ ocssd_bdev[i] = SPDK_CONTAINEROF(bdev[i], struct ocssd_bdev, nvme_bdev.disk);
+ }
+
+ zone_size = bdev[0]->zone_size;
+ CU_ASSERT_EQUAL(zone_size, bdev[1]->zone_size);
+ CU_ASSERT_EQUAL(zone_size, bdev[2]->zone_size);
+
+ /* Verify the first addresses are correct */
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[0], 0);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[0], lba), 0);
+
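+	/* Second bdev starts at punit 6: group 6 / 8 = 0, unit 6 % 8 = 6 */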
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[1], 0);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 6, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[1], lba), 0);
+
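+	/* Third bdev starts at punit 19: group 19 / 8 = 2, unit 19 % 8 = 3 */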
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[2], 0);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 3, 2));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[2], lba), 0);
+
+	/* Verify the last addresses are correct */
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[0], bdev[0]->blockcnt - 1);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, geometry.clba - 1, geometry.num_chk - 1, 5, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[0], lba), bdev[0]->blockcnt - 1);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[1], bdev[1]->blockcnt - 1);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, geometry.clba - 1, geometry.num_chk - 1, 2, 2));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[1], lba), bdev[1]->blockcnt - 1);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[2], bdev[2]->blockcnt - 1);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, geometry.clba - 1, geometry.num_chk - 1, 7, 2));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[2], lba), bdev[2]->blockcnt - 1);
+
+ /* Verify correct jumps across parallel units / groups */
+ for (i = 0; i < SPDK_COUNTOF(range); ++i) {
+ for (offset = 0; offset < bdev_ocssd_num_parallel_units(ocssd_bdev[i]); ++offset) {
+ punit_range_to_addr(ctrlr, range[i].begin + offset, &grp, &pu);
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[i], offset * zone_size + 68);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, pu, grp));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[i], lba),
+ offset * zone_size + 68);
+ }
+ }
+
+ /* Verify correct address wrapping */
+ for (i = 0; i < SPDK_COUNTOF(range); ++i) {
+ punit_range_to_addr(ctrlr, range[i].begin, &grp, &pu);
+
+ offset = bdev_ocssd_num_parallel_units(ocssd_bdev[i]) * zone_size + 68;
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[i], offset);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 1, pu, grp));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[i], lba), offset);
+ }
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ free_controller(ctrlr);
+}
+
+static void
+get_zone_info_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ CU_ASSERT_EQUAL(g_zone_info_status, success);
+}
+
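+/* Chunk information entries are laid out group-major: all chunks of all
+ * parallel units in group 0 come first, then group 1, and so on.
+ */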
+static uint64_t
+generate_chunk_offset(const struct spdk_ocssd_geometry_data *geo, uint64_t chk,
+ uint64_t pu, uint64_t grp)
+{
+ return grp * geo->num_pu * geo->num_chk +
+ pu * geo->num_chk + chk;
+}
+
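+/* Allocate a bdev_io with room for the driver-specific context
+ * (struct bdev_ocssd_io) appended past the end of the structure.
+ */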
+static struct spdk_bdev_io *
+alloc_ocssd_io(void)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct bdev_ocssd_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+
+ return bdev_io;
+}
+
+static struct spdk_ocssd_chunk_information_entry *
+get_chunk_info(struct spdk_nvme_ctrlr *ctrlr, uint64_t offset)
+{
+ SPDK_CU_ASSERT_FATAL(offset < ctrlr->num_chunks);
+ return &ctrlr->chunk_info[offset];
+}
+
+enum chunk_state {
+ CHUNK_STATE_FREE,
+ CHUNK_STATE_CLOSED,
+ CHUNK_STATE_OPEN,
+ CHUNK_STATE_OFFLINE
+};
+
+static void
+set_chunk_state(struct spdk_ocssd_chunk_information_entry *chunk, enum chunk_state state)
+{
+ memset(&chunk->cs, 0, sizeof(chunk->cs));
+ switch (state) {
+ case CHUNK_STATE_FREE:
+ chunk->cs.free = 1;
+ break;
+ case CHUNK_STATE_CLOSED:
+ chunk->cs.closed = 1;
+ break;
+ case CHUNK_STATE_OPEN:
+ chunk->cs.open = 1;
+ break;
+ case CHUNK_STATE_OFFLINE:
+ chunk->cs.offline = 1;
+ break;
+ default:
+ SPDK_CU_ASSERT_FATAL(0 && "Invalid state");
+ }
+}
+
+static void
+test_get_zone_info(void)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
+ const char *controller_name = "nvme0";
+ const char *bdev_name = "nvme0n1";
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_io *bdev_io;
+#define MAX_ZONE_INFO_COUNT 64
+ struct spdk_bdev_zone_info zone_info[MAX_ZONE_INFO_COUNT];
+ struct spdk_ocssd_chunk_information_entry *chunk_info;
+ struct spdk_ocssd_geometry_data geometry;
+ uint64_t chunk_offset;
+ int rc, offset;
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 512,
+ .num_chk = 64,
+ .num_pu = 8,
+ .num_grp = 4,
+ .lbaf = {
+ .lbk_len = 9,
+ .chk_len = 6,
+ .pu_len = 3,
+ .grp_len = 2,
+ }
+ };
+
+ ctrlr = create_controller(&trid, 1, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ rc = create_bdev(controller_name, bdev_name, 1, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(bdev_name);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ bdev_io = alloc_ocssd_io();
+ bdev_io->internal.cb = get_zone_info_cb;
+ bdev_io->bdev = bdev;
+
+ /* Verify empty zone */
+ bdev_io->u.zone_mgmt.zone_id = 0;
+ bdev_io->u.zone_mgmt.num_zones = 1;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+ chunk_info = get_chunk_info(ctrlr, 0);
+ set_chunk_state(chunk_info, CHUNK_STATE_FREE);
+ chunk_info->wp = 0;
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_EMPTY);
+ CU_ASSERT_EQUAL(zone_info[0].zone_id, 0);
+ CU_ASSERT_EQUAL(zone_info[0].write_pointer, 0);
+ CU_ASSERT_EQUAL(zone_info[0].capacity, geometry.clba);
+
+ /* Verify open zone */
+ bdev_io->u.zone_mgmt.zone_id = bdev->zone_size;
+ bdev_io->u.zone_mgmt.num_zones = 1;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+ chunk_info = get_chunk_info(ctrlr, generate_chunk_offset(&geometry, 0, 1, 0));
+ set_chunk_state(chunk_info, CHUNK_STATE_OPEN);
+ chunk_info->wp = chunk_info->slba + 68;
+ chunk_info->cnlb = 511;
+ chunk_info->ct.size_deviate = 1;
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_OPEN);
+ CU_ASSERT_EQUAL(zone_info[0].zone_id, bdev->zone_size);
+ CU_ASSERT_EQUAL(zone_info[0].write_pointer, bdev->zone_size + 68);
+ CU_ASSERT_EQUAL(zone_info[0].capacity, chunk_info->cnlb);
+
+ /* Verify offline zone at 2nd chunk */
+ bdev_io->u.zone_mgmt.zone_id = bdev->zone_size * geometry.num_pu * geometry.num_grp;
+ bdev_io->u.zone_mgmt.num_zones = 1;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+ chunk_info = get_chunk_info(ctrlr, generate_chunk_offset(&geometry, 1, 0, 0));
+ set_chunk_state(chunk_info, CHUNK_STATE_OFFLINE);
+ chunk_info->wp = chunk_info->slba;
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_OFFLINE);
+ CU_ASSERT_EQUAL(zone_info[0].zone_id, bdev_io->u.zone_mgmt.zone_id);
+ CU_ASSERT_EQUAL(zone_info[0].write_pointer, bdev_io->u.zone_mgmt.zone_id);
+
+ /* Verify multiple zones at a time */
+ bdev_io->u.zone_mgmt.zone_id = 0;
+ bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+
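+	/* Consecutive zone IDs are striped across parallel units first, then
+	 * groups, and only then advance to the next chunk, which is what the
+	 * offset arithmetic below mirrors.
+	 */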
+ for (offset = 0; offset < MAX_ZONE_INFO_COUNT; ++offset) {
+ chunk_offset = generate_chunk_offset(&geometry,
+ (offset / (geometry.num_grp * geometry.num_pu)) % geometry.num_chk,
+ offset % geometry.num_pu,
+ (offset / geometry.num_pu) % geometry.num_grp);
+
+ chunk_info = get_chunk_info(ctrlr, chunk_offset);
+ set_chunk_state(chunk_info, CHUNK_STATE_OPEN);
+ chunk_info->wp = chunk_info->slba + 68;
+ chunk_info->ct.size_deviate = 0;
+ }
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ for (offset = 0; offset < MAX_ZONE_INFO_COUNT; ++offset) {
+ CU_ASSERT_EQUAL(zone_info[offset].state, SPDK_BDEV_ZONE_STATE_OPEN);
+ CU_ASSERT_EQUAL(zone_info[offset].zone_id, bdev->zone_size * offset);
+ CU_ASSERT_EQUAL(zone_info[offset].write_pointer, bdev->zone_size * offset + 68);
+ CU_ASSERT_EQUAL(zone_info[offset].capacity, geometry.clba);
+ }
+
+ /* Verify misaligned start zone LBA */
+ bdev_io->u.zone_mgmt.zone_id = 1;
+ bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, -EINVAL);
+
+ /* Verify correct NVMe error forwarding */
+ bdev_io->u.zone_mgmt.zone_id = 0;
+ bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+ chunk_info = get_chunk_info(ctrlr, 0);
+ set_chunk_state(chunk_info, CHUNK_STATE_FREE);
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, 0);
+ g_chunk_info_cpl = (struct spdk_nvme_cpl) {
+ .status = {
+ .sct = SPDK_NVME_SCT_GENERIC,
+ .sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR
+ }
+ };
+ g_zone_info_status = false;
+
+ g_chunk_info_cpl = (struct spdk_nvme_cpl) {};
+ g_zone_info_status = true;
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ free(bdev_io);
+ free_controller(ctrlr);
+}
+
+int
+main(int argc, const char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ocssd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_create_controller);
+ CU_ADD_TEST(suite, test_device_geometry);
+ CU_ADD_TEST(suite, test_lba_translation);
+ CU_ADD_TEST(suite, test_parallel_unit_range);
+ CU_ADD_TEST(suite, test_get_zone_info);
+
+ g_thread = spdk_thread_create("test", NULL);
+ spdk_set_thread(g_thread);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+
+ spdk_thread_exit(g_thread);
+ while (!spdk_thread_is_exited(g_thread)) {
+ spdk_thread_poll(g_thread, 0, 0);
+ }
+ spdk_thread_destroy(g_thread);
+
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore
new file mode 100644
index 000000000..99af16132
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore
@@ -0,0 +1 @@
+bdev_zone_ut
diff --git a/src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile
new file mode 100644
index 000000000..52dc65f23
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = bdev_zone_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c b/src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c
new file mode 100644
index 000000000..589e105b9
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c
@@ -0,0 +1,429 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/env.h"
+#include "spdk_internal/mock.h"
+
+#include "bdev/bdev_zone.c"
+
+DEFINE_STUB_V(bdev_io_init, (struct spdk_bdev_io *bdev_io,
+ struct spdk_bdev *bdev, void *cb_arg,
+ spdk_bdev_io_completion_cb cb));
+
+DEFINE_STUB_V(bdev_io_submit, (struct spdk_bdev_io *bdev_io));
+
+/* Construct zone_io_operation structure */
+struct zone_io_operation {
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *ch;
+ struct iovec iov;
+ union {
+ struct {
+ uint64_t zone_id;
+ size_t num_zones;
+ enum spdk_bdev_zone_action zone_action;
+ void *buf;
+ struct spdk_bdev_zone_info *info_;
+ } zone_mgmt;
+ struct {
+ void *md_buf;
+ struct iovec *iovs;
+ int iovcnt;
+ uint64_t num_blocks;
+ uint64_t offset_blocks;
+ uint64_t start_lba;
+ } bdev;
+ };
+ spdk_bdev_io_completion_cb cb;
+ void *cb_arg;
+ enum spdk_bdev_io_type io_type;
+};
+
+/* Global variables */
+struct zone_io_operation *g_zone_op = NULL;
+static struct spdk_bdev *g_bdev = NULL;
+static struct spdk_bdev_io *g_bdev_io = NULL;
+static struct spdk_bdev_zone_info g_zone_info = {0};
+static enum spdk_bdev_zone_action g_zone_action = SPDK_BDEV_ZONE_OPEN;
+static enum spdk_bdev_zone_action g_unexpected_zone_action = SPDK_BDEV_ZONE_CLOSE;
+static enum spdk_bdev_io_type g_io_type = SPDK_BDEV_IO_TYPE_GET_ZONE_INFO;
+
+static uint64_t g_expected_zone_id;
+static uint64_t g_expected_num_zones;
+static uint64_t g_unexpected_zone_id;
+static uint64_t g_unexpected_num_zones;
+static uint64_t g_num_blocks;
+static uint64_t g_unexpected_num_blocks;
+static uint64_t g_start_lba;
+static uint64_t g_unexpected_start_lba;
+static uint64_t g_bdev_blocklen;
+static uint64_t g_unexpected_bdev_blocklen;
+static bool g_append_with_md;
+static int g_unexpected_iovcnt;
+static void *g_md_buf;
+static void *g_unexpected_md_buf;
+static void *g_buf;
+static void *g_unexpected_buf;
+
+static int
+test_setup(void)
+{
+	/* Initialize the expected and unexpected values here */
+ g_expected_zone_id = 0x1000;
+ g_expected_num_zones = 1024;
+ g_unexpected_zone_id = 0xFFFF;
+ g_unexpected_num_zones = 0;
+ g_num_blocks = 4096 * 1024;
+ g_unexpected_num_blocks = 0;
+ g_start_lba = 4096;
+ g_unexpected_start_lba = 0;
+ g_bdev_blocklen = 4096;
+ g_unexpected_bdev_blocklen = 0;
+ g_append_with_md = false;
+ g_unexpected_iovcnt = 1000;
+ g_md_buf = (void *)0xEFDCFEDE;
+	g_unexpected_md_buf = (void *)0xFECDEFDC;
+ g_buf = (void *)0xFEEDBEEF;
+ g_unexpected_buf = (void *)0xDEADBEEF;
+
+ return 0;
+}
+
+static int
+test_cleanup(void)
+{
+ return 0;
+}
+
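+/* Prime the operation with the "unexpected" values; the APIs under test are
+ * expected to overwrite every field, so any value that leaks through will
+ * trip the assertions against the expected values.
+ */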
+static void
+start_operation(void)
+{
+ g_zone_op = calloc(1, sizeof(struct zone_io_operation));
+ SPDK_CU_ASSERT_FATAL(g_zone_op != NULL);
+
+ switch (g_io_type) {
+ case SPDK_BDEV_IO_TYPE_ZONE_APPEND:
+ g_zone_op->bdev.iovs = &g_zone_op->iov;
+ g_zone_op->bdev.iovs[0].iov_base = g_unexpected_buf;
+ g_zone_op->bdev.iovs[0].iov_len = g_unexpected_num_blocks * g_unexpected_bdev_blocklen;
+ g_zone_op->bdev.iovcnt = g_unexpected_iovcnt;
+	g_zone_op->bdev.md_buf = g_unexpected_md_buf;
+ g_zone_op->bdev.num_blocks = g_unexpected_num_blocks;
+ g_zone_op->bdev.offset_blocks = g_unexpected_zone_id;
+ g_zone_op->bdev.start_lba = g_unexpected_start_lba;
+ break;
+ default:
+ g_zone_op->bdev.iovcnt = 0;
+ g_zone_op->zone_mgmt.zone_id = g_unexpected_zone_id;
+ g_zone_op->zone_mgmt.num_zones = g_unexpected_num_zones;
+ g_zone_op->zone_mgmt.zone_action = g_unexpected_zone_action;
+ g_zone_op->zone_mgmt.buf = g_unexpected_buf;
+ break;
+ }
+}
+
+static void
+stop_operation(void)
+{
+ free(g_bdev_io);
+ free(g_bdev);
+ free(g_zone_op);
+ g_bdev_io = NULL;
+ g_bdev = NULL;
+ g_zone_op = NULL;
+}
+
+struct spdk_bdev_io *
+bdev_channel_get_io(struct spdk_bdev_channel *channel)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+
+ bdev_io->internal.ch = channel;
+ bdev_io->type = g_io_type;
+
+ CU_ASSERT(g_zone_op != NULL);
+
+ switch (g_io_type) {
+ case SPDK_BDEV_IO_TYPE_GET_ZONE_INFO:
+ case SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT:
+ bdev_io->u.bdev.iovcnt = 0;
+ bdev_io->u.zone_mgmt.zone_id = g_zone_op->zone_mgmt.zone_id;
+ bdev_io->u.zone_mgmt.num_zones = g_zone_op->zone_mgmt.num_zones;
+ bdev_io->u.zone_mgmt.zone_action = g_zone_op->zone_mgmt.zone_action;
+ bdev_io->u.zone_mgmt.buf = g_zone_op->zone_mgmt.buf;
+ break;
+ case SPDK_BDEV_IO_TYPE_ZONE_APPEND:
+ bdev_io->u.bdev.iovs = g_zone_op->bdev.iovs;
+ bdev_io->u.bdev.iovs[0].iov_base = g_zone_op->bdev.iovs[0].iov_base;
+ bdev_io->u.bdev.iovs[0].iov_len = g_zone_op->bdev.iovs[0].iov_len;
+ bdev_io->u.bdev.iovcnt = g_zone_op->bdev.iovcnt;
+ bdev_io->u.bdev.md_buf = g_zone_op->bdev.md_buf;
+ bdev_io->u.bdev.num_blocks = g_zone_op->bdev.num_blocks;
+ bdev_io->u.bdev.offset_blocks = g_zone_op->bdev.offset_blocks;
+ break;
+ default:
+ CU_ASSERT(0);
+ }
+
+ g_bdev_io = bdev_io;
+
+ return bdev_io;
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc)
+{
+ *_desc = (void *)0x1;
+ return 0;
+}
+
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
+{
+ return (struct spdk_io_channel *)0x1;
+}
+
+void
+spdk_put_io_channel(struct spdk_io_channel *ch)
+{
+ CU_ASSERT(ch == (void *)1);
+}
+
+struct spdk_bdev *
+spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
+{
+ struct spdk_bdev *bdev;
+
+ bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ if (g_io_type == SPDK_BDEV_IO_TYPE_ZONE_APPEND) {
+ bdev->blocklen = g_bdev_blocklen;
+ }
+
+ g_bdev = bdev;
+
+ return bdev;
+}
+
+static void
+test_get_zone_size(void)
+{
+ struct spdk_bdev bdev = {};
+ uint64_t get_zone_size;
+
+ bdev.zone_size = 1024 * 4096;
+
+ get_zone_size = spdk_bdev_get_zone_size(&bdev);
+ CU_ASSERT(get_zone_size == 1024 * 4096);
+}
+
+static void
+test_get_max_open_zones(void)
+{
+ struct spdk_bdev bdev = {};
+ uint32_t get_max_open_zones;
+
+ bdev.max_open_zones = 8192;
+
+ get_max_open_zones = spdk_bdev_get_max_open_zones(&bdev);
+ CU_ASSERT(get_max_open_zones == 8192);
+}
+
+static void
+test_get_optimal_open_zones(void)
+{
+ struct spdk_bdev bdev = {};
+ uint32_t get_optimal_open_zones;
+
+ bdev.optimal_open_zones = 4096;
+
+ get_optimal_open_zones = spdk_bdev_get_optimal_open_zones(&bdev);
+ CU_ASSERT(get_optimal_open_zones == 4096);
+}
+
+static void
+test_bdev_io_get_append_location(void)
+{
+ struct spdk_bdev_io bdev_io = {};
+ uint64_t get_offset_blocks;
+
+ bdev_io.u.bdev.offset_blocks = 1024 * 10;
+
+ get_offset_blocks = spdk_bdev_io_get_append_location(&bdev_io);
+ CU_ASSERT(get_offset_blocks == 1024 * 10);
+}
+
+static void
+test_zone_get_operation(void)
+{
+ test_get_zone_size();
+ test_get_max_open_zones();
+ test_get_optimal_open_zones();
+}
+
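+/* Common prologue for the zone I/O tests: opens a descriptor and fetches an
+ * I/O channel through the stubbed spdk_bdev_open()/spdk_bdev_get_io_channel().
+ */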
+#define DECLARE_VIRTUAL_BDEV_START() \
+ struct spdk_bdev bdev; \
+ struct spdk_io_channel *ch; \
+ struct spdk_bdev_desc *desc = NULL; \
+ int rc; \
+ memset(&bdev, 0, sizeof(bdev)); \
+ bdev.name = "bdev_zone_ut"; \
+ rc = spdk_bdev_open(&bdev, true, NULL, NULL, &desc); \
+ CU_ASSERT(rc == 0); \
+ SPDK_CU_ASSERT_FATAL(desc != NULL); \
+ ch = spdk_bdev_get_io_channel(desc); \
+	CU_ASSERT(ch != NULL);
+
+static void
+test_bdev_zone_get_info(void)
+{
+ DECLARE_VIRTUAL_BDEV_START();
+
+ g_zone_info.zone_id = g_expected_zone_id;
+ g_io_type = SPDK_BDEV_IO_TYPE_GET_ZONE_INFO;
+
+ start_operation();
+
+ rc = spdk_bdev_get_zone_info(desc, ch, g_expected_zone_id, g_expected_num_zones, &g_zone_info, NULL,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_GET_ZONE_INFO);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.zone_id == g_expected_zone_id);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.num_zones == g_expected_num_zones);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.buf == &g_zone_info);
+
+ stop_operation();
+}
+
+static void
+test_bdev_zone_management(void)
+{
+ DECLARE_VIRTUAL_BDEV_START();
+
+ g_zone_info.zone_id = g_expected_zone_id;
+ g_io_type = SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT;
+
+ start_operation();
+
+ rc = spdk_bdev_zone_management(desc, ch, g_expected_zone_id, g_zone_action, NULL,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.zone_id == g_expected_zone_id);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.zone_action == g_zone_action);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.num_zones == 1);
+
+ stop_operation();
+}
+
+static void
+test_bdev_zone_append(void)
+{
+ DECLARE_VIRTUAL_BDEV_START();
+
+ g_io_type = SPDK_BDEV_IO_TYPE_ZONE_APPEND;
+ g_append_with_md = false;
+
+ start_operation();
+
+ rc = spdk_bdev_zone_append(desc, ch, g_buf, g_start_lba, g_num_blocks, NULL, NULL);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.desc == desc);
+ CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_ZONE_APPEND);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == g_buf);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_len == g_num_blocks * g_bdev_blocklen);
+ CU_ASSERT(g_bdev_io->u.bdev.iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.md_buf == NULL);
+ CU_ASSERT(g_bdev_io->u.bdev.num_blocks == g_num_blocks);
+ CU_ASSERT(g_bdev_io->u.bdev.offset_blocks == g_expected_zone_id);
+
+ stop_operation();
+}
+
+static void
+test_bdev_zone_append_with_md(void)
+{
+ DECLARE_VIRTUAL_BDEV_START();
+
+ g_io_type = SPDK_BDEV_IO_TYPE_ZONE_APPEND;
+ g_append_with_md = true;
+
+ start_operation();
+
+ rc = spdk_bdev_zone_append_with_md(desc, ch, g_buf, g_md_buf, g_start_lba, g_num_blocks, NULL,
+ NULL);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.desc == desc);
+ CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_ZONE_APPEND);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == g_buf);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_len == g_num_blocks * g_bdev_blocklen);
+ CU_ASSERT(g_bdev_io->u.bdev.iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.md_buf == g_md_buf);
+ CU_ASSERT(g_bdev_io->u.bdev.num_blocks == g_num_blocks);
+ CU_ASSERT(g_bdev_io->u.bdev.offset_blocks == g_expected_zone_id);
+
+ stop_operation();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("zone", test_setup, test_cleanup);
+ CU_ADD_TEST(suite, test_zone_get_operation);
+ CU_ADD_TEST(suite, test_bdev_zone_get_info);
+ CU_ADD_TEST(suite, test_bdev_zone_management);
+ CU_ADD_TEST(suite, test_bdev_zone_append);
+ CU_ADD_TEST(suite, test_bdev_zone_append_with_md);
+ CU_ADD_TEST(suite, test_bdev_io_get_append_location);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/compress.c/.gitignore b/src/spdk/test/unit/lib/bdev/compress.c/.gitignore
new file mode 100644
index 000000000..bac80ced6
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/compress.c/.gitignore
@@ -0,0 +1 @@
+compress_ut
diff --git a/src/spdk/test/unit/lib/bdev/compress.c/Makefile b/src/spdk/test/unit/lib/bdev/compress.c/Makefile
new file mode 100644
index 000000000..6f33eef39
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/compress.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = compress_ut.c
+CFLAGS += $(ENV_CFLAGS)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c b/src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c
new file mode 100644
index 000000000..53c14310c
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c
@@ -0,0 +1,1140 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+/* We have our own mock for this */
+#define UNIT_TEST_NO_VTOPHYS
+#include "common/lib/test_env.c"
+#include "spdk_internal/mock.h"
+#include "unit/lib/json_mock.c"
+#include "spdk/reduce.h"
+
+#include <rte_compressdev.h>
+
+/* There will be one mbuf if the data perfectly matches the chunk size;
+ * otherwise there may be an offset into the data, a remainder after it,
+ * or both, for a maximum of 3.
+ */
+#define UT_MBUFS_PER_OP 3
+/* For testing the crossing of a huge page boundary on address translation,
+ * we'll have an extra one but we only test on the source side.
+ */
+#define UT_MBUFS_PER_OP_BOUND_TEST 4
+
+struct spdk_bdev_io *g_bdev_io;
+struct spdk_io_channel *g_io_ch;
+struct rte_comp_op g_comp_op[2];
+struct vbdev_compress g_comp_bdev;
+struct comp_device_qp g_device_qp;
+struct compress_dev g_device;
+struct rte_compressdev_capabilities g_cdev_cap;
+static struct rte_mbuf *g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
+static struct rte_mbuf *g_dst_mbufs[UT_MBUFS_PER_OP];
+static struct rte_mbuf g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
+static struct rte_mbuf g_expected_dst_mbufs[UT_MBUFS_PER_OP];
+struct comp_bdev_io *g_io_ctx;
+struct comp_io_channel *g_comp_ch;
+
+/* These functions are defined as static inline in DPDK, so they can't be
+ * mocked directly. Defines are used to redirect them to our custom
+ * implementations.
+ */
+
+static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
+ uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo);
+#define rte_pktmbuf_attach_extbuf mock_rte_pktmbuf_attach_extbuf
+static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
+ uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
+{
+ assert(m != NULL);
+ m->buf_addr = buf_addr;
+ m->buf_iova = buf_iova;
+ m->buf_len = buf_len;
+ m->data_len = m->pkt_len = 0;
+}
+
+static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len);
+#define rte_pktmbuf_append mock_rte_pktmbuf_append
+static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
+{
+ m->pkt_len = m->pkt_len + len;
+ return NULL;
+}
+
+static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail);
+#define rte_pktmbuf_chain mock_rte_pktmbuf_chain
+static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
+{
+ struct rte_mbuf *cur_tail;
+
+ cur_tail = rte_pktmbuf_lastseg(head);
+ cur_tail->next = tail;
+
+ return 0;
+}
+
+uint16_t ut_max_nb_queue_pairs = 0;
+void __rte_experimental mock_rte_compressdev_info_get(uint8_t dev_id,
+ struct rte_compressdev_info *dev_info);
+#define rte_compressdev_info_get mock_rte_compressdev_info_get
+void __rte_experimental
+mock_rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
+{
+ dev_info->max_nb_queue_pairs = ut_max_nb_queue_pairs;
+ dev_info->capabilities = &g_cdev_cap;
+ dev_info->driver_name = "compress_isal";
+}
+
+int ut_rte_compressdev_configure = 0;
+int __rte_experimental mock_rte_compressdev_configure(uint8_t dev_id,
+ struct rte_compressdev_config *config);
+#define rte_compressdev_configure mock_rte_compressdev_configure
+int __rte_experimental
+mock_rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
+{
+ return ut_rte_compressdev_configure;
+}
+
+int ut_rte_compressdev_queue_pair_setup = 0;
+int __rte_experimental mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ uint32_t max_inflight_ops, int socket_id);
+#define rte_compressdev_queue_pair_setup mock_rte_compressdev_queue_pair_setup
+int __rte_experimental
+mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ return ut_rte_compressdev_queue_pair_setup;
+}
+
+int ut_rte_compressdev_start = 0;
+int __rte_experimental mock_rte_compressdev_start(uint8_t dev_id);
+#define rte_compressdev_start mock_rte_compressdev_start
+int __rte_experimental
+mock_rte_compressdev_start(uint8_t dev_id)
+{
+ return ut_rte_compressdev_start;
+}
+
+int ut_rte_compressdev_private_xform_create = 0;
+int __rte_experimental mock_rte_compressdev_private_xform_create(uint8_t dev_id,
+ const struct rte_comp_xform *xform, void **private_xform);
+#define rte_compressdev_private_xform_create mock_rte_compressdev_private_xform_create
+int __rte_experimental
+mock_rte_compressdev_private_xform_create(uint8_t dev_id,
+ const struct rte_comp_xform *xform, void **private_xform)
+{
+ return ut_rte_compressdev_private_xform_create;
+}
+
+uint8_t ut_rte_compressdev_count = 0;
+uint8_t __rte_experimental mock_rte_compressdev_count(void);
+#define rte_compressdev_count mock_rte_compressdev_count
+uint8_t __rte_experimental
+mock_rte_compressdev_count(void)
+{
+ return ut_rte_compressdev_count;
+}
+
+struct rte_mempool *ut_rte_comp_op_pool_create = NULL;
+struct rte_mempool *__rte_experimental mock_rte_comp_op_pool_create(const char *name,
+ unsigned int nb_elts, unsigned int cache_size, uint16_t user_size,
+ int socket_id);
+#define rte_comp_op_pool_create mock_rte_comp_op_pool_create
+struct rte_mempool *__rte_experimental
+mock_rte_comp_op_pool_create(const char *name, unsigned int nb_elts,
+ unsigned int cache_size, uint16_t user_size, int socket_id)
+{
+ return ut_rte_comp_op_pool_create;
+}
+
+void mock_rte_pktmbuf_free(struct rte_mbuf *m);
+#define rte_pktmbuf_free mock_rte_pktmbuf_free
+void mock_rte_pktmbuf_free(struct rte_mbuf *m)
+{
+}
+
+static bool ut_boundary_alloc = false;
+static int ut_rte_pktmbuf_alloc_bulk = 0;
+int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
+ unsigned count);
+#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
+int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
+ unsigned count)
+{
+ int i;
+
+ /* This mocked function only supports the alloc of up to 3 src and 3 dst. */
+ ut_rte_pktmbuf_alloc_bulk += count;
+
+ if (ut_rte_pktmbuf_alloc_bulk == 1) {
+ /* allocation of an extra mbuf for boundary cross test */
+ ut_boundary_alloc = true;
+ g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1]->next = NULL;
+ *mbufs = g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1];
+ ut_rte_pktmbuf_alloc_bulk = 0;
+ } else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP) {
+ /* first test allocation, src mbufs */
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ g_src_mbufs[i]->next = NULL;
+ *mbufs++ = g_src_mbufs[i];
+ }
+ } else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP * 2) {
+ /* second test allocation, dst mbufs */
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ g_dst_mbufs[i]->next = NULL;
+ *mbufs++ = g_dst_mbufs[i];
+ }
+ ut_rte_pktmbuf_alloc_bulk = 0;
+ } else {
+ return -1;
+ }
+ return 0;
+}
+
+struct rte_mempool *
+rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
+ uint16_t priv_size, uint16_t data_room_size, int socket_id)
+{
+ struct spdk_mempool *tmp;
+
+ tmp = spdk_mempool_create("mbuf_mp", 1024, sizeof(struct rte_mbuf),
+ SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+ SPDK_ENV_SOCKET_ID_ANY);
+
+ return (struct rte_mempool *)tmp;
+}
+
+void
+rte_mempool_free(struct rte_mempool *mp)
+{
+ if (mp) {
+ spdk_mempool_free((struct spdk_mempool *)mp);
+ }
+}
+
+static int ut_spdk_reduce_vol_op_complete_err = 0;
+void
+spdk_reduce_vol_writev(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
+ uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
+ void *cb_arg)
+{
+ cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
+}
+
+void
+spdk_reduce_vol_readv(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
+ uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
+ void *cb_arg)
+{
+ cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
+}
+
+#include "bdev/compress/vbdev_compress.c"
+
+/* SPDK stubs */
+DEFINE_STUB(spdk_bdev_get_aliases, const struct spdk_bdev_aliases_list *,
+ (const struct spdk_bdev *bdev), NULL);
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), 0);
+DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
+DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
+ void *cb_arg));
+DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
+ spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
+DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
+DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io),
+ 0);
+DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry), 0);
+DEFINE_STUB_V(spdk_reduce_vol_unload, (struct spdk_reduce_vol *vol,
+ spdk_reduce_vol_op_complete cb_fn, void *cb_arg));
+DEFINE_STUB_V(spdk_reduce_vol_load, (struct spdk_reduce_backing_dev *backing_dev,
+ spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
+DEFINE_STUB(spdk_reduce_vol_get_params, const struct spdk_reduce_vol_params *,
+ (struct spdk_reduce_vol *vol), NULL);
+
+/* DPDK stubs */
+DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
+DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
+DEFINE_STUB_V(rte_comp_op_free, (struct rte_comp_op *op));
+DEFINE_STUB(rte_comp_op_alloc, struct rte_comp_op *, (struct rte_mempool *mempool), NULL);
+
+int g_small_size_counter = 0;
+int g_small_size_modify = 0;
+uint64_t g_small_size = 0;
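+/* Mocked address translation that returns the virtual address as the
+ * physical one. The g_small_size* knobs let a test shrink the size reported
+ * for the Nth translation, simulating a huge page boundary crossing.
+ */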
+uint64_t
+spdk_vtophys(void *buf, uint64_t *size)
+{
+ g_small_size_counter++;
+ if (g_small_size_counter == g_small_size_modify) {
+ *size = g_small_size;
+ g_small_size_counter = 0;
+ g_small_size_modify = 0;
+ }
+ return (uint64_t)buf;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(g_io_ch, g_bdev_io, true);
+}
+
+/* Mock these functions to call the callback and then return the value we require */
+int ut_spdk_bdev_readv_blocks = 0;
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
+ return ut_spdk_bdev_readv_blocks;
+}
+
+int ut_spdk_bdev_writev_blocks = 0;
+bool ut_spdk_bdev_writev_blocks_mocked = false;
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
+ return ut_spdk_bdev_writev_blocks;
+}
+
+int ut_spdk_bdev_unmap_blocks = 0;
+bool ut_spdk_bdev_unmap_blocks_mocked = false;
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
+ return ut_spdk_bdev_unmap_blocks;
+}
+
+int ut_spdk_bdev_flush_blocks = 0;
+bool ut_spdk_bdev_flush_blocks_mocked = false;
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
+ return ut_spdk_bdev_flush_blocks;
+}
+
+int ut_spdk_bdev_reset = 0;
+bool ut_spdk_bdev_reset_mocked = false;
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
+ return ut_spdk_bdev_reset;
+}
+
+bool g_completion_called = false;
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+ g_completion_called = true;
+}
+
+static uint16_t ut_rte_compressdev_dequeue_burst = 0;
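+/* Mocked dequeue: when enabled it always hands back the two canned
+ * operations, letting the poller path be exercised with a fixed batch.
+ */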
+uint16_t
+rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
+ uint16_t nb_op)
+{
+ if (ut_rte_compressdev_dequeue_burst == 0) {
+ return 0;
+ }
+
+ ops[0] = &g_comp_op[0];
+ ops[1] = &g_comp_op[1];
+
+ return ut_rte_compressdev_dequeue_burst;
+}
+
+static int ut_compress_done[2];
+/* done_count and done_idx together control which expected assertion
+ * value to use when dequeuing 2 operations.
+ */
+static uint16_t done_count = 1;
+static uint16_t done_idx = 0;
+static void
+_compress_done(void *_req, int reduce_errno)
+{
+ if (done_count == 1) {
+ CU_ASSERT(reduce_errno == ut_compress_done[0]);
+ } else if (done_count == 2) {
+ CU_ASSERT(reduce_errno == ut_compress_done[done_idx++]);
+ }
+}
+
+static void
+_get_mbuf_array(struct rte_mbuf *mbuf_array[UT_MBUFS_PER_OP_BOUND_TEST],
+ struct rte_mbuf *mbuf_head, int mbuf_count, bool null_final)
+{
+ int i;
+
+ for (i = 0; i < mbuf_count; i++) {
+ mbuf_array[i] = mbuf_head;
+ if (mbuf_head) {
+ mbuf_head = mbuf_head->next;
+ }
+ }
+ if (null_final) {
+ mbuf_array[i - 1] = NULL;
+ }
+}
+
+#define FAKE_ENQUEUE_SUCCESS 255
+#define FAKE_ENQUEUE_ERROR 128
+#define FAKE_ENQUEUE_BUSY 64
+static uint16_t ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
+static struct rte_comp_op ut_expected_op;
+uint16_t
+rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_comp_op *op = *ops;
+ struct rte_mbuf *op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
+ struct rte_mbuf *exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
+ int i, num_src_mbufs = UT_MBUFS_PER_OP;
+
+ switch (ut_enqueue_value) {
+ case FAKE_ENQUEUE_BUSY:
+ op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
+ return 0;
+ break;
+ case FAKE_ENQUEUE_SUCCESS:
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ return 1;
+ break;
+ case FAKE_ENQUEUE_ERROR:
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return 0;
+ break;
+ default:
+ break;
+ }
+
+ /* by design the compress module will never send more than 1 op at a time */
+ CU_ASSERT(op->private_xform == ut_expected_op.private_xform);
+
+	/* Set up local pointers to the chained mbufs: both those referenced by
+	 * the operation struct and the expected values.
+	 */
+ _get_mbuf_array(op_mbuf, op->m_src, SPDK_COUNTOF(op_mbuf), true);
+ _get_mbuf_array(exp_mbuf, ut_expected_op.m_src, SPDK_COUNTOF(exp_mbuf), true);
+
+ if (ut_boundary_alloc == true) {
+ /* if we crossed a boundary, we need to check the 4th src mbuf and
+ * reset the global that is used to identify whether we crossed
+ * or not
+ */
+ num_src_mbufs = UT_MBUFS_PER_OP_BOUND_TEST;
+ exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = ut_expected_op.m_src->next->next->next;
+ op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = op->m_src->next->next->next;
+ ut_boundary_alloc = false;
+ }
+
+ for (i = 0; i < num_src_mbufs; i++) {
+ CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
+ CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
+ CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
+ CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
+ }
+
+	/* if only 3 mbufs were used in the test, the 4th should be NULL */
+ if (num_src_mbufs == UT_MBUFS_PER_OP) {
+ CU_ASSERT(op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
+ CU_ASSERT(exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
+ }
+
+ CU_ASSERT(op->m_src->userdata == ut_expected_op.m_src->userdata);
+ CU_ASSERT(op->src.offset == ut_expected_op.src.offset);
+ CU_ASSERT(op->src.length == ut_expected_op.src.length);
+
+ /* check dst mbuf values */
+ _get_mbuf_array(op_mbuf, op->m_dst, SPDK_COUNTOF(op_mbuf), true);
+ _get_mbuf_array(exp_mbuf, ut_expected_op.m_dst, SPDK_COUNTOF(exp_mbuf), true);
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
+ CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
+ CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
+ CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
+ }
+ CU_ASSERT(op->dst.offset == ut_expected_op.dst.offset);
+
+ return ut_enqueue_value;
+}
+
+/* Global setup for all tests, covering the preparation they share. */
+static int
+test_setup(void)
+{
+ struct spdk_thread *thread;
+ int i;
+
+ spdk_thread_lib_init(NULL, 0);
+
+ thread = spdk_thread_create(NULL, NULL);
+ spdk_set_thread(thread);
+
+ g_comp_bdev.reduce_thread = thread;
+ g_comp_bdev.backing_dev.unmap = _comp_reduce_unmap;
+ g_comp_bdev.backing_dev.readv = _comp_reduce_readv;
+ g_comp_bdev.backing_dev.writev = _comp_reduce_writev;
+ g_comp_bdev.backing_dev.compress = _comp_reduce_compress;
+ g_comp_bdev.backing_dev.decompress = _comp_reduce_decompress;
+ g_comp_bdev.backing_dev.blocklen = 512;
+ g_comp_bdev.backing_dev.blockcnt = 1024 * 16;
+
+ g_comp_bdev.device_qp = &g_device_qp;
+ g_comp_bdev.device_qp->device = &g_device;
+
+ TAILQ_INIT(&g_comp_bdev.queued_comp_ops);
+
+ g_comp_xform = (struct rte_comp_xform) {
+ .type = RTE_COMP_COMPRESS,
+ .compress = {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
+ .level = RTE_COMP_LEVEL_MAX,
+ .window_size = DEFAULT_WINDOW_SIZE,
+ .chksum = RTE_COMP_CHECKSUM_NONE,
+ .hash_algo = RTE_COMP_HASH_ALGO_NONE
+ }
+ };
+
+ g_decomp_xform = (struct rte_comp_xform) {
+ .type = RTE_COMP_DECOMPRESS,
+ .decompress = {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .chksum = RTE_COMP_CHECKSUM_NONE,
+ .window_size = DEFAULT_WINDOW_SIZE,
+ .hash_algo = RTE_COMP_HASH_ALGO_NONE
+ }
+ };
+ g_device.comp_xform = &g_comp_xform;
+ g_device.decomp_xform = &g_decomp_xform;
+ g_cdev_cap.comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM;
+ g_device.cdev_info.driver_name = "compress_isal";
+ g_device.cdev_info.capabilities = &g_cdev_cap;
+ for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) {
+ g_src_mbufs[i] = calloc(1, sizeof(struct rte_mbuf));
+ }
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ g_dst_mbufs[i] = calloc(1, sizeof(struct rte_mbuf));
+ }
+
+ g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct comp_bdev_io));
+ g_bdev_io->u.bdev.iovs = calloc(128, sizeof(struct iovec));
+ g_bdev_io->bdev = &g_comp_bdev.comp_bdev;
+ g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct comp_io_channel));
+ g_io_ch->thread = thread;
+ g_comp_ch = (struct comp_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
+ g_io_ctx = (struct comp_bdev_io *)g_bdev_io->driver_ctx;
+
+ g_io_ctx->comp_ch = g_comp_ch;
+ g_io_ctx->comp_bdev = &g_comp_bdev;
+ g_comp_bdev.device_qp = &g_device_qp;
+
+ for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
+ g_expected_src_mbufs[i].next = &g_expected_src_mbufs[i + 1];
+ }
+ g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1].next = NULL;
+
+	/* we only test with 4 mbufs on the src side; dst stays at 3 */
+ for (i = 0; i < UT_MBUFS_PER_OP - 1; i++) {
+ g_expected_dst_mbufs[i].next = &g_expected_dst_mbufs[i + 1];
+ }
+ g_expected_dst_mbufs[UT_MBUFS_PER_OP - 1].next = NULL;
+
+ return 0;
+}
+
+/* Global teardown for all tests */
+static int
+test_cleanup(void)
+{
+ struct spdk_thread *thread;
+ int i;
+
+ for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) {
+ free(g_src_mbufs[i]);
+ }
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ free(g_dst_mbufs[i]);
+ }
+ free(g_bdev_io->u.bdev.iovs);
+ free(g_bdev_io);
+ free(g_io_ch);
+
+ thread = spdk_get_thread();
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ spdk_thread_lib_fini();
+
+ return 0;
+}
+
+static void
+test_compress_operation(void)
+{
+ struct iovec src_iovs[3] = {};
+ int src_iovcnt;
+ struct iovec dst_iovs[3] = {};
+ int dst_iovcnt;
+ struct spdk_reduce_vol_cb_args cb_arg;
+ int rc, i;
+ struct vbdev_comp_op *op;
+ struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP];
+ struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP];
+
+ src_iovcnt = dst_iovcnt = 3;
+ for (i = 0; i < dst_iovcnt; i++) {
+ src_iovs[i].iov_len = 0x1000;
+ dst_iovs[i].iov_len = 0x1000;
+ src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
+ dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
+ }
+
+ /* test rte_comp_op_alloc failure */
+ MOCK_SET(rte_comp_op_alloc, NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, true, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
+ while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
+ op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
+ TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
+ free(op);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+ MOCK_SET(rte_comp_op_alloc, &g_comp_op[0]);
+
+ /* test mempool get failure */
+ ut_rte_pktmbuf_alloc_bulk = -1;
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, true, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
+ while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
+ op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
+ TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
+ free(op);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+ ut_rte_pktmbuf_alloc_bulk = 0;
+
+ /* test enqueue failure busy */
+ ut_enqueue_value = FAKE_ENQUEUE_BUSY;
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, true, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
+ while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
+ op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
+ TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
+ free(op);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+ ut_enqueue_value = 1;
+
+ /* test enqueue failure error */
+ ut_enqueue_value = FAKE_ENQUEUE_ERROR;
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, true, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == -EINVAL);
+ ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
+
+ /* test success with 3 vector iovec */
+ ut_expected_op.private_xform = &g_decomp_xform;
+ ut_expected_op.src.offset = 0;
+ ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;
+
+ /* setup the src expected values */
+ _get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
+ ut_expected_op.m_src = exp_src_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_src_mbuf[i]->userdata = &cb_arg;
+ exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
+ exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
+ exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
+ exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
+ }
+
+ /* setup the dst expected values */
+ _get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
+ ut_expected_op.dst.offset = 0;
+ ut_expected_op.m_dst = exp_dst_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
+ exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
+ exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
+ exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
+ }
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+test_compress_operation_cross_boundary(void)
+{
+ struct iovec src_iovs[3] = {};
+ int src_iovcnt;
+ struct iovec dst_iovs[3] = {};
+ int dst_iovcnt;
+ struct spdk_reduce_vol_cb_args cb_arg;
+ int rc, i;
+ struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
+ struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
+
+	/* Setup the same basic 3 IOV test as used in the simple success case,
+	 * but then test a vtophys boundary crossing at each position.
+	 */
+ src_iovcnt = dst_iovcnt = 3;
+ for (i = 0; i < dst_iovcnt; i++) {
+ src_iovs[i].iov_len = 0x1000;
+ dst_iovs[i].iov_len = 0x1000;
+ src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
+ dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
+ }
+
+ ut_expected_op.private_xform = &g_decomp_xform;
+ ut_expected_op.src.offset = 0;
+ ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;
+
+ /* setup the src expected values */
+ _get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
+ ut_expected_op.m_src = exp_src_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_src_mbuf[i]->userdata = &cb_arg;
+ exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
+ exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
+ exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
+ exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
+ }
+
+ /* setup the dst expected values, we don't test needing a 4th dst mbuf */
+ _get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
+ ut_expected_op.dst.offset = 0;
+ ut_expected_op.m_dst = exp_dst_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
+ exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
+ exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
+ exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
+ }
+
+ /* force the 1st IOV to get partial length from spdk_vtophys */
+ g_small_size_counter = 0;
+ g_small_size_modify = 1;
+ g_small_size = 0x800;
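+	/* These knobs drive the vtophys stub: it counts calls in
+	 * g_small_size_counter and, on call number g_small_size_modify, returns
+	 * g_small_size as the mapped length, simulating a physical-page boundary
+	 * inside that IOV.
+	 */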
+ exp_src_mbuf[3]->userdata = &cb_arg;
+
+ /* first only has shorter length */
+ exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x800;
+
+ /* 2nd was inserted by the boundary crossing condition and finishes off
+ * the length from the first */
+ exp_src_mbuf[1]->buf_addr = (void *)0x10000800;
+ exp_src_mbuf[1]->buf_iova = 0x10000800;
+ exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;
+
+	/* 3rd looks like what the 2nd would have */
+ exp_src_mbuf[2]->buf_addr = (void *)0x10001000;
+ exp_src_mbuf[2]->buf_iova = 0x10001000;
+ exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x1000;
+
+ /* a new 4th looks like what the 3rd would have */
+ exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
+ exp_src_mbuf[3]->buf_iova = 0x10002000;
+ exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+
+ /* Now force the 2nd IOV to get partial length from spdk_vtophys */
+ g_small_size_counter = 0;
+ g_small_size_modify = 2;
+ g_small_size = 0x800;
+
+ /* first is normal */
+ exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
+ exp_src_mbuf[0]->buf_iova = 0x10000000;
+ exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;
+
+ /* second only has shorter length */
+ exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
+ exp_src_mbuf[1]->buf_iova = 0x10001000;
+ exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;
+
+	/* 3rd was inserted by the boundary crossing condition and finishes off
+	 * the length from the second */
+ exp_src_mbuf[2]->buf_addr = (void *)0x10001800;
+ exp_src_mbuf[2]->buf_iova = 0x10001800;
+ exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;
+
+ /* a new 4th looks like what the 3rd would have */
+ exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
+ exp_src_mbuf[3]->buf_iova = 0x10002000;
+ exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+
+ /* Finally force the 3rd IOV to get partial length from spdk_vtophys */
+ g_small_size_counter = 0;
+ g_small_size_modify = 3;
+ g_small_size = 0x800;
+
+ /* first is normal */
+ exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
+ exp_src_mbuf[0]->buf_iova = 0x10000000;
+ exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;
+
+ /* second is normal */
+ exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
+ exp_src_mbuf[1]->buf_iova = 0x10001000;
+ exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x1000;
+
+ /* 3rd has shorter length */
+ exp_src_mbuf[2]->buf_addr = (void *)0x10002000;
+ exp_src_mbuf[2]->buf_iova = 0x10002000;
+ exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;
+
+ /* a new 4th handles the remainder from the 3rd */
+ exp_src_mbuf[3]->buf_addr = (void *)0x10002800;
+ exp_src_mbuf[3]->buf_iova = 0x10002800;
+ exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x800;
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+test_poller(void)
+{
+ int rc;
+ struct spdk_reduce_vol_cb_args *cb_args;
+	struct rte_mbuf mbuf[4]; /* one src and one dst mbuf for each of 2 ops */
+ struct vbdev_comp_op *op_to_queue;
+ struct iovec src_iovs[3] = {};
+ struct iovec dst_iovs[3] = {};
+ int i;
+
+ cb_args = calloc(1, sizeof(*cb_args));
+ SPDK_CU_ASSERT_FATAL(cb_args != NULL);
+ cb_args->cb_fn = _compress_done;
+ memset(&g_comp_op[0], 0, sizeof(struct rte_comp_op));
+ g_comp_op[0].m_src = &mbuf[0];
+ g_comp_op[1].m_src = &mbuf[1];
+ g_comp_op[0].m_dst = &mbuf[2];
+ g_comp_op[1].m_dst = &mbuf[3];
+ for (i = 0; i < 3; i++) {
+ src_iovs[i].iov_len = 0x1000;
+ dst_iovs[i].iov_len = 0x1000;
+ src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
+ dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
+ }
+
+	/* Error from dequeue; nothing needs to be resubmitted. */
+ ut_rte_compressdev_dequeue_burst = 1;
+ /* setup what we want dequeue to return for the op */
+ g_comp_op[0].m_src->userdata = (void *)cb_args;
+ g_comp_op[0].produced = 1;
+ g_comp_op[0].status = 1;
+ /* value asserted in the reduce callback */
+ ut_compress_done[0] = -EINVAL;
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = comp_dev_poller((void *)&g_comp_bdev);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == SPDK_POLLER_BUSY);
+
+	/* Successful dequeue of 2 ops; nothing needs to be resubmitted. */
+ ut_rte_compressdev_dequeue_burst = 2;
+ /* setup what we want dequeue to return for the op */
+ g_comp_op[0].m_src->userdata = (void *)cb_args;
+ g_comp_op[0].produced = 16;
+ g_comp_op[0].status = 0;
+ g_comp_op[1].m_src->userdata = (void *)cb_args;
+ g_comp_op[1].produced = 32;
+ g_comp_op[1].status = 0;
+ /* value asserted in the reduce callback */
+ ut_compress_done[0] = 16;
+ ut_compress_done[1] = 32;
+ done_count = 2;
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = comp_dev_poller((void *)&g_comp_bdev);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == SPDK_POLLER_BUSY);
+
+ /* Success from dequeue, one op to be resubmitted.
+ */
+ ut_rte_compressdev_dequeue_burst = 1;
+ /* setup what we want dequeue to return for the op */
+ g_comp_op[0].m_src->userdata = (void *)cb_args;
+ g_comp_op[0].produced = 16;
+ g_comp_op[0].status = 0;
+ /* value asserted in the reduce callback */
+ ut_compress_done[0] = 16;
+ done_count = 1;
+ op_to_queue = calloc(1, sizeof(struct vbdev_comp_op));
+ SPDK_CU_ASSERT_FATAL(op_to_queue != NULL);
+ op_to_queue->backing_dev = &g_comp_bdev.backing_dev;
+ op_to_queue->src_iovs = &src_iovs[0];
+ op_to_queue->src_iovcnt = 3;
+ op_to_queue->dst_iovs = &dst_iovs[0];
+ op_to_queue->dst_iovcnt = 3;
+ op_to_queue->compress = true;
+ op_to_queue->cb_arg = cb_args;
+ ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
+ TAILQ_INSERT_TAIL(&g_comp_bdev.queued_comp_ops,
+ op_to_queue,
+ link);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
+ rc = comp_dev_poller((void *)&g_comp_bdev);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == SPDK_POLLER_BUSY);
+
+ /* op_to_queue is freed in code under test */
+ free(cb_args);
+}
+
+static void
+test_vbdev_compress_submit_request(void)
+{
+ /* Single element block size write */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ vbdev_compress_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+ CU_ASSERT(g_io_ctx->orig_io == g_bdev_io);
+ CU_ASSERT(g_io_ctx->comp_bdev == &g_comp_bdev);
+ CU_ASSERT(g_io_ctx->comp_ch == g_comp_ch);
+
+ /* same write but now fail it */
+ ut_spdk_reduce_vol_op_complete_err = 1;
+ g_completion_called = false;
+ vbdev_compress_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* test a read success */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ ut_spdk_reduce_vol_op_complete_err = 0;
+ g_completion_called = false;
+ vbdev_compress_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* test a read failure */
+ ut_spdk_reduce_vol_op_complete_err = 1;
+ g_completion_called = false;
+ vbdev_compress_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+}
+
+static void
+test_passthru(void)
+{
+
+}
+
+static void
+test_reset(void)
+{
+	/* TODO: There are a few different ways to do this given that
+	 * the code uses spdk_for_each_channel() to implement reset
+	 * handling. Submitting w/o UT for this function for now and
+	 * will follow up with something shortly.
+	 */
+}
+
+static void
+test_initdrivers(void)
+{
+ int rc;
+
+ /* test return values from rte_vdev_init() */
+ MOCK_SET(rte_vdev_init, -EEXIST);
+ rc = vbdev_init_compress_drivers();
+	/* -EEXIST is not an error condition; the vdev already exists */
+ CU_ASSERT(rc == 0);
+
+ /* error */
+ MOCK_SET(rte_vdev_init, -2);
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_comp_op_mp == NULL);
+
+ /* compressdev count 0 */
+ ut_rte_compressdev_count = 0;
+ MOCK_SET(rte_vdev_init, 0);
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == 0);
+
+ /* bogus count */
+ ut_rte_compressdev_count = RTE_COMPRESS_MAX_DEVS + 1;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -EINVAL);
+
+ /* can't get mbuf pool */
+ ut_rte_compressdev_count = 1;
+ MOCK_SET(spdk_mempool_create, NULL);
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -ENOMEM);
+ MOCK_CLEAR(spdk_mempool_create);
+
+ /* can't get comp op pool */
+ ut_rte_comp_op_pool_create = NULL;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -ENOMEM);
+
+ /* error on create_compress_dev() */
+ ut_rte_comp_op_pool_create = (struct rte_mempool *)&test_initdrivers;
+ ut_rte_compressdev_configure = -1;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -1);
+
+ /* error on create_compress_dev() but coverage for large num queues */
+ ut_max_nb_queue_pairs = 99;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -1);
+
+ /* qpair setup fails */
+ ut_rte_compressdev_configure = 0;
+ ut_max_nb_queue_pairs = 0;
+ ut_rte_compressdev_queue_pair_setup = -1;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -EINVAL);
+
+ /* rte_compressdev_start fails */
+ ut_rte_compressdev_queue_pair_setup = 0;
+ ut_rte_compressdev_start = -1;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -1);
+
+ /* rte_compressdev_private_xform_create() fails */
+ ut_rte_compressdev_start = 0;
+ ut_rte_compressdev_private_xform_create = -2;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -2);
+
+ /* success */
+ ut_rte_compressdev_private_xform_create = 0;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == 0);
+ spdk_mempool_free((struct spdk_mempool *)g_mbuf_mp);
+}
+
+static void
+test_supported_io(void)
+{
+
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("compress", test_setup, test_cleanup);
+ CU_ADD_TEST(suite, test_compress_operation);
+ CU_ADD_TEST(suite, test_compress_operation_cross_boundary);
+ CU_ADD_TEST(suite, test_vbdev_compress_submit_request);
+ CU_ADD_TEST(suite, test_passthru);
+ CU_ADD_TEST(suite, test_initdrivers);
+ CU_ADD_TEST(suite, test_supported_io);
+ CU_ADD_TEST(suite, test_poller);
+ CU_ADD_TEST(suite, test_reset);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore
new file mode 100644
index 000000000..b2777562d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore
@@ -0,0 +1 @@
+crypto_ut
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/Makefile b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile
new file mode 100644
index 000000000..a987fbf2e
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = crypto_ut.c
+CFLAGS += $(ENV_CFLAGS)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c
new file mode 100644
index 000000000..f6298fd7d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c
@@ -0,0 +1,1084 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "spdk_internal/mock.h"
+#include "unit/lib/json_mock.c"
+
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+
+#define MAX_TEST_BLOCKS 8192
+struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
+struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];
+
+uint16_t g_dequeue_mock;
+uint16_t g_enqueue_mock;
+unsigned ut_rte_crypto_op_bulk_alloc;
+int ut_rte_crypto_op_attach_sym_session = 0;
+#define MOCK_INFO_GET_1QP_AESNI 0
+#define MOCK_INFO_GET_1QP_QAT 1
+#define MOCK_INFO_GET_1QP_BOGUS_PMD 2
+int ut_rte_cryptodev_info_get = 0;
+bool ut_rte_cryptodev_info_get_mocked = false;
+
+/* Those functions are defined as static inline in DPDK, so we can't
+ * mock them straight away. We use defines to redirect them into
+ * our custom functions.
+ */
+static bool g_resubmit_test = false;
+#define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
+static inline uint16_t
+mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ CU_ASSERT(nb_ops > 0);
+
+ for (i = 0; i < nb_ops; i++) {
+		/* Use this (empty until now) array of pointers to store the
+		 * enqueued operations so the dev_full test can assert on them.
+		 */
+ g_test_dev_full_ops[i] = *ops++;
+ if (g_resubmit_test == true) {
+ CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
+ }
+ }
+
+ return g_enqueue_mock;
+}
+
+#define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
+static inline uint16_t
+mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ CU_ASSERT(nb_ops > 0);
+
+ for (i = 0; i < g_dequeue_mock; i++) {
+ *ops++ = g_test_crypto_ops[i];
+ }
+
+ return g_dequeue_mock;
+}
+
+/* Instead of allocating real memory, assign the allocations to our
+ * test array for assertion in tests.
+ */
+#define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
+static inline unsigned
+mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ for (i = 0; i < nb_ops; i++) {
+ *ops++ = g_test_crypto_ops[i];
+ }
+ return ut_rte_crypto_op_bulk_alloc;
+}
+
+#define rte_mempool_put_bulk mock_rte_mempool_put_bulk
+static __rte_always_inline void
+mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
+ unsigned int n)
+{
+ return;
+}
+
+#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
+static inline int
+mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ return ut_rte_crypto_op_attach_sym_session;
+}
+
+#define rte_lcore_count mock_rte_lcore_count
+static inline unsigned
+mock_rte_lcore_count(void)
+{
+ return 1;
+}
+
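+/* Including the source under test directly lets the #define redirections
+ * above replace DPDK's static inline functions at compile time.
+ */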
+#include "bdev/crypto/vbdev_crypto.c"
+
+/* SPDK stubs */
+DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry), 0);
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
+ (struct spdk_conf *cp, const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
+DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), 0);
+DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
+DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
+ void *cb_arg));
+DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
+ spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
+DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
+
+/* DPDK stubs */
+DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
+DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
+DEFINE_STUB(rte_mempool_create, struct rte_mempool *, (const char *name, unsigned n,
+ unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags), (struct rte_mempool *)1);
+DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
+DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
+ (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
+ unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
+DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
+DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
+#if RTE_VERSION >= RTE_VERSION_NUM(19, 02, 0, 0)
+DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
+DEFINE_STUB(rte_cryptodev_sym_session_pool_create, struct rte_mempool *, (const char *name,
+ uint32_t nb_elts,
+ uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
+ int socket_id), (struct rte_mempool *)1);
+#else
+DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool), 0);
+#endif
+DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
+DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
+DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
+ (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
+DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
+DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
+DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
+DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);
+
+struct rte_cryptodev *rte_cryptodevs;
+
+/* global vars and setup/cleanup functions used for all test functions */
+struct spdk_bdev_io *g_bdev_io;
+struct crypto_bdev_io *g_io_ctx;
+struct crypto_io_channel *g_crypto_ch;
+struct spdk_io_channel *g_io_ch;
+struct vbdev_dev g_device;
+struct vbdev_crypto g_crypto_bdev;
+struct device_qp g_dev_qp;
+
+void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
+{
+ dev_info->max_nb_queue_pairs = 1;
+ if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
+ dev_info->driver_name = g_driver_names[0];
+ } else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
+ dev_info->driver_name = g_driver_names[1];
+ } else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
+ dev_info->driver_name = "junk";
+ }
+}
+
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
+{
+ return (unsigned int)dev_id;
+}
+
+void
+spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
+{
+ cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(g_io_ch, g_bdev_io, true);
+}
+
+/* Mock these functions to call the callback and then return the value we require */
+int ut_spdk_bdev_readv_blocks = 0;
+bool ut_spdk_bdev_readv_blocks_mocked = false;
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
+ return ut_spdk_bdev_readv_blocks;
+}
+
+int ut_spdk_bdev_writev_blocks = 0;
+bool ut_spdk_bdev_writev_blocks_mocked = false;
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
+ return ut_spdk_bdev_writev_blocks;
+}
+
+int ut_spdk_bdev_unmap_blocks = 0;
+bool ut_spdk_bdev_unmap_blocks_mocked = false;
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
+ return ut_spdk_bdev_unmap_blocks;
+}
+
+int ut_spdk_bdev_flush_blocks = 0;
+bool ut_spdk_bdev_flush_blocks_mocked = false;
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
+ return ut_spdk_bdev_flush_blocks;
+}
+
+int ut_spdk_bdev_reset = 0;
+bool ut_spdk_bdev_reset_mocked = false;
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
+ return ut_spdk_bdev_reset;
+}
+
+bool g_completion_called = false;
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+ g_completion_called = true;
+}
+
+/* Global setup for all tests that share a bunch of preparation... */
+static int
+test_setup(void)
+{
+ int i, rc;
+
+ /* Prepare essential variables for test routines */
+ g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
+ g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
+ g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
+ g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
+ g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
+ g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
+ memset(&g_device, 0, sizeof(struct vbdev_dev));
+ memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
+ g_dev_qp.device = &g_device;
+ g_io_ctx->crypto_ch = g_crypto_ch;
+ g_io_ctx->crypto_bdev = &g_crypto_bdev;
+ g_crypto_ch->device_qp = &g_dev_qp;
+ TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
+ TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
+
+ /* Allocate a real mbuf pool so we can test error paths */
+ g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
+ SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+ SPDK_ENV_SOCKET_ID_ANY);
+
+	/* Instead of allocating real rte mempools for these, it's easier and
+	 * provides the same coverage to simply calloc them here.
+	 */
+ for (i = 0; i < MAX_TEST_BLOCKS; i++) {
+ rc = posix_memalign((void **)&g_test_crypto_ops[i], 64,
+ sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op) +
+ AES_CBC_IV_LENGTH + QUEUED_OP_LENGTH);
+ if (rc != 0) {
+ assert(false);
+ }
+ memset(g_test_crypto_ops[i], 0, sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) + QUEUED_OP_LENGTH);
+ }
+ return 0;
+}
+
+/* Global teardown for all tests */
+static int
+test_cleanup(void)
+{
+ int i;
+
+ spdk_mempool_free(g_mbuf_mp);
+ for (i = 0; i < MAX_TEST_BLOCKS; i++) {
+ free(g_test_crypto_ops[i]);
+ }
+ free(g_bdev_io->u.bdev.iovs);
+ free(g_bdev_io);
+ free(g_io_ch);
+ return 0;
+}
+
+static void
+test_error_paths(void)
+{
+ /* Single element block size write, just to test error paths
+ * in vbdev_crypto_submit_request().
+ */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+	/* Test failure of spdk_mempool_get_bulk(); the submit still reports
+	 * success because the request gets queued.
+	 */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(spdk_mempool_get, NULL);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* same thing but switch to reads to test error path in _crypto_complete_io() */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ /* Now with the read_blocks failing */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(spdk_bdev_readv_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(spdk_bdev_readv_blocks, 0);
+ MOCK_CLEAR(spdk_mempool_get);
+
+ /* test failure of rte_crypto_op_bulk_alloc() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ ut_rte_crypto_op_bulk_alloc = 0;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ ut_rte_crypto_op_bulk_alloc = 1;
+
+ /* test failure of rte_crypto_op_attach_sym_session() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ ut_rte_crypto_op_attach_sym_session = -1;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ ut_rte_crypto_op_attach_sym_session = 0;
+}
+
+static void
+test_simple_write(void)
+{
+ /* Single element block size write */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.offset_blocks = 0;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
+ CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
+ CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
+ CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);
+
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
+}
+
+static void
+test_simple_read(void)
+{
+ /* Single element block size read */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
+
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+}
+
+static void
+test_large_rw(void)
+{
+ unsigned block_len = 512;
+ unsigned num_blocks = CRYPTO_MAX_IO / block_len;
+ unsigned io_len = block_len * num_blocks;
+ unsigned i;
+
+ /* Multi block size read, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ }
+
+ /* Multi block size write, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
+ CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
+ CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
+ CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+ }
+}
+
+static void
+test_dev_full(void)
+{
+ struct vbdev_crypto_op *queued_op;
+ struct rte_crypto_sym_op *sym_op;
+ struct crypto_bdev_io *io_ctx;
+
+ /* Two element block size read */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 2;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = 1;
+ ut_rte_crypto_op_bulk_alloc = 2;
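+	/* With 2 ops allocated but the enqueue mock accepting only 1, the second
+	 * op should end up on the queued_cry_ops list.
+	 */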
+
+ g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
+ sym_op = g_test_crypto_ops[0]->sym;
+ CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
+ CU_ASSERT(sym_op->m_src->data_len == 512);
+ CU_ASSERT(sym_op->m_src->next == NULL);
+ CU_ASSERT(sym_op->cipher.data.length == 512);
+ CU_ASSERT(sym_op->cipher.data.offset == 0);
+ CU_ASSERT(sym_op->m_src->userdata == g_bdev_io);
+ CU_ASSERT(sym_op->m_dst == NULL);
+
+ /* make sure one got queued and confirm its values */
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
+ queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
+ sym_op = queued_op->crypto_op->sym;
+ TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
+ CU_ASSERT(queued_op->bdev_io == g_bdev_io);
+ CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
+ CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
+ CU_ASSERT(sym_op->m_src->data_len == 512);
+ CU_ASSERT(sym_op->m_src->next == NULL);
+ CU_ASSERT(sym_op->cipher.data.length == 512);
+ CU_ASSERT(sym_op->cipher.data.offset == 0);
+ CU_ASSERT(sym_op->m_src->userdata == g_bdev_io);
+ CU_ASSERT(sym_op->m_dst == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[1]->sym->m_src);
+
+	/* Enqueue failed for a non-busy reason; all ops were rejected. */
+ g_enqueue_mock = 0;
+ g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
+ CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+}
+
+static void
+test_crazy_rw(void)
+{
+ unsigned block_len = 512;
+ int num_blocks = 4;
+ int i;
+
+ /* Multi block size read, single element, strange IOV makeup */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 3;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
+ g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
+ g_bdev_io->u.bdev.iovs[2].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;
+
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ }
+
+ /* Multi block size write, single element strange IOV makeup */
+ num_blocks = 8;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 4;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
+ g_bdev_io->u.bdev.iovs[2].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
+ g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
+ g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;
+
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+		/* writes allocate a dst mbuf from the pool, so it must be non-NULL */
+		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst != NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+ }
+}
+
+static void
+test_passthru(void)
+{
+ /* Make sure these follow our completion callback, test success & fail. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
+ MOCK_SET(spdk_bdev_unmap_blocks, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_unmap_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_unmap_blocks);
+
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
+ MOCK_SET(spdk_bdev_flush_blocks, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_flush_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_flush_blocks);
+
+	/* We should never get a WZ command; we report that we don't support it. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+}
+
+static void
+test_reset(void)
+{
+ /* TODO: There are a few different ways to do this given that
+ * the code uses spdk_for_each_channel() to implement reset
+ * handling. Submitting w/o UT for this function for now and
+ * will follow up with something shortly.
+ */
+}
+
+static void
+init_cleanup(void)
+{
+ spdk_mempool_free(g_mbuf_mp);
+ rte_mempool_free(g_session_mp);
+ g_mbuf_mp = NULL;
+ g_session_mp = NULL;
+ if (g_session_mp_priv != NULL) {
+ /* g_session_mp_priv may or may not be set depending on the DPDK version */
+ rte_mempool_free(g_session_mp_priv);
+ }
+}
+
+static void
+test_initdrivers(void)
+{
+ int rc;
+ static struct spdk_mempool *orig_mbuf_mp;
+ static struct rte_mempool *orig_session_mp;
+ static struct rte_mempool *orig_session_mp_priv;
+
+	/* These cases alloc and free our g_mbuf_mp and session pools,
+	 * so save them off here and restore them once the test is over.
+	 */
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ orig_session_mp_priv = g_session_mp_priv;
+
+ g_session_mp_priv = NULL;
+ g_session_mp = NULL;
+ g_mbuf_mp = NULL;
+
+ /* No drivers available, not an error though */
+ MOCK_SET(rte_cryptodev_count, 0);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+
+ /* Test failure of DPDK dev init. */
+ MOCK_SET(rte_cryptodev_count, 2);
+ MOCK_SET(rte_vdev_init, -1);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ MOCK_SET(rte_vdev_init, 0);
+
+ /* Can't create session pool. */
+ MOCK_SET(spdk_mempool_create, NULL);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -ENOMEM);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ MOCK_CLEAR(spdk_mempool_create);
+
+ /* Can't create op pool. */
+ MOCK_SET(rte_crypto_op_pool_create, NULL);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -ENOMEM);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
+
+ /* Check resources are not sufficient */
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test crypto dev configure failure. */
+ MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
+ MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
+ MOCK_SET(rte_cryptodev_configure, -1);
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ rc = vbdev_crypto_init_crypto_drivers();
+ MOCK_SET(rte_cryptodev_configure, 0);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test failure of qp setup. */
+ MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
+
+ /* Test failure of dev start. */
+ MOCK_SET(rte_cryptodev_start, -1);
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ MOCK_SET(rte_cryptodev_start, 0);
+
+ /* Test bogus PMD */
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test happy path QAT. */
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(g_mbuf_mp != NULL);
+ CU_ASSERT(g_session_mp != NULL);
+ init_cleanup();
+ CU_ASSERT(rc == 0);
+
+ /* Test happy path AESNI. */
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
+ rc = vbdev_crypto_init_crypto_drivers();
+ init_cleanup();
+ CU_ASSERT(rc == 0);
+
+ /* restore our initial values. */
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ g_session_mp_priv = orig_session_mp_priv;
+}
+
+static void
+test_crypto_op_complete(void)
+{
+ /* Make sure completion code respects failure. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test read completion. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test write completion success. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ MOCK_SET(spdk_bdev_writev_blocks, 0);
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test write completion failed. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ MOCK_SET(spdk_bdev_writev_blocks, -1);
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test bogus type for this completion. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+}
+
+static void
+test_supported_io(void)
+{
+ void *ctx = NULL;
+ bool rc = true;
+
+	/* Make sure we always report false for WRITE_ZEROES; we need the bdev
+	 * layer to send real zeroes so we can encrypt/decrypt them.
+	 */
+ rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(rc == false);
+}
+
+static void
+test_poller(void)
+{
+ int rc;
+ struct rte_mbuf *src_mbufs[2];
+ struct vbdev_crypto_op *op_to_resubmit;
+
+ /* test regular 1 op to dequeue and complete */
+ g_dequeue_mock = g_enqueue_mock = 1;
+ spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 1);
+ g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
+ g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io;
+ g_test_crypto_ops[0]->sym->m_dst = NULL;
+ g_io_ctx->cryop_cnt_remaining = 1;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ rc = crypto_dev_poller(g_crypto_ch);
+ CU_ASSERT(rc == 1);
+
+ /* We have nothing dequeued but have some to resubmit */
+ g_dequeue_mock = 0;
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
+
+ /* add an op to the queued list. */
+ g_resubmit_test = true;
+ op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
+ op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
+ op_to_resubmit->bdev_io = g_bdev_io;
+ TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
+ op_to_resubmit,
+ link);
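+	/* With g_resubmit_test set, the enqueue mock asserts that the resubmitted
+	 * op is the 0xDEADBEEF marker planted above.
+	 */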
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
+ rc = crypto_dev_poller(g_crypto_ch);
+ g_resubmit_test = false;
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
+
+ /* 2 to dequeue but 2nd one failed */
+ g_dequeue_mock = g_enqueue_mock = 2;
+ g_io_ctx->cryop_cnt_remaining = 2;
+ spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 2);
+ g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
+ g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io;
+ g_test_crypto_ops[0]->sym->m_dst = NULL;
+ g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
+ g_test_crypto_ops[1]->sym->m_src->userdata = g_bdev_io;
+ g_test_crypto_ops[1]->sym->m_dst = NULL;
+ g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ rc = crypto_dev_poller(g_crypto_ch);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(rc == 2);
+}
+
+/* Helper function for test_assign_device_qp() */
+static void
+_clear_device_qp_lists(void)
+{
+ struct device_qp *device_qp = NULL;
+
+ while (!TAILQ_EMPTY(&g_device_qp_qat)) {
+ device_qp = TAILQ_FIRST(&g_device_qp_qat);
+ TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
+ free(device_qp);
+
+ }
+ CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
+ while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
+ device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
+ TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
+ free(device_qp);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
+}
+
+/* Helper function for test_assign_device_qp() */
+static void
+_check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
+ struct crypto_io_channel *crypto_ch, uint8_t expected_index,
+ uint8_t current_index)
+{
+ _assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
+ CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
+ CU_ASSERT(g_next_qat_index == current_index);
+}
+
+static void
+test_assign_device_qp(void)
+{
+ struct device_qp *device_qp = NULL;
+ int i;
+
+ /* start with a known state, clear the device/qp lists */
+ _clear_device_qp_lists();
+
+ /* make sure that one AESNI_MB qp is found */
+ device_qp = calloc(1, sizeof(struct device_qp));
+ TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
+ g_crypto_ch->device_qp = NULL;
+ g_crypto_bdev.drv_name = AESNI_MB;
+ _assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
+ CU_ASSERT(g_crypto_ch->device_qp != NULL);
+
+ /* QAT testing is more complex as the code under test load balances by
+ * assigning each subsequent device/qp at increments of QAT_VF_SPREAD,
+ * modulo g_qat_total_qp. For the latest QAT we'll have 48 virtual functions,
+ * each with 2 qps, so the "spread" between assignments is 32.
+ */
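+ /* A rough sketch of the arithmetic being exercised (not the actual
+ * implementation): each assignment hands out the current index and then
+ * advances the shared next-index by the spread, wrapping around:
+ *
+ * g_next_qat_index = (index + QAT_VF_SPREAD) % g_qat_total_qp;
+ *
+ * With g_qat_total_qp = 96 and a spread of 32, the assigned indexes are
+ * expected to run 0, 32, 64, then 1 once the wrap lands back on the
+ * already in-use index 0.
+ */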
+ g_qat_total_qp = 96;
+ for (i = 0; i < g_qat_total_qp; i++) {
+ device_qp = calloc(1, sizeof(struct device_qp));
+ device_qp->index = i;
+ TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
+ }
+ g_crypto_ch->device_qp = NULL;
+ g_crypto_bdev.drv_name = QAT;
+
+ /* First assignment will assign to 0 and next at 32. */
+ _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
+ 0, QAT_VF_SPREAD);
+
+ /* Second assignment will assign to 32 and next at 64. */
+ _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
+ QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
+
+ /* Third assignment will assign to 64 and next at 0. */
+ _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
+ QAT_VF_SPREAD * 2, 0);
+
+ /* Fourth assignment will assign to 1 and next at 33. */
+ _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
+ 1, QAT_VF_SPREAD + 1);
+
+ _clear_device_qp_lists();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("crypto", test_setup, test_cleanup);
+ CU_ADD_TEST(suite, test_error_paths);
+ CU_ADD_TEST(suite, test_simple_write);
+ CU_ADD_TEST(suite, test_simple_read);
+ CU_ADD_TEST(suite, test_large_rw);
+ CU_ADD_TEST(suite, test_dev_full);
+ CU_ADD_TEST(suite, test_crazy_rw);
+ CU_ADD_TEST(suite, test_passthru);
+ CU_ADD_TEST(suite, test_initdrivers);
+ CU_ADD_TEST(suite, test_crypto_op_complete);
+ CU_ADD_TEST(suite, test_supported_io);
+ CU_ADD_TEST(suite, test_reset);
+ CU_ADD_TEST(suite, test_poller);
+ CU_ADD_TEST(suite, test_assign_device_qp);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/gpt/Makefile b/src/spdk/test/unit/lib/bdev/gpt/Makefile
new file mode 100644
index 000000000..2fad9ba03
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = gpt.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore
new file mode 100644
index 000000000..74d476f5c
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore
@@ -0,0 +1 @@
+gpt_ut
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile
new file mode 100644
index 000000000..202fe9cb4
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+
+TEST_FILE = gpt_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c
new file mode 100644
index 000000000..8095fce19
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c
@@ -0,0 +1,363 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
+#include "bdev/gpt/gpt.c"
+
+static void
+test_check_mbr(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_mbr *mbr;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* Pass gpt as NULL */
+ re = gpt_parse_mbr(NULL);
+ CU_ASSERT(re == -1);
+
+ /* Leave gpt->buf as NULL */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ re = gpt_parse_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Fill the buffer with "aaa..."; all fields mismatch, including mbr_signature */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ re = gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->mbr_signature matched, start_lba mismatch */
+ mbr = (struct spdk_mbr *)gpt->buf;
+ mbr->mbr_signature = 0xAA55;
+ re = gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->partitions[0].start_lba matched, os_type mismatch */
+ mbr->partitions[0].start_lba = 1;
+ re = gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->partitions[0].os_type matched, size_lba mismatch */
+ mbr->partitions[0].os_type = 0xEE;
+ re = gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->partitions[0].size_lba matched, passing case */
+ mbr->partitions[0].size_lba = 0xFFFFFFFF;
+ re = gpt_check_mbr(gpt);
+ CU_ASSERT(re == 0);
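+ /* Note: start_lba 1, os_type 0xEE and size_lba 0xFFFFFFFF are the
+ * standard values of a GPT protective MBR partition entry, which is
+ * the combination gpt_check_mbr() accepts. */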
+
+ free(gpt);
+}
+
+static void
+test_read_header(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* gpt_read_header(NULL) is not tested here; a NULL gpt is rejected earlier, in gpt_parse_mbr() */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ gpt->parse_phase = SPDK_GPT_PARSE_PHASE_PRIMARY;
+ gpt->sector_size = 512;
+
+ /* Set *gpt is "aaa..." */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ gpt->buf_size = sizeof(a);
+
+ /* Set header_size mismatch */
+ gpt->sector_size = 512;
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ to_le32(&head->header_size, 0x258);
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set head->header_size matched, header_crc32 mismatch */
+ head->header_size = sizeof(*head);
+ to_le32(&head->header_crc32, 0x22D18C80);
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
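+ /* Note: the header_crc32 constants used throughout this test are
+ * precomputed CRC32 values for the header contents; whenever a header
+ * field changes below, the expected CRC changes too, so each step
+ * re-seeds header_crc32 to make the CRC check pass or fail as
+ * intended. */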
+
+ /* Set head->header_crc32 matched, gpt_signature mismatch */
+ to_le32(&head->header_crc32, 0xC5B2117E);
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set head->gpt_signature matched, head->my_lba mismatch */
+ to_le32(&head->header_crc32, 0xD637335A);
+ head->gpt_signature[0] = 'E';
+ head->gpt_signature[1] = 'F';
+ head->gpt_signature[2] = 'I';
+ head->gpt_signature[3] = ' ';
+ head->gpt_signature[4] = 'P';
+ head->gpt_signature[5] = 'A';
+ head->gpt_signature[6] = 'R';
+ head->gpt_signature[7] = 'T';
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set head->my_lba matched; lba_end/usable_lba mismatch */
+ to_le32(&head->header_crc32, 0xB3CDB2D2);
+ to_le64(&head->my_lba, 0x1);
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set gpt->lba_end/usable_lba matched, passing case */
+ to_le32(&head->header_crc32, 0x5531F2F0);
+ to_le64(&gpt->lba_start, 0x0);
+ to_le64(&gpt->lba_end, 0x2E935FFE);
+ to_le64(&head->first_usable_lba, 0xA);
+ to_le64(&head->last_usable_lba, 0xF4240);
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_read_partitions(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* gpt_read_partitions(NULL) is not tested here; a NULL gpt is rejected earlier, in gpt_parse_mbr() */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ gpt->parse_phase = SPDK_GPT_PARSE_PHASE_PRIMARY;
+ gpt->sector_size = 512;
+
+ /* Set *gpt is "aaa..." */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ gpt->buf_size = sizeof(a);
+
+ /* Set num_partition_entries to exceed the max number of entries GPT supports */
+ gpt->sector_size = 512;
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ gpt->header = head;
+ to_le32(&head->num_partition_entries, 0x100);
+ re = gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set num_partition_entries within the max value, size_of_partition_entry mismatch */
+ to_le32(&head->header_crc32, 0x573857BE);
+ to_le32(&head->num_partition_entries, 0x40);
+ to_le32(&head->size_of_partition_entry, 0x0);
+ re = gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set size_of_partition_entry matched, partition_entry_lba mismatch */
+ to_le32(&head->header_crc32, 0x5279B712);
+ to_le32(&head->size_of_partition_entry, 0x80);
+ to_le64(&head->partition_entry_lba, 0x64);
+ re = gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set partition_entry_lba matched, partition_entry_array_crc32 mismatch */
+ to_le32(&head->header_crc32, 0xEC093B43);
+ to_le64(&head->partition_entry_lba, 0x20);
+ to_le32(&head->partition_entry_array_crc32, 0x0);
+ re = gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set partition_entry_array_crc32 matched, passing case */
+ to_le32(&head->header_crc32, 0xE1A08822);
+ to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB);
+ to_le32(&head->num_partition_entries, 0x80);
+ re = gpt_read_partitions(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_parse_mbr_and_primary(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_mbr *mbr;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* Pass gpt as NULL */
+ re = gpt_parse_mbr(NULL);
+ CU_ASSERT(re == -1);
+
+ /* Leave gpt->buf as NULL */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ gpt->parse_phase = SPDK_GPT_PARSE_PHASE_PRIMARY;
+ gpt->sector_size = 512;
+ re = gpt_parse_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set *gpt is "aaa...", check_mbr failed */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ gpt->buf_size = sizeof(a);
+ re = gpt_parse_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set check_mbr passed */
+ mbr = (struct spdk_mbr *)gpt->buf;
+ mbr->mbr_signature = 0xAA55;
+ mbr->partitions[0].start_lba = 1;
+ mbr->partitions[0].os_type = 0xEE;
+ mbr->partitions[0].size_lba = 0xFFFFFFFF;
+ re = gpt_parse_mbr(gpt);
+ CU_ASSERT(re == 0);
+
+ /* Expect read_header failed */
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set read_header passed, read_partitions failed */
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ head->header_size = sizeof(*head);
+ head->gpt_signature[0] = 'E';
+ head->gpt_signature[1] = 'F';
+ head->gpt_signature[2] = 'I';
+ head->gpt_signature[3] = ' ';
+ head->gpt_signature[4] = 'P';
+ head->gpt_signature[5] = 'A';
+ head->gpt_signature[6] = 'R';
+ head->gpt_signature[7] = 'T';
+ to_le32(&head->header_crc32, 0x5531F2F0);
+ to_le64(&head->my_lba, 0x1);
+ to_le64(&gpt->lba_start, 0x0);
+ to_le64(&gpt->lba_end, 0x2E935FFE);
+ to_le64(&head->first_usable_lba, 0xA);
+ to_le64(&head->last_usable_lba, 0xF4240);
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set read_partitions passed, all passed */
+ to_le32(&head->size_of_partition_entry, 0x80);
+ to_le64(&head->partition_entry_lba, 0x20);
+ to_le32(&head->header_crc32, 0x845A09AA);
+ to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB);
+ to_le32(&head->num_partition_entries, 0x80);
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_parse_secondary(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* gpt_parse_partition_table(NULL) is not tested here; a NULL gpt is rejected earlier, in gpt_parse_mbr() */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ gpt->parse_phase = SPDK_GPT_PARSE_PHASE_SECONDARY;
+ gpt->sector_size = 512;
+
+ /* Set *gpt is "aaa...", read_header failed */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ gpt->buf_size = sizeof(a);
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set read_header passed, read_partitions failed */
+ head = (struct spdk_gpt_header *)(gpt->buf + gpt->buf_size - gpt->sector_size);
+ head->header_size = sizeof(*head);
+ head->gpt_signature[0] = 'E';
+ head->gpt_signature[1] = 'F';
+ head->gpt_signature[2] = 'I';
+ head->gpt_signature[3] = ' ';
+ head->gpt_signature[4] = 'P';
+ head->gpt_signature[5] = 'A';
+ head->gpt_signature[6] = 'R';
+ head->gpt_signature[7] = 'T';
+ to_le32(&head->header_crc32, 0xAA68A167);
+ to_le64(&head->my_lba, 0x63FFFFF);
+ to_le64(&gpt->lba_start, 0x0);
+ to_le64(&gpt->lba_end, 0x63FFFFF);
+ to_le64(&gpt->total_sectors, 0x6400000);
+ to_le64(&head->first_usable_lba, 0xA);
+ to_le64(&head->last_usable_lba, 0x63FFFDE);
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set read_partitions passed, all passed */
+ to_le32(&head->size_of_partition_entry, 0x80);
+ to_le64(&head->partition_entry_lba, 0x63FFFDF);
+ to_le32(&head->header_crc32, 0x204129E8);
+ to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB);
+ to_le32(&head->num_partition_entries, 0x80);
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("gpt_parse", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_parse_mbr_and_primary);
+ CU_ADD_TEST(suite, test_parse_secondary);
+ CU_ADD_TEST(suite, test_check_mbr);
+ CU_ADD_TEST(suite, test_read_header);
+ CU_ADD_TEST(suite, test_read_partitions);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/mt/Makefile b/src/spdk/test/unit/lib/bdev/mt/Makefile
new file mode 100644
index 000000000..a19b345aa
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore b/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore
new file mode 100644
index 000000000..a5a22d0d3
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore
@@ -0,0 +1 @@
+bdev_ut
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile b/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile
new file mode 100644
index 000000000..46b2987ae
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+
+TEST_FILE = bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c b/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
new file mode 100644
index 000000000..351404a37
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
@@ -0,0 +1,1994 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/ut_multithread.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "bdev/bdev.c"
+
+#define BDEV_UT_NUM_THREADS 3
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
+DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
+
+struct ut_bdev {
+ struct spdk_bdev bdev;
+ void *io_target;
+};
+
+struct ut_bdev_channel {
+ TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
+ uint32_t outstanding_cnt;
+ uint32_t avail_cnt;
+};
+
+int g_io_device;
+struct ut_bdev g_bdev;
+struct spdk_bdev_desc *g_desc;
+bool g_teardown_done = false;
+bool g_get_io_channel = true;
+bool g_create_ch = true;
+bool g_init_complete_called = false;
+bool g_fini_start_called = true;
+int g_status = 0;
+int g_count = 0;
+struct spdk_histogram_data *g_histogram = NULL;
+
+static int
+stub_create_ch(void *io_device, void *ctx_buf)
+{
+ struct ut_bdev_channel *ch = ctx_buf;
+
+ if (g_create_ch == false) {
+ return -1;
+ }
+
+ TAILQ_INIT(&ch->outstanding_io);
+ ch->outstanding_cnt = 0;
+ /*
+ * When avail_cnt gets to 0, the submit_request function will return ENOMEM.
+ * Most tests do not want ENOMEM to occur, so by default set this to a
+ * big value that won't get hit. The ENOMEM tests can then override this
+ * value to something much smaller to induce ENOMEM conditions.
+ */
+ ch->avail_cnt = 2048;
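+ /* For example, the enomem() test below sets
+ *
+ * ut_ch->avail_cnt = AVAIL; (AVAIL == 20)
+ *
+ * so that the 21st outstanding I/O hits the ENOMEM path. */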
+ return 0;
+}
+
+static void
+stub_destroy_ch(void *io_device, void *ctx_buf)
+{
+}
+
+static struct spdk_io_channel *
+stub_get_io_channel(void *ctx)
+{
+ struct ut_bdev *ut_bdev = ctx;
+
+ if (g_get_io_channel == true) {
+ return spdk_get_io_channel(ut_bdev->io_target);
+ } else {
+ return NULL;
+ }
+}
+
+static int
+stub_destruct(void *ctx)
+{
+ return 0;
+}
+
+static void
+stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
+{
+ struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+ struct spdk_bdev_io *io;
+
+ if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
+ while (!TAILQ_EMPTY(&ch->outstanding_io)) {
+ io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+ ch->outstanding_cnt--;
+ spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
+ ch->avail_cnt++;
+ }
+ } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
+ TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
+ if (io == bdev_io->u.abort.bio_to_abort) {
+ TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+ ch->outstanding_cnt--;
+ spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
+ ch->avail_cnt++;
+
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
+ return;
+ }
+ }
+
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
+ return;
+ }
+
+ if (ch->avail_cnt > 0) {
+ TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_cnt++;
+ ch->avail_cnt--;
+ } else {
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
+ }
+}
+
+static uint32_t
+stub_complete_io(void *io_target, uint32_t num_to_complete)
+{
+ struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
+ struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+ struct spdk_bdev_io *io;
+ bool complete_all = (num_to_complete == 0);
+ uint32_t num_completed = 0;
+
+ while (complete_all || num_completed < num_to_complete) {
+ if (TAILQ_EMPTY(&ch->outstanding_io)) {
+ break;
+ }
+ io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+ ch->outstanding_cnt--;
+ spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
+ ch->avail_cnt++;
+ num_completed++;
+ }
+
+ spdk_put_io_channel(_ch);
+ return num_completed;
+}
+
+static bool
+stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
+{
+ return true;
+}
+
+static struct spdk_bdev_fn_table fn_table = {
+ .get_io_channel = stub_get_io_channel,
+ .destruct = stub_destruct,
+ .submit_request = stub_submit_request,
+ .io_type_supported = stub_io_type_supported,
+};
+
+struct spdk_bdev_module bdev_ut_if;
+
+static int
+module_init(void)
+{
+ spdk_bdev_module_init_done(&bdev_ut_if);
+ return 0;
+}
+
+static void
+module_fini(void)
+{
+}
+
+static void
+init_complete(void)
+{
+ g_init_complete_called = true;
+}
+
+static void
+fini_start(void)
+{
+ g_fini_start_called = true;
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+ .module_init = module_init,
+ .module_fini = module_fini,
+ .async_init = true,
+ .init_complete = init_complete,
+ .fini_start = fini_start,
+};
+
+SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
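+/* Because async_init is set, the bdev layer waits for
+ * spdk_bdev_module_init_done() before finishing initialization;
+ * module_init() above calls it immediately, after which init_complete()
+ * fires (verified by the basic() test below). */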
+
+static void
+register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
+{
+ memset(ut_bdev, 0, sizeof(*ut_bdev));
+
+ ut_bdev->io_target = io_target;
+ ut_bdev->bdev.ctxt = ut_bdev;
+ ut_bdev->bdev.name = name;
+ ut_bdev->bdev.fn_table = &fn_table;
+ ut_bdev->bdev.module = &bdev_ut_if;
+ ut_bdev->bdev.blocklen = 4096;
+ ut_bdev->bdev.blockcnt = 1024;
+
+ spdk_bdev_register(&ut_bdev->bdev);
+}
+
+static void
+unregister_bdev(struct ut_bdev *ut_bdev)
+{
+ /* Handle any deferred messages. */
+ poll_threads();
+ spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
+}
+
+static void
+bdev_init_cb(void *done, int rc)
+{
+ CU_ASSERT(rc == 0);
+ *(bool *)done = true;
+}
+
+static void
+setup_test(void)
+{
+ bool done = false;
+
+ allocate_cores(BDEV_UT_NUM_THREADS);
+ allocate_threads(BDEV_UT_NUM_THREADS);
+ set_thread(0);
+ spdk_bdev_initialize(bdev_init_cb, &done);
+ spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
+ sizeof(struct ut_bdev_channel), NULL);
+ register_bdev(&g_bdev, "ut_bdev", &g_io_device);
+ spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
+}
+
+static void
+finish_cb(void *cb_arg)
+{
+ g_teardown_done = true;
+}
+
+static void
+teardown_test(void)
+{
+ set_thread(0);
+ g_teardown_done = false;
+ spdk_bdev_close(g_desc);
+ g_desc = NULL;
+ unregister_bdev(&g_bdev);
+ spdk_io_device_unregister(&g_io_device, NULL);
+ spdk_bdev_finish(finish_cb, NULL);
+ poll_threads();
+ memset(&g_bdev, 0, sizeof(g_bdev));
+ CU_ASSERT(g_teardown_done == true);
+ g_teardown_done = false;
+ free_threads();
+ free_cores();
+}
+
+static uint32_t
+bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
+{
+ struct spdk_bdev_io *io;
+ uint32_t cnt = 0;
+
+ TAILQ_FOREACH(io, tailq, internal.link) {
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void
+basic(void)
+{
+ g_init_complete_called = false;
+ setup_test();
+ CU_ASSERT(g_init_complete_called == true);
+
+ set_thread(0);
+
+ g_get_io_channel = false;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch == NULL);
+
+ g_get_io_channel = true;
+ g_create_ch = false;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch == NULL);
+
+ g_get_io_channel = true;
+ g_create_ch = true;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch != NULL);
+ spdk_put_io_channel(g_ut_threads[0].ch);
+
+ g_fini_start_called = false;
+ teardown_test();
+ CU_ASSERT(g_fini_start_called == true);
+}
+
+static void
+_bdev_removed(void *done)
+{
+ *(bool *)done = true;
+}
+
+static void
+_bdev_unregistered(void *done, int rc)
+{
+ CU_ASSERT(rc == 0);
+ *(bool *)done = true;
+}
+
+static void
+unregister_and_close(void)
+{
+ bool done, remove_notify;
+ struct spdk_bdev_desc *desc = NULL;
+
+ setup_test();
+ set_thread(0);
+
+ /* setup_test() automatically opens the bdev,
+ * but this test needs to do that in a different
+ * way. */
+ spdk_bdev_close(g_desc);
+ poll_threads();
+
+ /* Try hotremoving a bdev with descriptors which don't provide
+ * the notification callback */
+ spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &desc);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+
+ /* There is an open descriptor on the device. Unregister it,
+ * which can't proceed until the descriptor is closed. */
+ done = false;
+ spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
+
+ /* Poll the threads to allow all events to be processed */
+ poll_threads();
+
+ /* Make sure the bdev was not unregistered. We still have a
+ * descriptor open */
+ CU_ASSERT(done == false);
+
+ spdk_bdev_close(desc);
+ poll_threads();
+ desc = NULL;
+
+ /* The unregister should have completed */
+ CU_ASSERT(done == true);
+
+
+ /* Register the bdev again */
+ register_bdev(&g_bdev, "ut_bdev", &g_io_device);
+
+ remove_notify = false;
+ spdk_bdev_open(&g_bdev.bdev, true, _bdev_removed, &remove_notify, &desc);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ CU_ASSERT(remove_notify == false);
+
+ /* There is an open descriptor on the device. Unregister it,
+ * which can't proceed until the descriptor is closed. */
+ done = false;
+ spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
+ /* No polling has occurred, so neither of these should execute */
+ CU_ASSERT(remove_notify == false);
+ CU_ASSERT(done == false);
+
+ /* Prior to the unregister completing, close the descriptor */
+ spdk_bdev_close(desc);
+
+ /* Poll the threads to allow all events to be processed */
+ poll_threads();
+
+ /* Remove notify should not have been called because the
+ * descriptor is already closed. */
+ CU_ASSERT(remove_notify == false);
+
+ /* The unregister should have completed */
+ CU_ASSERT(done == true);
+
+ /* Restore the original g_bdev so that we can use teardown_test(). */
+ register_bdev(&g_bdev, "ut_bdev", &g_io_device);
+ spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
+ teardown_test();
+}
+
+static void
+reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ bool *done = cb_arg;
+
+ CU_ASSERT(success == true);
+ *done = true;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+put_channel_during_reset(void)
+{
+ struct spdk_io_channel *io_ch;
+ bool done = false;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch != NULL);
+
+ /*
+ * Start a reset, but then put the I/O channel before
+ * the deferred messages for the reset get a chance to
+ * execute.
+ */
+ spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
+ spdk_put_io_channel(io_ch);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+
+ teardown_test();
+}
+
+static void
+aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+aborted_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
+ status2 = SPDK_BDEV_IO_STATUS_PENDING;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch[0] != NULL);
+ spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
+ poll_threads();
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * First reset has been submitted on ch0. Now submit a second
+ * reset on ch1 which will get queued since there is already a
+ * reset in progress.
+ */
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch[1] != NULL);
+ spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
+ poll_threads();
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * Now destroy ch1. This will abort the queued reset. Check that
+ * the second reset was completed with failed status. Also check
+ * that bdev->internal.reset_in_progress != NULL, since the
+ * original reset has not been completed yet. This ensures that
+ * the bdev code is correctly noticing that the failed reset is
+ * *not* the one that had been submitted to the bdev module.
+ */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+ CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * Now complete the first reset, verify that it completed with SUCCESS
+ * status and that bdev->internal.reset_in_progress is also set back to NULL.
+ */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
+
+ teardown_test();
+}
+
+static void
+io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = bdev_io->internal.status;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+io_during_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ enum spdk_bdev_io_status status0, status1, status_reset;
+ int rc;
+
+ setup_test();
+
+ /*
+ * First test normal case - submit an I/O on each of two channels (with no resets)
+ * and verify they complete successfully.
+ */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+
+ poll_threads();
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /*
+ * Now submit a reset, and leave it pending while we submit I/O on two different
+ * channels. These I/O should be failed by the bdev layer since the reset is in
+ * progress.
+ */
+ set_thread(0);
+ status_reset = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+ poll_threads();
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
+
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ set_thread(1);
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * A reset is in progress, so these read I/O should complete with ABORTED status.
+ * Note that we need to poll_threads() since I/O completed inline have their
+ * completions deferred.
+ */
+ poll_threads();
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
+
+ /*
+ * Complete the reset
+ */
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+
+ /*
+ * Only poll thread 0. We should not get a completion.
+ */
+ poll_thread(0);
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /*
+ * Poll both thread 0 and 1 so the messages can propagate and we
+ * get a completion.
+ */
+ poll_threads();
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+basic_qos(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status, abort_status;
+ int rc;
+
+ setup_test();
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable read/write IOPS, read only byte per second and
+ * read/write byte per second rate limits.
+ * In this case, all rate limits will take equal effect.
+ */
+ /* 2000 read/write I/O per second, or 2 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
+ /* 8K read/write byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
+ /* 8K read only byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;
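+ /* Worked out: 8192000 bytes/sec is 8192 bytes per millisecond, i.e. two
+ * 4K blocks per millisecond, which equals the 2000 IOPS (2 I/O per
+ * millisecond) limit above; hence all three limits throttle at the
+ * same rate. */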
+
+ g_get_io_channel = true;
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /*
+ * Send an I/O on thread 0, which is where the QoS thread is running.
+ */
+ set_thread(0);
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Send an I/O on thread 1. The QoS thread is not running here. */
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ set_thread(1);
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ /* Complete I/O on thread 1. This should not complete the I/O we submitted */
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ /* Now complete I/O on thread 0 */
+ set_thread(0);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Reset rate limit for the next test cases. */
+ spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
+ poll_threads();
+
+ /*
+ * Test abort request when QoS is enabled.
+ */
+
+ /* Send an I/O on thread 0, which is where the QoS thread is running. */
+ set_thread(0);
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ /* Send an abort to the I/O on the same thread. */
+ abort_status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
+
+ /* Send an I/O on thread 1. The QoS thread is not running here. */
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ set_thread(1);
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ /* Send an abort to the I/O on the same thread. */
+ abort_status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ /* The I/O completes as aborted and the abort itself succeeds on thread 1. */
+ CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
+
+ set_thread(0);
+
+ /*
+ * Close the descriptor only, which should stop the qos channel since
+ * this removes the last descriptor.
+ */
+ spdk_bdev_close(g_desc);
+ poll_threads();
+ CU_ASSERT(bdev->internal.qos->ch == NULL);
+
+ /*
+ * Open the bdev again, which should set the qos channel back up since
+ * the I/O channels are still valid.
+ */
+ spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
+ poll_threads();
+ CU_ASSERT(bdev->internal.qos->ch != NULL);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+ set_thread(0);
+
+ /* Close the descriptor, which should stop the qos channel */
+ spdk_bdev_close(g_desc);
+ poll_threads();
+ CU_ASSERT(bdev->internal.qos->ch == NULL);
+
+ /* Open the bdev again; no qos channel is set up since there are no valid channels. */
+ spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
+ poll_threads();
+ CU_ASSERT(bdev->internal.qos->ch == NULL);
+
+ /* Create the channels in reverse order. */
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Confirm that the qos thread is now thread 1 */
+ CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ set_thread(0);
+
+ teardown_test();
+}
+
+static void
+io_during_qos_queue(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status0, status1, status2;
+ int rc;
+
+ setup_test();
+ MOCK_SET(spdk_get_ticks, 0);
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable read/write IOPS, read only byte per sec, write only
+ * byte per sec and read/write byte per sec rate limits.
+ * In this case, both read only and write only byte per sec
+ * rate limit will take effect.
+ */
+ /* 4000 read/write I/O per second, or 4 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
+ /* 8K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
+ /* 4K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
+ /* 4K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;
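+ /* Worked out: the read-only and write-only limits each allow 4096
+ * bytes, i.e. one 4K block, per millisecond, which is stricter than
+ * both the 4 I/O per millisecond IOPS limit and the two-block
+ * read/write byte limit; so at most one read and one write can
+ * complete per millisecond. */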
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Send two read I/Os */
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+ /* Send one write I/O */
+ status2 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Complete any I/O that arrived at the disk */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ /* Only one of the two read I/Os should complete. (logical XOR) */
+ if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ } else {
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ }
+ /* The write I/O should complete. */
+ CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Advance in time by a millisecond */
+ spdk_delay_us(1000);
+
+ /* Complete more I/O */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ /* Now the second read I/O should be done */
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Tear down the channels */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+io_during_qos_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status0, status1, reset_status;
+ int rc;
+
+ setup_test();
+ MOCK_SET(spdk_get_ticks, 0);
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable read/write IOPS, write only byte per sec and
+ * read/write byte per second rate limits.
+ * In this case, read/write byte per second rate limit will
+ * take effect first.
+ */
+ /* 2000 read/write I/O per second, or 2 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
+ /* 4K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
+ /* 8K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;
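+ /* Worked out: the 4096000 bytes/sec read/write limit allows only one
+ * 4K block per millisecond, stricter than the 2 I/O per millisecond
+ * IOPS limit and the two-block write-only limit, so it is the limit
+ * that queues the second write. */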
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ poll_threads();
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Reset the bdev. */
+ reset_status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
+ CU_ASSERT(rc == 0);
+
+ /* Complete any I/O that arrived at the disk */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
+
+ /* Tear down the channels */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+enomem(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct spdk_bdev_shared_resource *shared_resource;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
+ uint32_t nomem_cnt, i;
+ struct spdk_bdev_io *first_io;
+ int rc;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ shared_resource = bdev_ch->shared_resource;
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ /* First submit a number of IOs equal to what the channel can support. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /*
+ * Next, submit one additional I/O. This one should fail with ENOMEM and then go onto
+ * the nomem_io list.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
+ first_io = TAILQ_FIRST(&shared_resource->nomem_io);
+
+ /*
+ * Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind
+ * the first_io above.
+ */
+ for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+
+ /* Assert that first_io is still at the head of the list. */
+ CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
+ nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
+ CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
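+ /* i.e. with AVAIL == 20, retries begin only once outstanding I/O has
+ * drained down to AVAIL - NOMEM_THRESHOLD_COUNT. */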
+
+ /*
+ * Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have
+ * changed since completing just 1 I/O should not trigger retrying the queued nomem_io
+ * list.
+ */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
+
+ /*
+ * Complete enough I/O to hit the nomem_threshold. This should trigger retrying nomem_io,
+ * and we should see I/O get resubmitted to the test bdev module.
+ */
+ stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
+ nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
+
+ /* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
+
+ /*
+ * Send a reset and confirm that all I/O are completed, including the ones that
+ * were queued on the nomem_io list.
+ */
+ status_reset = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
+ poll_threads();
+ CU_ASSERT(rc == 0);
+ /* This will complete the reset. */
+ stub_complete_io(g_bdev.io_target, 0);
+
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
+ CU_ASSERT(shared_resource->io_outstanding == 0);
+
+ spdk_put_io_channel(io_ch);
+ poll_threads();
+ teardown_test();
+}
+
+static void
+enomem_multi_bdev(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct spdk_bdev_shared_resource *shared_resource;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
+ uint32_t i;
+ struct ut_bdev *second_bdev;
+ struct spdk_bdev_desc *second_desc = NULL;
+ struct spdk_bdev_channel *second_bdev_ch;
+ struct spdk_io_channel *second_ch;
+ int rc;
+
+ setup_test();
+
+ /* Register second bdev with the same io_target */
+ second_bdev = calloc(1, sizeof(*second_bdev));
+ SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
+ register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
+ spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
+ SPDK_CU_ASSERT_FATAL(second_desc != NULL);
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ shared_resource = bdev_ch->shared_resource;
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ second_ch = spdk_bdev_get_io_channel(second_desc);
+ second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
+ SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
+
+ /* Saturate io_target through bdev A. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /*
+ * Now submit I/O through the second bdev. This should fail with ENOMEM
+ * and then go onto the nomem_io list.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
+ stub_complete_io(g_bdev.io_target, AVAIL);
+
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
+ CU_ASSERT(shared_resource->io_outstanding == 1);
+
+ /* Now complete our retried I/O */
+ stub_complete_io(g_bdev.io_target, 1);
+ SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_put_io_channel(second_ch);
+ spdk_bdev_close(second_desc);
+ unregister_bdev(second_bdev);
+ poll_threads();
+ free(second_bdev);
+ teardown_test();
+}
+
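+/*
+ * Editor's sketch of the invariant the enomem_multi_* tests rely on: bdev
+ * channels share ENOMEM bookkeeping (the nomem_io queue) exactly when they
+ * sit on the same io_device. Hypothetical helper, for illustration only.
+ */
+__attribute__((unused)) static bool
+ut_channels_share_nomem_queue(struct spdk_bdev_channel *a, struct spdk_bdev_channel *b)
+{
+	return a->shared_resource == b->shared_resource;
+}
+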
+static void
+enomem_multi_io_target(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
+ uint32_t i;
+ int new_io_device;
+ struct ut_bdev *second_bdev;
+ struct spdk_bdev_desc *second_desc = NULL;
+ struct spdk_bdev_channel *second_bdev_ch;
+ struct spdk_io_channel *second_ch;
+ int rc;
+
+ setup_test();
+
+ /* Create new io_target and a second bdev using it */
+ spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
+ sizeof(struct ut_bdev_channel), NULL);
+ second_bdev = calloc(1, sizeof(*second_bdev));
+ SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
+ register_bdev(second_bdev, "ut_bdev2", &new_io_device);
+ spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
+ SPDK_CU_ASSERT_FATAL(second_desc != NULL);
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ /* Different io_target should imply a different shared_resource */
+ second_ch = spdk_bdev_get_io_channel(second_desc);
+ second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
+ SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
+
+ /* Saturate io_target through bdev A. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+
+	/* Issue one more I/O; it should fail with ENOMEM and be queued on the nomem_io list. */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+
+ /*
+ * Now submit I/O through the second bdev. This should go through and complete
+ * successfully because we're using a different io_device underneath.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
+ stub_complete_io(second_bdev->io_target, 1);
+
+	/* Cleanup: complete outstanding I/O. */
+ stub_complete_io(g_bdev.io_target, AVAIL);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+ /* Complete the ENOMEM I/O */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
+
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+ CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
+ spdk_put_io_channel(io_ch);
+ spdk_put_io_channel(second_ch);
+ spdk_bdev_close(second_desc);
+ unregister_bdev(second_bdev);
+ spdk_io_device_unregister(&new_io_device, NULL);
+ poll_threads();
+ free(second_bdev);
+ teardown_test();
+}
+
+static void
+qos_dynamic_enable_done(void *cb_arg, int status)
+{
+ int *rc = cb_arg;
+ *rc = status;
+}
+
+static void
+qos_dynamic_enable(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status bdev_io_status[2];
+ uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
+ int status, second_status, rc, i;
+
+ setup_test();
+ MOCK_SET(spdk_get_ticks, 0);
+
+ for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
+ limits[i] = UINT64_MAX;
+ }
+
+ bdev = &g_bdev.bdev;
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+
+ set_thread(0);
+
+ /*
+ * Enable QoS: Read/Write IOPS, Read/Write byte,
+ * Read only byte and Write only byte per second
+ * rate limits.
+	 * No more than 10 I/Os are allowed per timeslice here.
+ */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
+ limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
+ limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /*
+ * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
+ * Additional I/O will then be queued.
+ */
+ set_thread(0);
+ for (i = 0; i < 10; i++) {
+ bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
+ }
+
+ /*
+ * Send two more I/O. These I/O will be queued since the current timeslice allotment has been
+	 * filled already. We want to test that when QoS is disabled, these two I/O:
+ * 1) are not aborted
+ * 2) are sent back to their original thread for resubmission
+ */
+ bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
+ set_thread(1);
+ bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+
+ /*
+ * Disable QoS: Read/Write IOPS, Read/Write byte,
+ * Read only byte rate limits
+ */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
+ limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Disable QoS: Write only Byte per second rate limit */
+ status = -1;
+ limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /*
+ * All I/O should have been resubmitted back on their original thread. Complete
+ * all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
+ */
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Disable QoS again */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0); /* This should succeed */
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /* Enable QoS on thread 0 */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Disable QoS on thread 1 */
+ set_thread(1);
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ /* Don't poll yet. This should leave the channels with QoS enabled */
+ CU_ASSERT(status == -1);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+	/* Enable QoS. This should immediately fail because the previous QoS disable hasn't completed. */
+ second_status = 0;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
+ poll_threads();
+ CU_ASSERT(status == 0); /* The disable should succeed */
+ CU_ASSERT(second_status < 0); /* The enable should fail */
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /* Enable QoS on thread 1. This should succeed now that the disable has completed. */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ set_thread(0);
+ teardown_test();
+}
+
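+/*
+ * Editor's note on the limits[] convention used in qos_dynamic_enable above,
+ * assuming the spdk_bdev_set_qos_rate_limits() semantics from
+ * include/spdk/bdev.h: UINT64_MAX leaves a limit untouched, 0 disables it,
+ * and any other value enables it. A minimal sketch of disabling one limit:
+ */
+__attribute__((unused)) static void
+ut_qos_disable_one_limit(struct spdk_bdev *bdev, int type,
+			 void (*cb_fn)(void *cb_arg, int status), void *cb_arg)
+{
+	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
+	int i;
+
+	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
+		limits[i] = UINT64_MAX;	/* leave every other limit unchanged */
+	}
+	limits[type] = 0;	/* 0 disables this rate limit type */
+	spdk_bdev_set_qos_rate_limits(bdev, limits, cb_fn, cb_arg);
+}
+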
+static void
+histogram_status_cb(void *cb_arg, int status)
+{
+ g_status = status;
+}
+
+static void
+histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
+{
+ g_status = status;
+ g_histogram = histogram;
+}
+
+static void
+histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
+ uint64_t total, uint64_t so_far)
+{
+ g_count += count;
+}
+
+static void
+bdev_histograms_mt(void)
+{
+ struct spdk_io_channel *ch[2];
+ struct spdk_histogram_data *histogram;
+ uint8_t buf[4096];
+	enum spdk_bdev_io_status status = SPDK_BDEV_IO_STATUS_PENDING;
+ int rc;
+
+ setup_test();
+
+ set_thread(0);
+ ch[0] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[0] != NULL);
+
+ set_thread(1);
+ ch[1] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[1] != NULL);
+
+ /* Enable histogram */
+ spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
+
+ /* Allocate histogram */
+ histogram = spdk_histogram_data_alloc();
+
+ /* Check if histogram is zeroed */
+ spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
+
+ g_count = 0;
+ spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
+
+ CU_ASSERT(g_count == 0);
+
+ set_thread(0);
+ rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+
+ spdk_delay_us(10);
+ stub_complete_io(g_bdev.io_target, 1);
+ poll_threads();
+	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ set_thread(1);
+ rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+
+ spdk_delay_us(10);
+ stub_complete_io(g_bdev.io_target, 1);
+ poll_threads();
+	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ set_thread(0);
+
+ /* Check if histogram gathered data from all I/O channels */
+ spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
+ SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
+
+ g_count = 0;
+ spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
+ CU_ASSERT(g_count == 2);
+
+ /* Disable histogram */
+ spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
+
+ spdk_histogram_data_free(histogram);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(ch[1]);
+ poll_threads();
+ set_thread(0);
+ teardown_test();
+}
+
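+/*
+ * Editor's sketch of a richer spdk_histogram_data_iterate() consumer than the
+ * simple counter above: accumulate a bucket-midpoint weighted sum, from which
+ * a mean latency in ticks can be estimated. The context struct is hypothetical.
+ */
+struct ut_histo_sum_ctx {
+	uint64_t weighted_sum;
+	uint64_t total_count;
+};
+
+__attribute__((unused)) static void
+ut_histo_sum(void *ctx, uint64_t start, uint64_t end, uint64_t count,
+	     uint64_t total, uint64_t so_far)
+{
+	struct ut_histo_sum_ctx *c = ctx;
+
+	/* Each bucket covers [start, end); weight it by its midpoint. */
+	c->weighted_sum += count * ((start + end) / 2);
+	c->total_count += count;
+}
+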
+struct timeout_io_cb_arg {
+ struct iovec iov;
+ uint8_t type;
+};
+
+static int
+bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
+{
+ struct spdk_bdev_io *bdev_io;
+ int n = 0;
+
+ if (!ch) {
+ return -1;
+ }
+
+ TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
+ n++;
+ }
+
+ return n;
+}
+
+static void
+bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
+{
+ struct timeout_io_cb_arg *ctx = cb_arg;
+
+ ctx->type = bdev_io->type;
+ ctx->iov.iov_base = bdev_io->iov.iov_base;
+ ctx->iov.iov_len = bdev_io->iov.iov_len;
+}
+
+static bool g_io_done;
+
+static void
+io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_io_done = true;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+bdev_set_io_timeout_mt(void)
+{
+ struct spdk_io_channel *ch[3];
+ struct spdk_bdev_channel *bdev_ch[3];
+ struct timeout_io_cb_arg cb_arg;
+
+ setup_test();
+
+ g_bdev.bdev.optimal_io_boundary = 16;
+ g_bdev.bdev.split_on_optimal_io_boundary = true;
+
+ set_thread(0);
+ ch[0] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[0] != NULL);
+
+ set_thread(1);
+ ch[1] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[1] != NULL);
+
+ set_thread(2);
+ ch[2] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[2] != NULL);
+
+	/* Multi-thread mode
+	 * 1. Check that the poller was registered successfully.
+	 * 2. Check the timed-out I/O and ensure it was the one submitted by the user.
+	 * 3. Check that the io_submitted link in the bdev_ch works correctly.
+	 * 4. Close the desc and put the io channel while the timeout poller is polling.
+	 */
+
+ /* In desc thread set the timeout */
+ set_thread(0);
+ CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
+ CU_ASSERT(g_desc->io_timeout_poller != NULL);
+ CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
+ CU_ASSERT(g_desc->cb_arg == &cb_arg);
+
+ /* check the IO submitted list and timeout handler */
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
+ bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
+
+ set_thread(1);
+ CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
+ bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ set_thread(2);
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
+ bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
+
+ set_thread(0);
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ spdk_delay_us(3 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+	/* Now the elapsed time reaches the timeout limit */
+ spdk_delay_us(3 * spdk_get_ticks_hz());
+ poll_thread(0);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
+ CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
+
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(1);
+ poll_thread(1);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
+ CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
+
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(2);
+ poll_thread(2);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
+ CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
+
+	/* Run poll_timeout_done(), which completes the timeout poller */
+ set_thread(0);
+ poll_thread(0);
+ CU_ASSERT(g_desc->refs == 0);
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
+ set_thread(1);
+ CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
+ set_thread(2);
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
+
+	/* Trigger the timeout poller to run again; desc->refs is incremented.
+	 * On thread 0 we destroy the io channel before the timeout poller runs,
+	 * so the timeout callback is not called on thread 0.
+ */
+ spdk_delay_us(6 * spdk_get_ticks_hz());
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 1);
+ spdk_put_io_channel(ch[0]);
+ poll_thread(0);
+	CU_ASSERT(g_desc->refs == 1);
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+	/* On thread 1 the timeout poller runs first, then we destroy the io channel,
+	 * so the timeout callback is called on thread 1.
+ */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(1);
+ poll_thread(1);
+ stub_complete_io(g_bdev.io_target, 1);
+ spdk_put_io_channel(ch[1]);
+ poll_thread(1);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
+ CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
+
+	/* Close the desc.
+	 * This unregisters the timeout poller first, then decrements desc->refs;
+	 * refs is not yet zero, so the desc is not freed.
+ */
+ set_thread(0);
+ spdk_bdev_close(g_desc);
+ CU_ASSERT(g_desc->refs == 1);
+ CU_ASSERT(g_desc->io_timeout_poller == NULL);
+
+	/* The timeout poller runs on thread 2, then we destroy the io channel.
+	 * The desc is already closed, so the timeout poller exits immediately and
+	 * the timeout callback is not called on thread 2.
+ */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(2);
+ poll_thread(2);
+ stub_complete_io(g_bdev.io_target, 1);
+ spdk_put_io_channel(ch[2]);
+ poll_thread(2);
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+ set_thread(0);
+ poll_thread(0);
+ g_teardown_done = false;
+ unregister_bdev(&g_bdev);
+ spdk_io_device_unregister(&g_io_device, NULL);
+ spdk_bdev_finish(finish_cb, NULL);
+ poll_threads();
+ memset(&g_bdev, 0, sizeof(g_bdev));
+ CU_ASSERT(g_teardown_done == true);
+ g_teardown_done = false;
+ free_threads();
+ free_cores();
+}
+
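+/*
+ * Editor's sketch of the expiry check the timeout poller applies to each
+ * entry of a channel's io_submitted list (the real poller is in
+ * lib/bdev/bdev.c; this helper is hypothetical): an I/O has timed out once
+ * timeout_in_sec worth of ticks has elapsed since it was submitted.
+ */
+__attribute__((unused)) static bool
+ut_io_is_expired(uint64_t submit_tsc, uint32_t timeout_in_sec, uint64_t now)
+{
+	return submit_tsc + timeout_in_sec * spdk_get_ticks_hz() <= now;
+}
+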
+static bool g_io_done2;
+static bool g_lock_lba_range_done;
+static bool g_unlock_lba_range_done;
+
+static void
+io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_io_done2 = true;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+lock_lba_range_done(void *ctx, int status)
+{
+ g_lock_lba_range_done = true;
+}
+
+static void
+unlock_lba_range_done(void *ctx, int status)
+{
+ g_unlock_lba_range_done = true;
+}
+
+static uint32_t
+stub_channel_outstanding_cnt(void *io_target)
+{
+ struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
+ struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+ uint32_t outstanding_cnt;
+
+ outstanding_cnt = ch->outstanding_cnt;
+
+ spdk_put_io_channel(_ch);
+ return outstanding_cnt;
+}
+
+static void
+lock_lba_range_then_submit_io(void)
+{
+ struct spdk_bdev_desc *desc = NULL;
+ void *io_target;
+ struct spdk_io_channel *io_ch[3];
+ struct spdk_bdev_channel *bdev_ch[3];
+ struct lba_range *range;
+ char buf[4096];
+ int ctx0, ctx1, ctx2;
+ int rc;
+
+ setup_test();
+
+ io_target = g_bdev.io_target;
+ desc = g_desc;
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(io_ch[0] != NULL);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(io_ch[1] != NULL);
+
+ set_thread(0);
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ /* The lock should immediately become valid, since there are no outstanding
+ * write I/O.
+ */
+ CU_ASSERT(g_lock_lba_range_done == true);
+ range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 20);
+ CU_ASSERT(range->length == 10);
+ CU_ASSERT(range->owner_ch == bdev_ch[0]);
+
+ g_io_done = false;
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
+ rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
+
+ stub_complete_io(io_target, 1);
+ poll_threads();
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
+
+ /* Try a write I/O. This should actually be allowed to execute, since the channel
+ * holding the lock is submitting the write I/O.
+ */
+ g_io_done = false;
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
+ rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
+
+ stub_complete_io(io_target, 1);
+ poll_threads();
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
+
+ /* Try a write I/O. This should get queued in the io_locked tailq. */
+ set_thread(1);
+ g_io_done = false;
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
+ rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked));
+ CU_ASSERT(g_io_done == false);
+
+ /* Try to unlock the lba range using thread 1's io_ch. This should fail. */
+ rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Now create a new channel and submit a write I/O with it. This should also be queued.
+ * The new channel should inherit the active locks from the bdev's internal list.
+ */
+ set_thread(2);
+ io_ch[2] = spdk_bdev_get_io_channel(desc);
+ bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
+ CU_ASSERT(io_ch[2] != NULL);
+
+ g_io_done2 = false;
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
+ rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked));
+ CU_ASSERT(g_io_done2 == false);
+
+ set_thread(0);
+ rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges));
+
+ /* The LBA range is unlocked, so the write IOs should now have started execution. */
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
+
+ set_thread(1);
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
+ stub_complete_io(io_target, 1);
+ set_thread(2);
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
+ stub_complete_io(io_target, 1);
+
+ poll_threads();
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_done2 == true);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ set_thread(2);
+ spdk_put_io_channel(io_ch[2]);
+ poll_threads();
+ set_thread(0);
+ teardown_test();
+}
+
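+/*
+ * Editor's sketch: the queueing decisions in the test above reduce to a
+ * half-open interval overlap check (hypothetical helper; the real check is
+ * in lib/bdev/bdev.c). A write to [off1, off1 + len1) must be queued when it
+ * intersects a locked range [off2, off2 + len2) owned by another channel.
+ */
+__attribute__((unused)) static bool
+ut_lba_ranges_overlap(uint64_t off1, uint64_t len1, uint64_t off2, uint64_t len2)
+{
+	return off1 < off2 + len2 && off2 < off1 + len1;
+}
+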
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("bdev", NULL, NULL);
+
+ CU_ADD_TEST(suite, basic);
+ CU_ADD_TEST(suite, unregister_and_close);
+ CU_ADD_TEST(suite, basic_qos);
+ CU_ADD_TEST(suite, put_channel_during_reset);
+ CU_ADD_TEST(suite, aborted_reset);
+ CU_ADD_TEST(suite, io_during_reset);
+ CU_ADD_TEST(suite, io_during_qos_queue);
+ CU_ADD_TEST(suite, io_during_qos_reset);
+ CU_ADD_TEST(suite, enomem);
+ CU_ADD_TEST(suite, enomem_multi_bdev);
+ CU_ADD_TEST(suite, enomem_multi_io_target);
+ CU_ADD_TEST(suite, qos_dynamic_enable);
+ CU_ADD_TEST(suite, bdev_histograms_mt);
+ CU_ADD_TEST(suite, bdev_set_io_timeout_mt);
+ CU_ADD_TEST(suite, lock_lba_range_then_submit_io);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/part.c/.gitignore b/src/spdk/test/unit/lib/bdev/part.c/.gitignore
new file mode 100644
index 000000000..c8302779b
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/.gitignore
@@ -0,0 +1 @@
+part_ut
diff --git a/src/spdk/test/unit/lib/bdev/part.c/Makefile b/src/spdk/test/unit/lib/bdev/part.c/Makefile
new file mode 100644
index 000000000..9b9637dbb
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = part_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/part.c/part_ut.c b/src/spdk/test/unit/lib/bdev/part.c/part_ut.c
new file mode 100644
index 000000000..8bab15f48
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/part_ut.c
@@ -0,0 +1,173 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/ut_multithread.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "spdk_internal/thread.h"
+
+#include "bdev/bdev.c"
+#include "bdev/part.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
+DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
+
+static void
+_part_cleanup(struct spdk_bdev_part *part)
+{
+ free(part->internal.bdev.name);
+ free(part->internal.bdev.product_name);
+}
+
+void
+spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+};
+
+static void vbdev_ut_examine(struct spdk_bdev *bdev);
+
+struct spdk_bdev_module vbdev_ut_if = {
+ .name = "vbdev_ut",
+ .examine_config = vbdev_ut_examine,
+};
+
+SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
+SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
+
+static void
+vbdev_ut_examine(struct spdk_bdev *bdev)
+{
+ spdk_bdev_module_examine_done(&vbdev_ut_if);
+}
+
+static int
+__destruct(void *ctx)
+{
+ return 0;
+}
+
+static struct spdk_bdev_fn_table base_fn_table = {
+ .destruct = __destruct,
+};
+static struct spdk_bdev_fn_table part_fn_table = {
+ .destruct = __destruct,
+};
+
+static void
+part_test(void)
+{
+ struct spdk_bdev_part_base *base;
+ struct spdk_bdev_part part1 = {};
+ struct spdk_bdev_part part2 = {};
+ struct spdk_bdev bdev_base = {};
+ SPDK_BDEV_PART_TAILQ tailq = TAILQ_HEAD_INITIALIZER(tailq);
+ int rc;
+
+ bdev_base.name = "base";
+ bdev_base.fn_table = &base_fn_table;
+ bdev_base.module = &bdev_ut_if;
+ rc = spdk_bdev_register(&bdev_base);
+ CU_ASSERT(rc == 0);
+ base = spdk_bdev_part_base_construct(&bdev_base, NULL, &vbdev_ut_if,
+ &part_fn_table, &tailq, NULL,
+ NULL, 0, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(base != NULL);
+
+ rc = spdk_bdev_part_construct(&part1, base, "test1", 0, 100, "test");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = spdk_bdev_part_construct(&part2, base, "test2", 100, 100, "test");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ spdk_bdev_part_base_hotremove(base, &tailq);
+
+ spdk_bdev_part_base_free(base);
+ _part_cleanup(&part1);
+ _part_cleanup(&part2);
+ spdk_bdev_unregister(&bdev_base, NULL, NULL);
+
+ poll_threads();
+}
+
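+/*
+ * Editor's sketch of the address translation a part bdev performs: an I/O to
+ * block N of "test1" (constructed at base offset 0) lands on base block N,
+ * while the same I/O to "test2" (offset 100) lands on base block N + 100.
+ * Hypothetical helper; the real remapping lives in bdev/part.c.
+ */
+__attribute__((unused)) static uint64_t
+ut_part_to_base_block(uint64_t part_offset_blocks, uint64_t io_offset_blocks)
+{
+	return part_offset_blocks + io_offset_blocks;
+}
+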
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("bdev_part", NULL, NULL);
+
+ CU_ADD_TEST(suite, part_test);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/pmem/.gitignore b/src/spdk/test/unit/lib/bdev/pmem/.gitignore
new file mode 100644
index 000000000..b2e0df1eb
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/.gitignore
@@ -0,0 +1 @@
+bdev_pmem_ut
diff --git a/src/spdk/test/unit/lib/bdev/pmem/Makefile b/src/spdk/test/unit/lib/bdev/pmem/Makefile
new file mode 100644
index 000000000..cb601f1e0
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = bdev_pmem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c b/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c
new file mode 100644
index 000000000..8cd51e9f7
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c
@@ -0,0 +1,772 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/ut_multithread.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk_internal/thread.h"
+
+#include "bdev/pmem/bdev_pmem.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
+ (struct spdk_conf *cp, const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+
+static struct spdk_bdev_module *g_bdev_pmem_module;
+static int g_bdev_module_cnt;
+
+struct pmemblk {
+ const char *name;
+ bool is_open;
+ bool is_consistent;
+ size_t bsize;
+ long long nblock;
+
+ uint8_t *buffer;
+};
+
+static const char *g_bdev_name = "pmem0";
+
+/* PMEMblkpool is a typedef of struct pmemblk */
+static PMEMblkpool g_pool_ok = {
+ .name = "/pools/ok_pool",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 4096,
+ .nblock = 150
+};
+
+static PMEMblkpool g_pool_nblock_0 = {
+ .name = "/pools/nblock_0",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 4096,
+ .nblock = 0
+};
+
+static PMEMblkpool g_pool_bsize_0 = {
+ .name = "/pools/nblock_0",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 0,
+ .nblock = 100
+};
+
+static PMEMblkpool g_pool_inconsistent = {
+ .name = "/pools/inconsistent",
+ .is_open = false,
+ .is_consistent = false,
+ .bsize = 512,
+ .nblock = 1
+};
+
+static int g_opened_pools;
+static struct spdk_bdev *g_bdev;
+static const char *g_check_version_msg;
+static bool g_pmemblk_open_allow_open = true;
+
+static PMEMblkpool *
+find_pmemblk_pool(const char *path)
+{
+ if (path == NULL) {
+ errno = EINVAL;
+ return NULL;
+ } else if (strcmp(g_pool_ok.name, path) == 0) {
+ return &g_pool_ok;
+ } else if (strcmp(g_pool_nblock_0.name, path) == 0) {
+ return &g_pool_nblock_0;
+ } else if (strcmp(g_pool_bsize_0.name, path) == 0) {
+ return &g_pool_bsize_0;
+ } else if (strcmp(g_pool_inconsistent.name, path) == 0) {
+ return &g_pool_inconsistent;
+ }
+
+ errno = ENOENT;
+ return NULL;
+}
+
+PMEMblkpool *
+pmemblk_open(const char *path, size_t bsize)
+{
+ PMEMblkpool *pool;
+
+ if (!g_pmemblk_open_allow_open) {
+ errno = EIO;
+ return NULL;
+ }
+
+ pool = find_pmemblk_pool(path);
+ if (!pool) {
+ errno = ENOENT;
+ return NULL;
+ }
+
+ CU_ASSERT_TRUE_FATAL(pool->is_consistent);
+ CU_ASSERT_FALSE(pool->is_open);
+ if (pool->is_open == false) {
+ pool->is_open = true;
+ g_opened_pools++;
+ } else {
+ errno = EBUSY;
+ pool = NULL;
+ }
+
+ return pool;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(NULL, bdev_io, true);
+}
+
+static void
+check_open_pool_fatal(PMEMblkpool *pool)
+{
+ SPDK_CU_ASSERT_FATAL(pool != NULL);
+ SPDK_CU_ASSERT_FATAL(find_pmemblk_pool(pool->name) == pool);
+ SPDK_CU_ASSERT_FATAL(pool->is_open == true);
+}
+
+void
+pmemblk_close(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ pool->is_open = false;
+ CU_ASSERT(g_opened_pools > 0);
+ g_opened_pools--;
+}
+
+size_t
+pmemblk_bsize(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ return pool->bsize;
+}
+
+size_t
+pmemblk_nblock(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ return pool->nblock;
+}
+
+int
+pmemblk_read(PMEMblkpool *pool, void *buf, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ memcpy(buf, &pool->buffer[blockno * pool->bsize], pool->bsize);
+ return 0;
+}
+
+int
+pmemblk_write(PMEMblkpool *pool, const void *buf, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ memcpy(&pool->buffer[blockno * pool->bsize], buf, pool->bsize);
+ return 0;
+}
+
+int
+pmemblk_set_zero(PMEMblkpool *pool, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ memset(&pool->buffer[blockno * pool->bsize], 0, pool->bsize);
+ return 0;
+}
+
+const char *
+pmemblk_errormsg(void)
+{
+ return strerror(errno);
+}
+
+const char *
+pmemblk_check_version(unsigned major_required, unsigned minor_required)
+{
+ return g_check_version_msg;
+}
+
+int
+pmemblk_check(const char *path, size_t bsize)
+{
+ PMEMblkpool *pool = find_pmemblk_pool(path);
+
+ if (!pool) {
+ errno = ENOENT;
+ return -1;
+ }
+
+ if (!pool->is_consistent) {
+ /* errno ? */
+ return 0;
+ }
+
+ if (bsize != 0 && pool->bsize != bsize) {
+ /* errno ? */
+ return 0;
+ }
+
+ return 1;
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+}
+
+int
+spdk_bdev_register(struct spdk_bdev *bdev)
+{
+ CU_ASSERT_PTR_NULL(g_bdev);
+ g_bdev = bdev;
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_bdev_module_finish_done(void)
+{
+}
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
+static void
+ut_bdev_pmem_destruct(struct spdk_bdev *bdev)
+{
+ SPDK_CU_ASSERT_FATAL(g_bdev != NULL);
+ CU_ASSERT_EQUAL(bdev_pmem_destruct(bdev->ctxt), 0);
+ g_bdev = NULL;
+}
+
+void
+spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
+{
+ g_bdev_pmem_module = bdev_module;
+ g_bdev_module_cnt++;
+}
+
+static int
+bdev_submit_request(struct spdk_bdev *bdev, int16_t io_type, uint64_t offset_blocks,
+ uint64_t num_blocks, struct iovec *iovs, size_t iov_cnt)
+{
+ struct spdk_bdev_io bio = { 0 };
+
+ switch (io_type) {
+ case SPDK_BDEV_IO_TYPE_READ:
+ bio.u.bdev.iovs = iovs;
+ bio.u.bdev.iovcnt = iov_cnt;
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_WRITE:
+ bio.u.bdev.iovs = iovs;
+ bio.u.bdev.iovcnt = iov_cnt;
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_FLUSH:
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_RESET:
+ break;
+ case SPDK_BDEV_IO_TYPE_UNMAP:
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ default:
+ CU_FAIL_FATAL("BUG:Unexpected IO type");
+ break;
+ }
+
+ /*
+ * Set status to value that shouldn't be returned
+ */
+ bio.type = io_type;
+ bio.internal.status = SPDK_BDEV_IO_STATUS_PENDING;
+ bio.bdev = bdev;
+ bdev_pmem_submit_request(NULL, &bio);
+ return bio.internal.status;
+}
+
+static int
+ut_pmem_blk_clean(void)
+{
+ free(g_pool_ok.buffer);
+ g_pool_ok.buffer = NULL;
+
+ /* Unload module to free IO channel */
+ g_bdev_pmem_module->module_fini();
+ poll_threads();
+
+ free_threads();
+
+ return 0;
+}
+
+static int
+ut_pmem_blk_init(void)
+{
+ errno = 0;
+
+ allocate_threads(1);
+ set_thread(0);
+
+ g_pool_ok.buffer = calloc(g_pool_ok.nblock, g_pool_ok.bsize);
+ if (g_pool_ok.buffer == NULL) {
+ ut_pmem_blk_clean();
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+ut_pmem_init(void)
+{
+ SPDK_CU_ASSERT_FATAL(g_bdev_pmem_module != NULL);
+ CU_ASSERT_EQUAL(g_bdev_module_cnt, 1);
+
+ /* Make pmemblk_check_version fail with provided error message */
+ g_check_version_msg = "TEST FAIL MESSAGE";
+ CU_ASSERT_NOT_EQUAL(g_bdev_pmem_module->module_init(), 0);
+
+	/* This init must succeed */
+ g_check_version_msg = NULL;
+ CU_ASSERT_EQUAL(g_bdev_pmem_module->module_init(), 0);
+}
+
+static void
+ut_pmem_open_close(void)
+{
+ struct spdk_bdev *bdev = NULL;
+ int pools_cnt;
+ int rc;
+
+ pools_cnt = g_opened_pools;
+
+ /* Try opening with NULL name */
+ rc = create_pmem_disk(NULL, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open non-existent pool */
+ rc = create_pmem_disk("non existent pool", NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open inconsistent pool */
+ rc = create_pmem_disk(g_pool_inconsistent.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+	/* Make pmemblk_open() itself fail for an unknown reason. */
+ g_pmemblk_open_allow_open = false;
+ rc = create_pmem_disk(g_pool_inconsistent.name, NULL, &bdev);
+ g_pmemblk_open_allow_open = true;
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open pool with nblocks = 0 */
+ rc = create_pmem_disk(g_pool_nblock_0.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open pool with bsize = 0 */
+ rc = create_pmem_disk(g_pool_bsize_0.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+	/* Open good pool with a NULL bdev name */
+ rc = create_pmem_disk(g_pool_ok.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open good pool */
+ rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ CU_ASSERT_TRUE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(pools_cnt + 1, g_opened_pools);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+}
+
+static void
+ut_pmem_write_read(void)
+{
+ uint8_t *write_buf, *read_buf;
+ struct spdk_bdev *bdev;
+ int rc;
+ size_t unaligned_aligned_size = 100;
+ size_t buf_size = g_pool_ok.bsize * g_pool_ok.nblock;
+ size_t i;
+ const uint64_t nblock_offset = 10;
+ uint64_t offset;
+ size_t io_size, nblock, total_io_size, bsize;
+
+ bsize = 4096;
+ struct iovec iov[] = {
+ { 0, 2 * bsize },
+ { 0, 3 * bsize },
+ { 0, 4 * bsize },
+ };
+
+ rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ SPDK_CU_ASSERT_FATAL(g_pool_ok.nblock > 40);
+
+ write_buf = calloc(1, buf_size);
+ read_buf = calloc(1, buf_size);
+
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(write_buf != NULL);
+ SPDK_CU_ASSERT_FATAL(read_buf != NULL);
+
+ total_io_size = 0;
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < 3; i++) {
+ iov[i].iov_base = &write_buf[offset + total_io_size];
+ total_io_size += iov[i].iov_len;
+ }
+
+ for (i = 0; i < total_io_size + unaligned_aligned_size; i++) {
+ write_buf[offset + i] = 0x42 + i;
+ }
+
+ SPDK_CU_ASSERT_FATAL(total_io_size < buf_size);
+
+ /*
+ * Write outside pool.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, g_pool_ok.nblock, 1, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Write with insufficient IOV buffers length.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, 0, g_pool_ok.nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Try to write two IOV with first one iov_len % bsize != 0.
+ */
+ io_size = iov[0].iov_len + iov[1].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[0].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, 0, nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+ iov[0].iov_len -= unaligned_aligned_size;
+
+ /*
+ * Try to write one IOV.
+ */
+ nblock = iov[0].iov_len / g_pool_ok.bsize;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, nblock_offset, nblock, &iov[0], 1);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /*
+ * Try to write 2 IOV.
+	 * The sum of the IOV lengths is larger than the IO size; the last IOV is oversized and its iov_len % bsize != 0.
+ */
+ offset = iov[0].iov_len / g_pool_ok.bsize;
+ io_size = iov[1].iov_len + iov[2].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[2].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, nblock_offset + offset, nblock,
+ &iov[1], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+ iov[2].iov_len -= unaligned_aligned_size;
+
+ /*
+ * Examine pool state:
+ * 1. Written area should have expected values.
+ * 2. Anything else should contain zeros.
+ */
+ offset = nblock_offset * g_pool_ok.bsize + total_io_size;
+ rc = memcmp(&g_pool_ok.buffer[0], write_buf, offset);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ for (i = offset; i < buf_size; i++) {
+ if (g_pool_ok.buffer[i] != 0) {
+ CU_ASSERT_EQUAL(g_pool_ok.buffer[i], 0);
+ break;
+ }
+ }
+
+ /* Setup IOV for reads */
+ memset(read_buf, 0xAB, buf_size);
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < 3; i++) {
+ iov[i].iov_base = &read_buf[offset];
+ offset += iov[i].iov_len;
+ }
+
+ /*
+	 * Read outside pool.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, g_pool_ok.nblock, 1, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Read with insufficient IOV buffers length.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, g_pool_ok.nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Try to read two IOV with first one iov_len % bsize != 0.
+ */
+ io_size = iov[0].iov_len + iov[1].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[0].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+ iov[0].iov_len -= unaligned_aligned_size;
+
+ /*
+	 * Try to read one IOV.
+ */
+ nblock = iov[0].iov_len / g_pool_ok.bsize;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, nblock_offset, nblock, &iov[0], 1);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /*
+ * Try to read 2 IOV.
+	 * The sum of the IOV lengths is larger than the IO size; the last IOV is oversized and its iov_len % bsize != 0.
+ */
+ offset = iov[0].iov_len / g_pool_ok.bsize;
+ io_size = iov[1].iov_len + iov[2].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[2].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, nblock_offset + offset, nblock,
+ &iov[1], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+ iov[2].iov_len -= unaligned_aligned_size;
+
+ /*
+	 * Examine what we read:
+	 * 1. The read-back area should match what was written.
+	 * 2. Everything else should still contain the 0xAB fill pattern.
+ */
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < offset; i++) {
+ if (read_buf[i] != 0xAB) {
+ CU_ASSERT_EQUAL(read_buf[i], 0xAB);
+ break;
+ }
+ }
+
+ rc = memcmp(&read_buf[offset], &write_buf[offset], total_io_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ offset += total_io_size;
+ for (i = offset; i < buf_size; i++) {
+ if (read_buf[i] != 0xAB) {
+ CU_ASSERT_EQUAL(read_buf[i], 0xAB);
+ break;
+ }
+ }
+
+ memset(g_pool_ok.buffer, 0, g_pool_ok.bsize * g_pool_ok.nblock);
+ free(write_buf);
+ free(read_buf);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(g_opened_pools, 0);
+}
+
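+/*
+ * Editor's sketch of the IOV validation ut_pmem_write_read exercises above
+ * (hypothetical helper; the real handling is in bdev/pmem/bdev_pmem.c): every
+ * IOV that is fully consumed must end on a block boundary, the final IOV may
+ * be oversized, and the IOVs together must supply num_blocks of data.
+ */
+__attribute__((unused)) static bool
+ut_iovs_can_carry_blocks(const struct iovec *iovs, size_t iov_cnt,
+			 size_t bsize, uint64_t num_blocks)
+{
+	uint64_t remaining = num_blocks * bsize;
+	size_t i;
+
+	for (i = 0; i < iov_cnt && remaining > 0; i++) {
+		if (iovs[i].iov_len >= remaining) {
+			return true;	/* the last IOV may be oversized */
+		}
+		if (iovs[i].iov_len % bsize != 0) {
+			return false;	/* a block would straddle an IOV boundary */
+		}
+		remaining -= iovs[i].iov_len;
+	}
+	return remaining == 0;
+}
+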
+static void
+ut_pmem_reset(void)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_RESET, 0, 0, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ ut_bdev_pmem_destruct(bdev);
+}
+
+static void
+ut_pmem_unmap_write_zero(int16_t io_type)
+{
+ struct spdk_bdev *bdev;
+ size_t buff_size = g_pool_ok.nblock * g_pool_ok.bsize;
+ size_t i;
+ uint8_t *buffer;
+ int rc;
+
+ CU_ASSERT(io_type == SPDK_BDEV_IO_TYPE_UNMAP || io_type == SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(g_pool_ok.nblock > 40);
+
+ buffer = calloc(1, buff_size);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ for (i = 10 * g_pool_ok.bsize; i < 30 * g_pool_ok.bsize; i++) {
+ buffer[i] = 0x30 + io_type + i;
+ }
+ memcpy(g_pool_ok.buffer, buffer, buff_size);
+
+ /*
+ * Block outside of pool.
+ */
+ rc = bdev_submit_request(bdev, io_type, g_pool_ok.nblock, 1, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /*
+	 * Blocks 15 to 24 (10 blocks starting at block 15).
+ */
+ memset(&buffer[15 * g_pool_ok.bsize], 0, 10 * g_pool_ok.bsize);
+ rc = bdev_submit_request(bdev, io_type, 15, 10, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /*
+ * All blocks.
+ */
+ memset(buffer, 0, buff_size);
+ rc = bdev_submit_request(bdev, io_type, 0, g_pool_ok.nblock, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(g_opened_pools, 0);
+
+ free(buffer);
+}
+
+static void
+ut_pmem_write_zero(void)
+{
+ ut_pmem_unmap_write_zero(SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+}
+
+static void
+ut_pmem_unmap(void)
+{
+ ut_pmem_unmap_write_zero(SPDK_BDEV_IO_TYPE_UNMAP);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("bdev_pmem", ut_pmem_blk_init, ut_pmem_blk_clean);
+
+ CU_ADD_TEST(suite, ut_pmem_init);
+ CU_ADD_TEST(suite, ut_pmem_open_close);
+ CU_ADD_TEST(suite, ut_pmem_write_read);
+ CU_ADD_TEST(suite, ut_pmem_reset);
+ CU_ADD_TEST(suite, ut_pmem_write_zero);
+ CU_ADD_TEST(suite, ut_pmem_unmap);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/raid/Makefile b/src/spdk/test/unit/lib/bdev/raid/Makefile
new file mode 100644
index 000000000..0090a85ce
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/Makefile
@@ -0,0 +1,46 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev_raid.c
+
+DIRS-$(CONFIG_RAID5) += raid5.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore
new file mode 100644
index 000000000..98d1a166e
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore
@@ -0,0 +1 @@
+bdev_raid_ut
diff --git a/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile
new file mode 100644
index 000000000..da0ab94ba
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+
+TEST_FILE = bdev_raid_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c
new file mode 100644
index 000000000..6cf8e9f69
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c
@@ -0,0 +1,2258 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/env.h"
+#include "spdk_internal/mock.h"
+#include "bdev/raid/bdev_raid.c"
+#include "bdev/raid/bdev_raid_rpc.c"
+#include "bdev/raid/raid0.c"
+#include "common/lib/ut_multithread.c"
+
+#define MAX_BASE_DRIVES 32
+#define MAX_RAIDS 2
+#define INVALID_IO_SUBMIT 0xFFFF
+#define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5))
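+/* Each strip count in raid_bdev_io_generate() yields up to 3 * 3 * 3 IO
+ * ranges (start offset x end offset x starting drive); the "+ 5" term
+ * leaves room for the extra strip-count multiples that are also exercised.
+ */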
+#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)
+
+struct spdk_bdev_channel {
+ struct spdk_io_channel *channel;
+};
+
+/* Data structure to capture the output of IO for verification */
+struct io_output {
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *ch;
+ uint64_t offset_blocks;
+ uint64_t num_blocks;
+ spdk_bdev_io_completion_cb cb;
+ void *cb_arg;
+ enum spdk_bdev_io_type iotype;
+};
+
+struct raid_io_ranges {
+ uint64_t lba;
+ uint64_t nblocks;
+};
+
+/* Globals */
+int g_bdev_io_submit_status;
+struct io_output *g_io_output = NULL;
+uint32_t g_io_output_index;
+uint32_t g_io_comp_status;
+bool g_child_io_status_flag;
+void *g_rpc_req;
+uint32_t g_rpc_req_size;
+TAILQ_HEAD(bdev, spdk_bdev);
+struct bdev g_bdev_list;
+TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry);
+struct waitq g_io_waitq;
+uint32_t g_block_len;
+uint32_t g_strip_size;
+uint32_t g_max_io_size;
+uint8_t g_max_base_drives;
+uint8_t g_max_raids;
+uint8_t g_ignore_io_output;
+uint8_t g_rpc_err;
+char *g_get_raids_output[MAX_RAIDS];
+uint32_t g_get_raids_count;
+uint8_t g_json_decode_obj_err;
+uint8_t g_json_decode_obj_create;
+uint8_t g_config_level_create = 0;
+uint8_t g_test_multi_raids;
+struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE];
+uint32_t g_io_range_idx;
+uint64_t g_lba_offset;
+struct spdk_io_channel g_io_channel;
+
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), true);
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg), 0);
+DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp),
+ NULL);
+DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
+ uint32_t state_mask));
+DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias));
+DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
+ struct spdk_json_write_ctx *w));
+DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
+DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0);
+DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values,
+ spdk_json_decode_fn decode_func,
+ void *out, size_t max_size, size_t *out_size, size_t stride), 0);
+DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
+DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
+DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
+DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
+DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL);
+DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry), 0);
+
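+/* Stub: every descriptor shares the single global channel, which lets the
+ * tests assert that ch_ctx->base_channel[i] == &g_io_channel for each
+ * base bdev.
+ */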
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
+{
+ g_io_channel.thread = spdk_get_thread();
+
+ return &g_io_channel;
+}
+
+static void
+set_test_opts(void)
+{
+ g_max_base_drives = MAX_BASE_DRIVES;
+ g_max_raids = MAX_RAIDS;
+ g_block_len = 4096;
+ g_strip_size = 64;
+ g_max_io_size = 1024;
+
+ printf("Test Options\n");
+ printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, "
+ "g_max_raids = %u\n",
+ g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids);
+}
+
+/* Set globals before every test run */
+static void
+set_globals(void)
+{
+ uint32_t max_splits;
+
+ g_bdev_io_submit_status = 0;
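+ /* Size g_io_output for the worst-case fan-out: a striped IO splits into
+ * at most (max_io_size / strip_size) + 1 child IOs (2 when the IO is
+ * smaller than a strip), while a reset issues one child IO per base
+ * drive.
+ */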
+ if (g_max_io_size < g_strip_size) {
+ max_splits = 2;
+ } else {
+ max_splits = (g_max_io_size / g_strip_size) + 1;
+ }
+ if (max_splits < g_max_base_drives) {
+ max_splits = g_max_base_drives;
+ }
+
+ g_io_output = calloc(max_splits, sizeof(struct io_output));
+ SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
+ g_io_output_index = 0;
+ memset(g_get_raids_output, 0, sizeof(g_get_raids_output));
+ g_get_raids_count = 0;
+ g_io_comp_status = 0;
+ g_ignore_io_output = 0;
+ g_config_level_create = 0;
+ g_rpc_err = 0;
+ g_test_multi_raids = 0;
+ g_child_io_status_flag = true;
+ TAILQ_INIT(&g_bdev_list);
+ TAILQ_INIT(&g_io_waitq);
+ g_rpc_req = NULL;
+ g_rpc_req_size = 0;
+ g_json_decode_obj_err = 0;
+ g_json_decode_obj_create = 0;
+ g_lba_offset = 0;
+}
+
+static void
+base_bdevs_cleanup(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev *bdev_next;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
+ free(bdev->name);
+ TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
+ free(bdev);
+ }
+ }
+}
+
+static void
+check_and_remove_raid_bdev(struct raid_bdev_config *raid_cfg)
+{
+ struct raid_bdev *raid_bdev;
+ struct raid_base_bdev_info *base_info;
+
+ /* Get the raid structure allocated for this config, if one exists */
+ raid_bdev = raid_cfg->raid_bdev;
+ if (raid_bdev == NULL) {
+ return;
+ }
+
+ assert(raid_bdev->base_bdev_info != NULL);
+
+ RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
+ if (base_info->bdev) {
+ raid_bdev_free_base_bdev_resource(raid_bdev, base_info);
+ }
+ }
+ assert(raid_bdev->num_base_bdevs_discovered == 0);
+ raid_bdev_cleanup(raid_bdev);
+}
+
+/* Reset globals */
+static void
+reset_globals(void)
+{
+ if (g_io_output) {
+ free(g_io_output);
+ g_io_output = NULL;
+ }
+ g_rpc_req = NULL;
+ g_rpc_req_size = 0;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
+ uint64_t len)
+{
+ cb(bdev_io->internal.ch->channel, bdev_io, true);
+}
+
+/* Store the IO completion status in a global variable so tests can verify it */
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ g_io_comp_status = (status == SPDK_BDEV_IO_STATUS_SUCCESS);
+}
+
+static void
+set_io_output(struct io_output *output,
+ struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg,
+ enum spdk_bdev_io_type iotype)
+{
+ output->desc = desc;
+ output->ch = ch;
+ output->offset_blocks = offset_blocks;
+ output->num_blocks = num_blocks;
+ output->cb = cb;
+ output->cb_arg = cb_arg;
+ output->iotype = iotype;
+}
+
+/* Cache the split IOs for later verification */
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ if (g_max_io_size < g_strip_size) {
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < 2);
+ } else {
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1);
+ }
+ if (g_bdev_io_submit_status == 0) {
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_WRITE);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ if (g_bdev_io_submit_status == 0) {
+ set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ if (g_bdev_io_submit_status == 0) {
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_UNMAP);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
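+/* Stub: unregister synchronously by destructing the bdev and invoking the
+ * completion callback inline.
+ */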
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ bdev->fn_table->destruct(bdev->ctxt);
+
+ if (cb_fn) {
+ cb_fn(cb_arg, 0);
+ }
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc)
+{
+ *_desc = (void *)0x1;
+ return 0;
+}
+
+char *
+spdk_sprintf_alloc(const char *format, ...)
+{
+ return strdup(format);
+}
+
+int
+spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val)
+{
+ struct rpc_bdev_raid_create *req = g_rpc_req;
+
+ if (strcmp(name, "strip_size_kb") == 0) {
+ CU_ASSERT(req->strip_size_kb == val);
+ } else if (strcmp(name, "blocklen_shift") == 0) {
+ CU_ASSERT(spdk_u32log2(g_block_len) == val);
+ } else if (strcmp(name, "num_base_bdevs") == 0) {
+ CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
+ } else if (strcmp(name, "state") == 0) {
+ CU_ASSERT(val == RAID_BDEV_STATE_ONLINE);
+ } else if (strcmp(name, "destruct_called") == 0) {
+ CU_ASSERT(val == 0);
+ } else if (strcmp(name, "num_base_bdevs_discovered") == 0) {
+ CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
+ }
+ return 0;
+}
+
+int
+spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val)
+{
+ struct rpc_bdev_raid_create *req = g_rpc_req;
+
+ if (strcmp(name, "raid_level") == 0) {
+ CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0);
+ }
+ return 0;
+}
+
+void
+spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+ if (bdev_io) {
+ free(bdev_io);
+ }
+}
+
+/* Cache the split IOs for later verification */
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1);
+ if (g_bdev_io_submit_status == 0) {
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_READ);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+void
+spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
+{
+ CU_ASSERT(bdev->internal.claim_module != NULL);
+ bdev->internal.claim_module = NULL;
+}
+
+struct spdk_conf_section *
+spdk_conf_first_section(struct spdk_conf *cp)
+{
+ if (g_config_level_create) {
+ return (void *) 0x1;
+ }
+
+ return NULL;
+}
+
+bool
+spdk_conf_section_match_prefix(const struct spdk_conf_section *sp, const char *name_prefix)
+{
+ if (g_config_level_create) {
+ return true;
+ }
+
+ return false;
+}
+
+char *
+spdk_conf_section_get_val(struct spdk_conf_section *sp, const char *key)
+{
+ struct rpc_bdev_raid_create *req = g_rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "Name") == 0) {
+ return req->name;
+ } else if (strcmp(key, "RaidLevel") == 0) {
+ return (char *)raid_bdev_level_to_str(req->level);
+ }
+ }
+
+ return NULL;
+}
+
+int
+spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key)
+{
+ struct rpc_bdev_raid_create *req = g_rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "StripSize") == 0) {
+ return req->strip_size_kb;
+ } else if (strcmp(key, "NumDevices") == 0) {
+ return req->base_bdevs.num_base_bdevs;
+ }
+ }
+
+ return 0;
+}
+
+char *
+spdk_conf_section_get_nmval(struct spdk_conf_section *sp, const char *key, int idx1, int idx2)
+{
+ struct rpc_bdev_raid_create *req = g_rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "Devices") == 0) {
+ if (idx2 >= g_max_base_drives) {
+ return NULL;
+ }
+ return req->base_bdevs.base_bdevs[idx2];
+ }
+ }
+
+ return NULL;
+}
+
+int
+spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module)
+{
+ if (bdev->internal.claim_module != NULL) {
+ return -1;
+ }
+ bdev->internal.claim_module = module;
+ return 0;
+}
+
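+/* Stub: either fail decoding (g_json_decode_obj_err), deep-copy the prepared
+ * create request (g_json_decode_obj_create), or memcpy the raw request bytes.
+ */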
+int
+spdk_json_decode_object(const struct spdk_json_val *values,
+ const struct spdk_json_object_decoder *decoders, size_t num_decoders,
+ void *out)
+{
+ struct rpc_bdev_raid_create *req, *_out;
+ size_t i;
+
+ if (g_json_decode_obj_err) {
+ return -1;
+ } else if (g_json_decode_obj_create) {
+ req = g_rpc_req;
+ _out = out;
+
+ _out->name = strdup(req->name);
+ SPDK_CU_ASSERT_FATAL(_out->name != NULL);
+ _out->strip_size_kb = req->strip_size_kb;
+ _out->level = req->level;
+ _out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs;
+ for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) {
+ _out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]);
+ SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]);
+ }
+ } else {
+ memcpy(out, g_rpc_req, g_rpc_req_size);
+ }
+
+ return 0;
+}
+
+struct spdk_json_write_ctx *
+spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
+{
+ return (void *)1;
+}
+
+int
+spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val)
+{
+ if (g_test_multi_raids) {
+ g_get_raids_output[g_get_raids_count] = strdup(val);
+ SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL);
+ g_get_raids_count++;
+ }
+
+ return 0;
+}
+
+void
+spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
+ int error_code, const char *msg)
+{
+ g_rpc_err = 1;
+}
+
+void
+spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
+ int error_code, const char *fmt, ...)
+{
+ g_rpc_err = 1;
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ struct spdk_bdev *bdev;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ if (strcmp(bdev_name, bdev->name) == 0) {
+ return bdev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void
+bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
+{
+ if (bdev_io->u.bdev.iovs) {
+ if (bdev_io->u.bdev.iovs->iov_base) {
+ free(bdev_io->u.bdev.iovs->iov_base);
+ }
+ free(bdev_io->u.bdev.iovs);
+ }
+ free(bdev_io);
+}
+
+static void
+bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
+ uint64_t lba, uint64_t blocks, int16_t iotype)
+{
+ struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
+
+ bdev_io->bdev = bdev;
+ bdev_io->u.bdev.offset_blocks = lba;
+ bdev_io->u.bdev.num_blocks = blocks;
+ bdev_io->type = iotype;
+
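+ /* UNMAP and FLUSH carry no data payload, so no iovec is needed */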
+ if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
+ return;
+ }
+
+ bdev_io->u.bdev.iovcnt = 1;
+ bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
+ bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * g_block_len);
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
+ bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * g_block_len;
+ bdev_io->internal.ch = channel;
+}
+
+static void
+verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
+ struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
+{
+ uint8_t index = 0;
+ struct io_output *output;
+
+ SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
+ SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT);
+ SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);
+
+ CU_ASSERT(g_io_output_index == num_base_drives);
+ for (index = 0; index < g_io_output_index; index++) {
+ output = &g_io_output[index];
+ CU_ASSERT(ch_ctx->base_channel[index] == output->ch);
+ CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc);
+ CU_ASSERT(bdev_io->type == output->iotype);
+ }
+ CU_ASSERT(g_io_comp_status == io_status);
+}
+
+static void
+verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
+ struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
+{
+ uint32_t strip_shift = spdk_u32log2(g_strip_size);
+ uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
+ uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
+ strip_shift;
+ uint32_t splits_reqd = (end_strip - start_strip + 1);
+ uint32_t strip;
+ uint64_t pd_strip;
+ uint8_t pd_idx;
+ uint32_t offset_in_strip;
+ uint64_t pd_lba;
+ uint64_t pd_blocks;
+ uint32_t index = 0;
+ uint8_t *buf = bdev_io->u.bdev.iovs->iov_base;
+ struct io_output *output;
+
+ if (io_status == INVALID_IO_SUBMIT) {
+ CU_ASSERT(g_io_comp_status == false);
+ return;
+ }
+ SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
+
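+ /* Recompute the expected RAID0 split and compare it against the cached
+ * child IOs. Worked example with g_strip_size = 64 (strip_shift = 6)
+ * and an IO at offset 96 for 96 blocks: strips 1..2 are covered, so
+ * (with more than two base drives) drive 1 gets pd_lba = 32,
+ * pd_blocks = 32 and drive 2 gets pd_lba = 0, pd_blocks = 64, summing
+ * back to 96 blocks.
+ */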
+ CU_ASSERT(splits_reqd == g_io_output_index);
+ for (strip = start_strip; strip <= end_strip; strip++, index++) {
+ pd_strip = strip / num_base_drives;
+ pd_idx = strip % num_base_drives;
+ if (strip == start_strip) {
+ offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1);
+ pd_lba = (pd_strip << strip_shift) + offset_in_strip;
+ if (strip == end_strip) {
+ pd_blocks = bdev_io->u.bdev.num_blocks;
+ } else {
+ pd_blocks = g_strip_size - offset_in_strip;
+ }
+ } else if (strip == end_strip) {
+ pd_lba = pd_strip << strip_shift;
+ pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) &
+ (g_strip_size - 1)) + 1;
+ } else {
+ pd_lba = pd_strip << raid_bdev->strip_size_shift;
+ pd_blocks = raid_bdev->strip_size;
+ }
+ output = &g_io_output[index];
+ CU_ASSERT(pd_lba == output->offset_blocks);
+ CU_ASSERT(pd_blocks == output->num_blocks);
+ CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch);
+ CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc);
+ CU_ASSERT(bdev_io->type == output->iotype);
+ buf += (pd_blocks << spdk_u32log2(g_block_len));
+ }
+ CU_ASSERT(g_io_comp_status == io_status);
+}
+
+static void
+verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
+ struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev,
+ uint32_t io_status)
+{
+ uint32_t strip_shift = spdk_u32log2(g_strip_size);
+ uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size;
+ uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) %
+ g_strip_size;
+ uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
+ uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
+ strip_shift;
+ uint8_t n_disks_involved;
+ uint64_t start_strip_disk_idx;
+ uint64_t end_strip_disk_idx;
+ uint64_t nblocks_in_start_disk;
+ uint64_t offset_in_start_disk;
+ uint8_t disk_idx;
+ uint64_t base_io_idx;
+ uint64_t sum_nblocks = 0;
+ struct io_output *output;
+
+ if (io_status == INVALID_IO_SUBMIT) {
+ CU_ASSERT(g_io_comp_status == false);
+ return;
+ }
+ SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
+ SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ);
+ SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE);
+
+ n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives);
+ CU_ASSERT(n_disks_involved == g_io_output_index);
+
+ start_strip_disk_idx = start_strip % num_base_drives;
+ end_strip_disk_idx = end_strip % num_base_drives;
+
+ offset_in_start_disk = g_io_output[0].offset_blocks;
+ nblocks_in_start_disk = g_io_output[0].num_blocks;
+
+ for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved;
+ base_io_idx++, disk_idx++) {
+ uint64_t start_offset_in_disk;
+ uint64_t end_offset_in_disk;
+
+ output = &g_io_output[base_io_idx];
+
+ /* wrap disk_idx around the number of base drives */
+ if (disk_idx >= num_base_drives) {
+ disk_idx %= num_base_drives;
+ }
+
+ /* start_offset_in_disk strip-alignment check:
+ * the first base IO starts at the same offset within its strip as the
+ * whole raid IO; every other base IO must start on a strip boundary
+ * (offset within strip == 0).
+ */
+ start_offset_in_disk = output->offset_blocks;
+ if (base_io_idx == 0) {
+ CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip);
+ } else {
+ CU_ASSERT(start_offset_in_disk % g_strip_size == 0);
+ }
+
+ /* end_offset_in_disk strip-alignment check:
+ * the base IO on the disk holding end_strip ends at the same offset
+ * within its strip as the whole raid IO; every other base IO must end
+ * on a strip boundary (offset within strip == g_strip_size - 1).
+ */
+ end_offset_in_disk = output->offset_blocks + output->num_blocks - 1;
+ if (disk_idx == end_strip_disk_idx) {
+ CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip);
+ } else {
+ CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1);
+ }
+
+ /* start_offset_in_disk compared with the start disk:
+ * 1. For a disk_idx greater than start_strip_disk_idx, its
+ * start_offset_in_disk must not exceed the start disk's offset, and
+ * the gap must be smaller than the strip size.
+ * 2. For a disk_idx less than start_strip_disk_idx, its
+ * start_offset_in_disk must exceed the start disk's offset, and the
+ * gap must not exceed the strip size.
+ */
+ if (disk_idx > start_strip_disk_idx) {
+ CU_ASSERT(start_offset_in_disk <= offset_in_start_disk);
+ CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size);
+ } else if (disk_idx < start_strip_disk_idx) {
+ CU_ASSERT(start_offset_in_disk > offset_in_start_disk);
+ CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size);
+ }
+
+ /* num_blocks compared with the start disk:
+ * the difference must be within one strip size.
+ */
+ if (output->num_blocks <= nblocks_in_start_disk) {
+ CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size);
+ } else {
+ CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size);
+ }
+
+ sum_nblocks += output->num_blocks;
+
+ CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch);
+ CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc);
+ CU_ASSERT(bdev_io->type == output->iotype);
+ }
+
+ /* The base IO block counts must sum to the raid bdev_io block count */
+ CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks);
+
+ CU_ASSERT(g_io_comp_status == io_status);
+}
+
+static void
+verify_raid_config_present(const char *name, bool presence)
+{
+ struct raid_bdev_config *raid_cfg;
+ bool cfg_found;
+
+ cfg_found = false;
+
+ TAILQ_FOREACH(raid_cfg, &g_raid_config.raid_bdev_config_head, link) {
+ if (raid_cfg->name != NULL) {
+ if (strcmp(name, raid_cfg->name) == 0) {
+ cfg_found = true;
+ break;
+ }
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(cfg_found == true);
+ } else {
+ CU_ASSERT(cfg_found == false);
+ }
+}
+
+static void
+verify_raid_bdev_present(const char *name, bool presence)
+{
+ struct raid_bdev *pbdev;
+ bool pbdev_found;
+
+ pbdev_found = false;
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+}
+
+static void
+verify_raid_config(struct rpc_bdev_raid_create *r, bool presence)
+{
+ struct raid_bdev_config *raid_cfg = NULL;
+ uint8_t i;
+ int val;
+
+ TAILQ_FOREACH(raid_cfg, &g_raid_config.raid_bdev_config_head, link) {
+ if (strcmp(r->name, raid_cfg->name) == 0) {
+ if (presence == false) {
+ break;
+ }
+ CU_ASSERT(raid_cfg->raid_bdev != NULL);
+ CU_ASSERT(raid_cfg->strip_size == r->strip_size_kb);
+ CU_ASSERT(raid_cfg->num_base_bdevs == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(raid_cfg->level == r->level);
+ if (raid_cfg->base_bdev != NULL) {
+ for (i = 0; i < raid_cfg->num_base_bdevs; i++) {
+ val = strcmp(raid_cfg->base_bdev[i].name,
+ r->base_bdevs.base_bdevs[i]);
+ CU_ASSERT(val == 0);
+ }
+ }
+ break;
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(raid_cfg != NULL);
+ } else {
+ CU_ASSERT(raid_cfg == NULL);
+ }
+}
+
+static void
+verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state)
+{
+ struct raid_bdev *pbdev;
+ struct raid_base_bdev_info *base_info;
+ struct spdk_bdev *bdev = NULL;
+ bool pbdev_found;
+ uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF;
+
+ pbdev_found = false;
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ if (presence == false) {
+ break;
+ }
+ CU_ASSERT(pbdev->config->raid_bdev == pbdev);
+ CU_ASSERT(pbdev->base_bdev_info != NULL);
+ CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len));
+ CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) /
+ g_block_len)));
+ CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len));
+ CU_ASSERT(pbdev->state == raid_state);
+ CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(pbdev->level == r->level);
+ CU_ASSERT(pbdev->destruct_called == false);
+ CU_ASSERT(pbdev->base_bdev_info != NULL);
+ RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) {
+ CU_ASSERT(base_info->bdev != NULL);
+ bdev = spdk_bdev_get_by_name(base_info->bdev->name);
+ CU_ASSERT(bdev != NULL);
+ CU_ASSERT(base_info->remove_scheduled == false);
+
+ if (bdev && bdev->blockcnt < min_blockcnt) {
+ min_blockcnt = bdev->blockcnt;
+ }
+ }
+ CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) *
+ (r->strip_size_kb * 1024 / g_block_len)) *
+ r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt);
+ CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0);
+ CU_ASSERT(pbdev->bdev.write_cache == 0);
+ CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
+ if (pbdev->num_base_bdevs > 1) {
+ CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size);
+ CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true);
+ } else {
+ CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0);
+ CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false);
+ }
+ CU_ASSERT(pbdev->bdev.ctxt == pbdev);
+ CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
+ CU_ASSERT(pbdev->bdev.module == &g_raid_if);
+ break;
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+ pbdev_found = false;
+ if (raid_state == RAID_BDEV_STATE_ONLINE) {
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_configured_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ } else if (raid_state == RAID_BDEV_STATE_CONFIGURING) {
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_configuring_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ } else if (raid_state == RAID_BDEV_STATE_OFFLINE) {
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_offline_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+}
+
+static void
+verify_get_raids(struct rpc_bdev_raid_create *construct_req,
+ uint8_t g_max_raids,
+ char **g_get_raids_output, uint32_t g_get_raids_count)
+{
+ uint8_t i, j;
+ bool found;
+
+ CU_ASSERT(g_max_raids == g_get_raids_count);
+ if (g_max_raids == g_get_raids_count) {
+ for (i = 0; i < g_max_raids; i++) {
+ found = false;
+ for (j = 0; j < g_max_raids; j++) {
+ if (construct_req[i].name &&
+ strcmp(construct_req[i].name, g_get_raids_output[j]) == 0) {
+ found = true;
+ break;
+ }
+ }
+ CU_ASSERT(found == true);
+ }
+ }
+}
+
+static void
+create_base_bdevs(uint32_t bbdev_start_idx)
+{
+ uint8_t i;
+ struct spdk_bdev *base_bdev;
+ char name[16];
+
+ for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) {
+ snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1");
+ base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
+ base_bdev->name = strdup(name);
+ SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
+ base_bdev->blocklen = g_block_len;
+ base_bdev->blockcnt = BLOCK_CNT;
+ TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
+ }
+}
+
+static void
+create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name,
+ uint8_t bbdev_start_idx, bool create_base_bdev)
+{
+ uint8_t i;
+ char name[16];
+ uint8_t bbdev_idx = bbdev_start_idx;
+
+ r->name = strdup(raid_name);
+ SPDK_CU_ASSERT_FATAL(r->name != NULL);
+ r->strip_size_kb = (g_strip_size * g_block_len) / 1024;
+ r->level = RAID0;
+ r->base_bdevs.num_base_bdevs = g_max_base_drives;
+ for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) {
+ snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1");
+ r->base_bdevs.base_bdevs[i] = strdup(name);
+ SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL);
+ }
+ if (create_base_bdev == true) {
+ create_base_bdevs(bbdev_start_idx);
+ }
+ g_rpc_req = r;
+ g_rpc_req_size = sizeof(*r);
+}
+
+static void
+create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name,
+ uint8_t bbdev_start_idx, bool create_base_bdev,
+ uint8_t json_decode_obj_err)
+{
+ create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev);
+
+ g_rpc_err = 0;
+ g_json_decode_obj_create = 1;
+ g_json_decode_obj_err = json_decode_obj_err;
+ g_config_level_create = 0;
+ g_test_multi_raids = 0;
+}
+
+static void
+create_raid_bdev_create_config(struct rpc_bdev_raid_create *r, const char *raid_name,
+ uint8_t bbdev_start_idx, bool create_base_bdev)
+{
+ create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev);
+
+ g_config_level_create = 1;
+ g_test_multi_raids = 0;
+}
+
+static void
+free_test_req(struct rpc_bdev_raid_create *r)
+{
+ uint8_t i;
+
+ free(r->name);
+ for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) {
+ free(r->base_bdevs.base_bdevs[i]);
+ }
+}
+
+static void
+create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name,
+ uint8_t json_decode_obj_err)
+{
+ r->name = strdup(raid_name);
+ SPDK_CU_ASSERT_FATAL(r->name != NULL);
+
+ g_rpc_req = r;
+ g_rpc_req_size = sizeof(*r);
+ g_rpc_err = 0;
+ g_json_decode_obj_create = 0;
+ g_json_decode_obj_err = json_decode_obj_err;
+ g_config_level_create = 0;
+ g_test_multi_raids = 0;
+}
+
+static void
+create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category,
+ uint8_t json_decode_obj_err)
+{
+ r->category = strdup(category);
+ SPDK_CU_ASSERT_FATAL(r->category != NULL);
+
+ g_rpc_req = r;
+ g_rpc_req_size = sizeof(*r);
+ g_rpc_err = 0;
+ g_json_decode_obj_create = 0;
+ g_json_decode_obj_err = json_decode_obj_err;
+ g_config_level_create = 0;
+ g_test_multi_raids = 1;
+ g_get_raids_count = 0;
+}
+
+static void
+test_create_raid(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete delete_req;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&req);
+
+ create_raid_bdev_delete_req(&delete_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_delete_raid(void)
+{
+ struct rpc_bdev_raid_create construct_req;
+ struct rpc_bdev_raid_delete delete_req;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req, true);
+ verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&construct_req);
+
+ create_raid_bdev_delete_req(&delete_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_create_raid_invalid_args(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev_config *raid_cfg;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ req.level = INVALID_RAID_LEVEL;
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, false, 1);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, false, 0);
+ req.strip_size_kb = 1231;
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, false, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&req);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, false, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+
+ create_raid_bdev_create_req(&req, "raid2", 0, false, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", false);
+ verify_raid_bdev_present("raid2", false);
+
+ create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0);
+ free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
+ req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1");
+ SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", false);
+ verify_raid_bdev_present("raid2", false);
+
+ create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0);
+ free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
+ req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1");
+ SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", true);
+ verify_raid_bdev_present("raid2", true);
+ raid_cfg = raid_bdev_config_find_by_name("raid2");
+ SPDK_CU_ASSERT_FATAL(raid_cfg != NULL);
+ check_and_remove_raid_bdev(raid_cfg);
+ raid_bdev_config_cleanup(raid_cfg);
+
+ create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", true);
+ verify_raid_bdev_present("raid2", true);
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_delete_raid_invalid_args(void)
+{
+ struct rpc_bdev_raid_create construct_req;
+ struct rpc_bdev_raid_delete destroy_req;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req, true);
+ verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&construct_req);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 1);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free(destroy_req.name);
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_io_channel(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch_ctx = calloc(1, sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free_test_req(&req);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ free(ch_ctx);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_write_io(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint64_t io_len;
+ uint64_t lba = 0;
+ struct spdk_io_channel *ch_b;
+ struct spdk_bdev_channel *ch_b_ctx;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
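+ /* The raid channel context is assumed to live immediately after the
+ * spdk_io_channel header, which is where spdk_io_channel_get_ctx()
+ * looks for it.
+ */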
+
+ ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel));
+ SPDK_CU_ASSERT_FATAL(ch_b != NULL);
+ ch_b_ctx = spdk_io_channel_get_ctx(ch_b);
+ ch_b_ctx->channel = ch;
+
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+
+ /* test 2 IO sizes based on global strip size set earlier */
+ for (i = 0; i < 2; i++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (g_strip_size / 2) << i;
+ bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ }
+
+ free_test_req(&req);
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ free(ch_b);
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_read_io(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint64_t io_len;
+ uint64_t lba;
+ struct spdk_io_channel *ch_b;
+ struct spdk_bdev_channel *ch_b_ctx;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel));
+ SPDK_CU_ASSERT_FATAL(ch_b != NULL);
+ ch_b_ctx = spdk_io_channel_get_ctx(ch_b);
+ ch_b_ctx->channel = ch;
+
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+ free_test_req(&req);
+
+ /* test 2 IO sizes based on global strip size set earlier */
+ lba = 0;
+ for (i = 0; i < 2; i++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (g_strip_size / 2) << i;
+ bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ }
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ free(ch_b);
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+raid_bdev_io_generate_by_strips(uint64_t n_strips)
+{
+ uint64_t lba;
+ uint64_t nblocks;
+ uint64_t start_offset;
+ uint64_t end_offset;
+ uint64_t offsets_in_strip[3];
+ uint64_t start_bdev_idx;
+ uint64_t start_bdev_offset;
+ uint64_t start_bdev_idxs[3];
+ int i, j, l;
+
+ /* 3 representative offsets within a strip */
+ offsets_in_strip[0] = 0;
+ offsets_in_strip[1] = g_strip_size >> 1;
+ offsets_in_strip[2] = g_strip_size - 1;
+
+ /* 3 representative choices of start_bdev_idx */
+ start_bdev_idxs[0] = 0;
+ start_bdev_idxs[1] = g_max_base_drives >> 1;
+ start_bdev_idxs[2] = g_max_base_drives - 1;
+
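+ /* Each generated range starts start_offset blocks into its first strip
+ * and ends end_offset blocks into its last strip, hence
+ * nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1.
+ */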
+ /* combine each start offset with each end offset within a strip */
+ for (i = 0; i < 3; i++) {
+ start_offset = offsets_in_strip[i];
+ for (j = 0; j < 3; j++) {
+ end_offset = offsets_in_strip[j];
+ if (n_strips == 1 && start_offset > end_offset) {
+ continue;
+ }
+
+ /* consider which base bdev the LBA starts on */
+ for (l = 0; l < 3; l++) {
+ start_bdev_idx = start_bdev_idxs[l];
+ start_bdev_offset = start_bdev_idx * g_strip_size;
+ lba = g_lba_offset + start_bdev_offset + start_offset;
+ nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1;
+
+ g_io_ranges[g_io_range_idx].lba = lba;
+ g_io_ranges[g_io_range_idx].nblocks = nblocks;
+
+ SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE);
+ g_io_range_idx++;
+ }
+ }
+ }
+}
+
+static void
+raid_bdev_io_generate(void)
+{
+ uint64_t n_strips;
+ uint64_t n_strips_span = g_max_base_drives;
+ uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1,
+ g_max_base_drives * 2, g_max_base_drives * 3,
+ g_max_base_drives * 4
+ };
+ uint32_t i;
+
+ g_io_range_idx = 0;
+
+ /* Cover strip counts from 1 up to the number of strips that spans all
+ * base bdevs, plus several multiples of that span.
+ */
+ for (n_strips = 1; n_strips < n_strips_span; n_strips++) {
+ raid_bdev_io_generate_by_strips(n_strips);
+ }
+
+ for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) {
+ n_strips = n_strips_times[i];
+ raid_bdev_io_generate_by_strips(n_strips);
+ }
+}
+
+static void
+test_unmap_io(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+
+ CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true);
+
+ raid_bdev_io_generate();
+ for (count = 0; count < g_io_range_idx; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = g_io_ranges[count].nblocks;
+ lba = g_io_ranges[count].lba;
+ bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP);
+ memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ }
+ free_test_req(&req);
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Test IO failures */
+static void
+test_io_failure(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+ free_test_req(&req);
+
+ lba = 0;
+ for (count = 0; count < 1; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (g_strip_size / 2) << count;
+ bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ INVALID_IO_SUBMIT);
+ bdev_io_cleanup(bdev_io);
+ }
+
+ lba = 0;
+ g_child_io_status_flag = false;
+ for (count = 0; count < 1; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (g_strip_size / 2) << count;
+ bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ }
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Test reset IO */
+static void
+test_reset_io(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+ struct spdk_bdev_io *bdev_io;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ SPDK_CU_ASSERT_FATAL(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+ free_test_req(&req);
+
+ g_bdev_io_submit_status = 0;
+ g_child_io_status_flag = true;
+
+ CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true);
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET);
+ memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ true);
+ bdev_io_cleanup(bdev_io);
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Create multiple raids, destroy raids without IO, get_raids related tests */
+static void
+test_multi_raid_no_io(void)
+{
+ struct rpc_bdev_raid_create *construct_req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct rpc_bdev_raid_get_bdevs get_raids_req;
+ uint8_t i;
+ char name[16];
+ uint8_t bbdev_idx = 0;
+
+ set_globals();
+ construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create));
+ SPDK_CU_ASSERT_FATAL(construct_req != NULL);
+ CU_ASSERT(raid_bdev_init() == 0);
+ for (i = 0; i < g_max_raids; i++) {
+ snprintf(name, 16, "%s%u", "raid", i);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0);
+ bbdev_idx += g_max_base_drives;
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req[i], true);
+ verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
+ }
+
+ create_get_raids_req(&get_raids_req, "all", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
+ for (i = 0; i < g_get_raids_count; i++) {
+ free(g_get_raids_output[i]);
+ }
+
+ create_get_raids_req(&get_raids_req, "online", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
+ for (i = 0; i < g_get_raids_count; i++) {
+ free(g_get_raids_output[i]);
+ }
+
+ create_get_raids_req(&get_raids_req, "configuring", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ create_get_raids_req(&get_raids_req, "offline", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ create_get_raids_req(&get_raids_req, "invalid_category", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ create_get_raids_req(&get_raids_req, "all", 1);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free(get_raids_req.category);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ create_get_raids_req(&get_raids_req, "all", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ CU_ASSERT(g_get_raids_count == g_max_raids);
+ for (i = 0; i < g_get_raids_count; i++) {
+ free(g_get_raids_output[i]);
+ }
+
+ for (i = 0; i < g_max_raids; i++) {
+ SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
+ snprintf(name, 16, "%s", construct_req[i].name);
+ create_raid_bdev_delete_req(&destroy_req, name, 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ }
+ raid_bdev_exit();
+ for (i = 0; i < g_max_raids; i++) {
+ free_test_req(&construct_req[i]);
+ }
+ free(construct_req);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Create multiple raids, fire IOs on raids */
+static void
+test_multi_raid_with_io(void)
+{
+ struct rpc_bdev_raid_create *construct_req;
+ struct rpc_bdev_raid_delete destroy_req;
+ uint8_t i, j;
+ char name[16];
+ uint8_t bbdev_idx = 0;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx = NULL;
+ struct spdk_bdev_io *bdev_io;
+ uint64_t io_len;
+ uint64_t lba = 0;
+ int16_t iotype;
+ struct spdk_io_channel *ch_b;
+ struct spdk_bdev_channel *ch_b_ctx;
+
+ set_globals();
+ construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create));
+ SPDK_CU_ASSERT_FATAL(construct_req != NULL);
+ CU_ASSERT(raid_bdev_init() == 0);
+ ch = calloc(g_max_raids, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel));
+ SPDK_CU_ASSERT_FATAL(ch_b != NULL);
+ ch_b_ctx = spdk_io_channel_get_ctx(ch_b);
+ ch_b_ctx->channel = ch;
+
+ for (i = 0; i < g_max_raids; i++) {
+ snprintf(name, 16, "%s%u", "raid", i);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0);
+ bbdev_idx += g_max_base_drives;
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req[i], true);
+ verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);
+ for (j = 0; j < construct_req[i].base_bdevs.num_base_bdevs; j++) {
+ CU_ASSERT(ch_ctx->base_channel[j] == &g_io_channel);
+ }
+ }
+
+	/* This will perform a read on the first raid and a write on each subsequent
+	 * raid. It can be expanded in the future to perform r/w on each raid device
+	 * in the event that multiple raid levels are supported.
+	 */
+ for (i = 0; i < g_max_raids; i++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = g_strip_size;
+ iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
+ break;
+ }
+ }
+		CU_ASSERT(pbdev != NULL);
+		bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, iotype);
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ }
+
+ for (i = 0; i < g_max_raids; i++) {
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ snprintf(name, 16, "%s", construct_req[i].name);
+ create_raid_bdev_delete_req(&destroy_req, name, 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ }
+ raid_bdev_exit();
+ for (i = 0; i < g_max_raids; i++) {
+ free_test_req(&construct_req[i]);
+ }
+ free(construct_req);
+ free(ch);
+ free(ch_b);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
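+/* raid_bdev_io_type_supported() is expected to answer for read/write (and the
+ * invalid default case) without dereferencing the raid bdev, so passing NULL
+ * here doubles as a NULL-safety check. */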
+static void
+test_io_type_supported(void)
+{
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
+}
+
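+/* Create a raid bdev from a pre-loaded config entry rather than an RPC
+ * request: run examine on every base bdev, reject a claim for an unknown
+ * bdev, and verify the raid comes online. */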
+static void
+test_create_raid_from_config(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct spdk_bdev *bdev;
+ struct rpc_bdev_raid_delete destroy_req;
+ bool can_claim;
+ struct raid_bdev_config *raid_cfg;
+ uint8_t base_bdev_slot;
+
+ set_globals();
+ create_raid_bdev_create_config(&req, "raid1", 0, true);
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ raid_bdev_examine(bdev);
+ }
+
+ can_claim = raid_bdev_can_claim_bdev("Invalid", &raid_cfg, &base_bdev_slot);
+ CU_ASSERT(can_claim == false);
+
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ free_test_req(&req);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
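+/* Drive raid_bdev_init() through its failure paths: missing name, unsupported
+ * strip size, invalid raid level, base bdev counts that do not match the
+ * config, and a duplicated base bdev name. */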
+static void
+test_create_raid_from_config_invalid_params(void)
+{
+ struct rpc_bdev_raid_create req;
+
+ set_globals();
+
+ create_raid_bdev_create_config(&req, "raid1", 0, true);
+ free(req.name);
+ req.name = NULL;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ req.strip_size_kb = 1234;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ req.level = INVALID_RAID_LEVEL;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ req.base_bdevs.num_base_bdevs++;
+ CU_ASSERT(raid_bdev_init() != 0);
+ req.base_bdevs.num_base_bdevs--;
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ req.base_bdevs.num_base_bdevs--;
+ CU_ASSERT(raid_bdev_init() != 0);
+ req.base_bdevs.num_base_bdevs++;
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ if (g_max_base_drives > 1) {
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ snprintf(req.base_bdevs.base_bdevs[g_max_base_drives - 1], 15, "%s", "Nvme0n1");
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ }
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
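+/* Verify that raid_bdev_dump_info_json() succeeds for an online raid bdev. */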
+static void
+test_raid_json_dump_info(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+
+ CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);
+
+ free_test_req(&req);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
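+/* The per-IO context size advertised to the bdev layer must match
+ * struct raid_bdev_io. */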
+static void
+test_context_size(void)
+{
+ CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
+}
+
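+/* Round-trip the raid level parsing and formatting helpers, including invalid
+ * input on both sides. */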
+static void
+test_raid_level_conversions(void)
+{
+ const char *raid_str;
+
+ CU_ASSERT(raid_bdev_parse_raid_level("abcd123") == INVALID_RAID_LEVEL);
+ CU_ASSERT(raid_bdev_parse_raid_level("0") == RAID0);
+ CU_ASSERT(raid_bdev_parse_raid_level("raid0") == RAID0);
+ CU_ASSERT(raid_bdev_parse_raid_level("RAID0") == RAID0);
+
+ raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL);
+ CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
+ raid_str = raid_bdev_level_to_str(1234);
+ CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
+ raid_str = raid_bdev_level_to_str(RAID0);
+ CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("raid", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_create_raid);
+ CU_ADD_TEST(suite, test_delete_raid);
+ CU_ADD_TEST(suite, test_create_raid_invalid_args);
+ CU_ADD_TEST(suite, test_delete_raid_invalid_args);
+ CU_ADD_TEST(suite, test_io_channel);
+ CU_ADD_TEST(suite, test_reset_io);
+ CU_ADD_TEST(suite, test_write_io);
+ CU_ADD_TEST(suite, test_read_io);
+ CU_ADD_TEST(suite, test_unmap_io);
+ CU_ADD_TEST(suite, test_io_failure);
+ CU_ADD_TEST(suite, test_multi_raid_no_io);
+ CU_ADD_TEST(suite, test_multi_raid_with_io);
+ CU_ADD_TEST(suite, test_io_type_supported);
+ CU_ADD_TEST(suite, test_create_raid_from_config);
+ CU_ADD_TEST(suite, test_create_raid_from_config_invalid_params);
+ CU_ADD_TEST(suite, test_raid_json_dump_info);
+ CU_ADD_TEST(suite, test_context_size);
+ CU_ADD_TEST(suite, test_raid_level_conversions);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ set_test_opts();
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore b/src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore
new file mode 100644
index 000000000..946026bf5
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore
@@ -0,0 +1 @@
+raid5_ut
diff --git a/src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile b/src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile
new file mode 100644
index 000000000..ddb733333
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+
+TEST_FILE = raid5_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c b/src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c
new file mode 100644
index 000000000..ba30f327b
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c
@@ -0,0 +1,214 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/env.h"
+#include "spdk_internal/mock.h"
+
+#include "bdev/raid/raid5.c"
+
+DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
+DEFINE_STUB_V(raid_bdev_io_complete, (struct raid_bdev_io *raid_io,
+ enum spdk_bdev_io_status status));
+
+struct raid5_params {
+ uint8_t num_base_bdevs;
+ uint64_t base_bdev_blockcnt;
+ uint32_t base_bdev_blocklen;
+ uint32_t strip_size;
+};
+
+static struct raid5_params *g_params;
+static size_t g_params_count;
+
+#define ARRAY_FOR_EACH(a, e) \
+ for (e = a; e < a + SPDK_COUNTOF(a); e++)
+
+#define RAID5_PARAMS_FOR_EACH(p) \
+ for (p = g_params; p < g_params + g_params_count; p++)
+
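+/* Build the test parameter matrix as the Cartesian product of the value
+ * arrays below, dropping combinations whose strip size in blocks would be
+ * zero or exceed the base bdev size. For example, strip_size_kb = 1 with a
+ * 4096-byte block length gives 1 * 1024 / 4096 = 0 blocks and is skipped. */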
+static int
+test_setup(void)
+{
+ uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
+ uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
+ uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
+ uint32_t strip_size_kb_values[] = { 1, 4, 128 };
+ uint8_t *num_base_bdevs;
+ uint64_t *base_bdev_blockcnt;
+ uint32_t *base_bdev_blocklen;
+ uint32_t *strip_size_kb;
+ struct raid5_params *params;
+
+ g_params_count = SPDK_COUNTOF(num_base_bdevs_values) *
+ SPDK_COUNTOF(base_bdev_blockcnt_values) *
+ SPDK_COUNTOF(base_bdev_blocklen_values) *
+ SPDK_COUNTOF(strip_size_kb_values);
+ g_params = calloc(g_params_count, sizeof(*g_params));
+ if (!g_params) {
+ return -ENOMEM;
+ }
+
+ params = g_params;
+
+ ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
+ ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
+ ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
+ ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
+ params->num_base_bdevs = *num_base_bdevs;
+ params->base_bdev_blockcnt = *base_bdev_blockcnt;
+ params->base_bdev_blocklen = *base_bdev_blocklen;
+ params->strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
+ if (params->strip_size == 0 ||
+ params->strip_size > *base_bdev_blockcnt) {
+ g_params_count--;
+ continue;
+ }
+ params++;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+test_cleanup(void)
+{
+ free(g_params);
+ return 0;
+}
+
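+/* Allocate a minimal raid_bdev stub carrying just the geometry under test:
+ * base bdev count, per-bdev block count/length, and strip size. */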
+static struct raid_bdev *
+create_raid_bdev(struct raid5_params *params)
+{
+ struct raid_bdev *raid_bdev;
+ struct raid_base_bdev_info *base_info;
+
+ raid_bdev = calloc(1, sizeof(*raid_bdev));
+ SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
+
+ raid_bdev->module = &g_raid5_module;
+ raid_bdev->num_base_bdevs = params->num_base_bdevs;
+ raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs,
+ sizeof(struct raid_base_bdev_info));
+ SPDK_CU_ASSERT_FATAL(raid_bdev->base_bdev_info != NULL);
+
+ RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
+ base_info->bdev = calloc(1, sizeof(*base_info->bdev));
+ SPDK_CU_ASSERT_FATAL(base_info->bdev != NULL);
+
+ base_info->bdev->blockcnt = params->base_bdev_blockcnt;
+ base_info->bdev->blocklen = params->base_bdev_blocklen;
+ }
+
+ raid_bdev->strip_size = params->strip_size;
+ raid_bdev->strip_size_shift = spdk_u32log2(raid_bdev->strip_size);
+ raid_bdev->bdev.blocklen = params->base_bdev_blocklen;
+
+ return raid_bdev;
+}
+
+static void
+delete_raid_bdev(struct raid_bdev *raid_bdev)
+{
+ struct raid_base_bdev_info *base_info;
+
+ RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
+ free(base_info->bdev);
+ }
+ free(raid_bdev->base_bdev_info);
+ free(raid_bdev);
+}
+
+static struct raid5_info *
+create_raid5(struct raid5_params *params)
+{
+ struct raid_bdev *raid_bdev = create_raid_bdev(params);
+
+ SPDK_CU_ASSERT_FATAL(raid5_start(raid_bdev) == 0);
+
+ return raid_bdev->module_private;
+}
+
+static void
+delete_raid5(struct raid5_info *r5info)
+{
+ struct raid_bdev *raid_bdev = r5info->raid_bdev;
+
+ raid5_stop(raid_bdev);
+
+ delete_raid_bdev(raid_bdev);
+}
+
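+/* Check the geometry computed by raid5_start(): one strip per stripe is
+ * reserved for parity, so a stripe holds strip_size * (num_base_bdevs - 1)
+ * data blocks, and trailing blocks that do not fill a whole strip are
+ * excluded from the exported block count. */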
+static void
+test_raid5_start(void)
+{
+ struct raid5_params *params;
+
+ RAID5_PARAMS_FOR_EACH(params) {
+ struct raid5_info *r5info;
+
+ r5info = create_raid5(params);
+
+ CU_ASSERT_EQUAL(r5info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
+ CU_ASSERT_EQUAL(r5info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
+ CU_ASSERT_EQUAL(r5info->raid_bdev->bdev.blockcnt,
+ (params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
+ (params->num_base_bdevs - 1));
+ CU_ASSERT_EQUAL(r5info->raid_bdev->bdev.optimal_io_boundary, r5info->stripe_blocks);
+
+ delete_raid5(r5info);
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("raid5", test_setup, test_cleanup);
+ CU_ADD_TEST(suite, test_raid5_start);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore
new file mode 100644
index 000000000..75800527d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore
@@ -0,0 +1 @@
+scsi_nvme_ut
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile
new file mode 100644
index 000000000..0dbe788db
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile
@@ -0,0 +1,37 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = scsi_nvme_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c
new file mode 100644
index 000000000..ef27d7c09
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c
@@ -0,0 +1,131 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "bdev/scsi_nvme.c"
+
+static int
+null_init(void)
+{
+ return 0;
+}
+
+static int
+null_clean(void)
+{
+ return 0;
+}
+
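+/* Exercise spdk_scsi_nvme_translate() with representative status codes from
+ * each NVMe status code type and check the resulting SCSI status, sense key,
+ * ASC and ASCQ values. */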
+static void
+scsi_nvme_translate_test(void)
+{
+ struct spdk_bdev_io bdev_io;
+ int sc, sk, asc, ascq;
+
+ /* SPDK_NVME_SCT_GENERIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_GENERIC;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_TASK_ABORTED);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ABORTED_COMMAND);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_WARNING);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_COMMAND_SPECIFIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_INVALID_FORMAT;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_FORMAT_COMMAND_FAILED);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_FORMAT_COMMAND_FAILED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_OVERLAPPING_RANGE;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_MEDIA_ERROR */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_GUARD_CHECK_ERROR;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_MEDIUM_ERROR);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_LOGICAL_BLOCK_GUARD_CHECK_FAILED);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_LOGICAL_BLOCK_GUARD_CHECK_FAILED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_DEALLOCATED_OR_UNWRITTEN_BLOCK;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_VENDOR_SPECIFIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ bdev_io.internal.error.nvme.sc = 0xff;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("scsi_nvme_suite", null_init, null_clean);
+
+ CU_ADD_TEST(suite, scsi_nvme_translate_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore
new file mode 100644
index 000000000..5f2f6fdff
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore
@@ -0,0 +1 @@
+vbdev_lvol_ut
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile
new file mode 100644
index 000000000..a44f51372
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../)
+
+TEST_FILE = vbdev_lvol_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c
new file mode 100644
index 000000000..a963bd3b7
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c
@@ -0,0 +1,1440 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk/string.h"
+
+#include "bdev/lvol/vbdev_lvol.c"
+
+#include "unit/lib/json_mock.c"
+
+#define SPDK_BS_PAGE_SIZE 0x1000
+
+int g_lvolerrno;
+int g_lvserrno;
+int g_cluster_size;
+int g_registered_bdevs;
+int g_num_lvols = 0;
+struct spdk_lvol_store *g_lvs = NULL;
+struct spdk_lvol *g_lvol = NULL;
+struct lvol_store_bdev *g_lvs_bdev = NULL;
+struct spdk_bdev *g_base_bdev = NULL;
+struct spdk_bdev_io *g_io = NULL;
+struct spdk_io_channel *g_ch = NULL;
+
+static struct spdk_bdev g_bdev = {};
+static struct spdk_lvol_store *g_lvol_store = NULL;
+bool lvol_store_initialize_fail = false;
+bool lvol_store_initialize_cb_fail = false;
+bool lvol_already_opened = false;
+bool g_examine_done = false;
+bool g_bdev_alias_already_exists = false;
+bool g_lvs_with_name_already_exists = false;
+
+int
+spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias)
+{
+ struct spdk_bdev_alias *tmp;
+
+ CU_ASSERT(alias != NULL);
+ CU_ASSERT(bdev != NULL);
+ if (g_bdev_alias_already_exists) {
+ return -EEXIST;
+ }
+
+ tmp = calloc(1, sizeof(*tmp));
+ SPDK_CU_ASSERT_FATAL(tmp != NULL);
+
+ tmp->alias = strdup(alias);
+ SPDK_CU_ASSERT_FATAL(tmp->alias != NULL);
+
+ TAILQ_INSERT_TAIL(&bdev->aliases, tmp, tailq);
+
+ return 0;
+}
+
+int
+spdk_bdev_alias_del(struct spdk_bdev *bdev, const char *alias)
+{
+ struct spdk_bdev_alias *tmp;
+
+ CU_ASSERT(bdev != NULL);
+
+ TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
+ SPDK_CU_ASSERT_FATAL(alias != NULL);
+ if (strncmp(alias, tmp->alias, SPDK_LVOL_NAME_MAX) == 0) {
+ TAILQ_REMOVE(&bdev->aliases, tmp, tailq);
+ free(tmp->alias);
+ free(tmp);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+void
+spdk_bdev_alias_del_all(struct spdk_bdev *bdev)
+{
+ struct spdk_bdev_alias *p, *tmp;
+
+ TAILQ_FOREACH_SAFE(p, &bdev->aliases, tailq, tmp) {
+ TAILQ_REMOVE(&bdev->aliases, p, tailq);
+ free(p->alias);
+ free(p);
+ }
+}
+
+void
+spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
+{
+}
+
+void
+spdk_lvs_rename(struct spdk_lvol_store *lvs, const char *new_name,
+ spdk_lvs_op_complete cb_fn, void *cb_arg)
+{
+ if (g_lvs_with_name_already_exists) {
+ g_lvolerrno = -EEXIST;
+ } else {
+ snprintf(lvs->name, sizeof(lvs->name), "%s", new_name);
+ g_lvolerrno = 0;
+ }
+
+ cb_fn(cb_arg, g_lvolerrno);
+}
+
+void
+spdk_lvol_rename(struct spdk_lvol *lvol, const char *new_name,
+ spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *tmp;
+
+ if (strncmp(lvol->name, new_name, SPDK_LVOL_NAME_MAX) == 0) {
+ cb_fn(cb_arg, 0);
+ return;
+ }
+
+ TAILQ_FOREACH(tmp, &lvol->lvol_store->lvols, link) {
+ if (strncmp(tmp->name, new_name, SPDK_LVOL_NAME_MAX) == 0) {
+ SPDK_ERRLOG("Lvol %s already exists in lvol store %s\n", new_name, lvol->lvol_store->name);
+ cb_fn(cb_arg, -EEXIST);
+ return;
+ }
+ }
+
+ snprintf(lvol->name, sizeof(lvol->name), "%s", new_name);
+
+ cb_fn(cb_arg, g_lvolerrno);
+}
+
+void
+spdk_lvol_open(struct spdk_lvol *lvol, spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, lvol, g_lvolerrno);
+}
+
+uint64_t
+spdk_blob_get_num_clusters(struct spdk_blob *b)
+{
+ return 0;
+}
+
+int
+spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
+ size_t *count)
+{
+ *count = 0;
+ return 0;
+}
+
+spdk_blob_id
+spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid)
+{
+ return 0;
+}
+
+bool g_blob_is_read_only = false;
+
+bool
+spdk_blob_is_read_only(struct spdk_blob *blob)
+{
+ return g_blob_is_read_only;
+}
+
+bool
+spdk_blob_is_snapshot(struct spdk_blob *blob)
+{
+ return false;
+}
+
+bool
+spdk_blob_is_clone(struct spdk_blob *blob)
+{
+ return false;
+}
+
+bool
+spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
+{
+ return false;
+}
+
+static struct spdk_lvol *_lvol_create(struct spdk_lvol_store *lvs);
+
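+/* Mock lvs load: on success, fabricate an lvol store holding g_num_lvols
+ * lvols so tests can control how many lvols an examine "finds". */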
+void
+spdk_lvs_load(struct spdk_bs_dev *dev,
+ spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol_store *lvs = NULL;
+ int i;
+ int lvserrno = g_lvserrno;
+
+ if (lvserrno != 0) {
+		/* On error the blobstore destroys bs_dev itself
+		 * by putting back io channels.
+		 * This operation is asynchronous and completes
+		 * after the lvol callback has been called. */
+ cb_fn(cb_arg, g_lvol_store, lvserrno);
+ dev->destroy(dev);
+ return;
+ }
+
+ lvs = calloc(1, sizeof(*lvs));
+ SPDK_CU_ASSERT_FATAL(lvs != NULL);
+ TAILQ_INIT(&lvs->lvols);
+ TAILQ_INIT(&lvs->pending_lvols);
+ spdk_uuid_generate(&lvs->uuid);
+ lvs->bs_dev = dev;
+ for (i = 0; i < g_num_lvols; i++) {
+ _lvol_create(lvs);
+ }
+
+ cb_fn(cb_arg, lvs, lvserrno);
+}
+
+int
+spdk_bs_bdev_claim(struct spdk_bs_dev *bs_dev, struct spdk_bdev_module *module)
+{
+ if (lvol_already_opened == true) {
+ return -1;
+ }
+
+ lvol_already_opened = true;
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *vbdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ int rc;
+
+ SPDK_CU_ASSERT_FATAL(vbdev != NULL);
+ rc = vbdev->fn_table->destruct(vbdev->ctxt);
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, rc);
+}
+
+void
+spdk_bdev_module_finish_done(void)
+{
+ return;
+}
+
+uint64_t
+spdk_bs_get_page_size(struct spdk_blob_store *bs)
+{
+ return SPDK_BS_PAGE_SIZE;
+}
+
+uint64_t
+spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
+{
+ return SPDK_BS_PAGE_SIZE;
+}
+
+static void
+bdev_blob_destroy(struct spdk_bs_dev *bs_dev)
+{
+ CU_ASSERT(bs_dev != NULL);
+ free(bs_dev);
+ lvol_already_opened = false;
+}
+
+struct spdk_bs_dev *
+spdk_bdev_create_bs_dev(struct spdk_bdev *bdev, spdk_bdev_remove_cb_t remove_cb, void *remove_ctx)
+{
+ struct spdk_bs_dev *bs_dev;
+
+ if (lvol_already_opened == true || bdev == NULL) {
+ return NULL;
+ }
+
+ bs_dev = calloc(1, sizeof(*bs_dev));
+ SPDK_CU_ASSERT_FATAL(bs_dev != NULL);
+ bs_dev->destroy = bdev_blob_destroy;
+
+ return bs_dev;
+}
+
+void
+spdk_lvs_opts_init(struct spdk_lvs_opts *opts)
+{
+}
+
+int
+spdk_lvs_init(struct spdk_bs_dev *bs_dev, struct spdk_lvs_opts *o,
+ spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol_store *lvs;
+ int error = 0;
+
+ if (lvol_store_initialize_fail) {
+ return -1;
+ }
+
+ if (lvol_store_initialize_cb_fail) {
+ bs_dev->destroy(bs_dev);
+ lvs = NULL;
+ error = -1;
+ } else {
+ lvs = calloc(1, sizeof(*lvs));
+ SPDK_CU_ASSERT_FATAL(lvs != NULL);
+ TAILQ_INIT(&lvs->lvols);
+ TAILQ_INIT(&lvs->pending_lvols);
+ spdk_uuid_generate(&lvs->uuid);
+ snprintf(lvs->name, sizeof(lvs->name), "%s", o->name);
+ lvs->bs_dev = bs_dev;
+ error = 0;
+ }
+ cb_fn(cb_arg, lvs, error);
+
+ return 0;
+}
+
+int
+spdk_lvs_unload(struct spdk_lvol_store *lvs, spdk_lvs_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *lvol, *tmp;
+
+ TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) {
+ TAILQ_REMOVE(&lvs->lvols, lvol, link);
+ free(lvol);
+ }
+ g_lvol_store = NULL;
+
+ lvs->bs_dev->destroy(lvs->bs_dev);
+ free(lvs);
+
+ if (cb_fn != NULL) {
+ cb_fn(cb_arg, 0);
+ }
+
+ return 0;
+}
+
+int
+spdk_lvs_destroy(struct spdk_lvol_store *lvs, spdk_lvs_op_complete cb_fn,
+ void *cb_arg)
+{
+ struct spdk_lvol *lvol, *tmp;
+ char *alias;
+
+ TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) {
+ TAILQ_REMOVE(&lvs->lvols, lvol, link);
+
+ alias = spdk_sprintf_alloc("%s/%s", lvs->name, lvol->name);
+ if (alias == NULL) {
+ SPDK_ERRLOG("Cannot alloc memory for alias\n");
+ return -1;
+ }
+ spdk_bdev_alias_del(lvol->bdev, alias);
+
+ free(alias);
+ free(lvol);
+ }
+ g_lvol_store = NULL;
+
+ lvs->bs_dev->destroy(lvs->bs_dev);
+ free(lvs);
+
+ if (cb_fn != NULL) {
+ cb_fn(cb_arg, 0);
+ }
+
+ return 0;
+}
+
+void
+spdk_lvol_resize(struct spdk_lvol *lvol, size_t sz, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_lvol_set_read_only(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
+uint64_t
+spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
+{
+ return g_cluster_size;
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ if (!strcmp(g_base_bdev->name, bdev_name)) {
+ return g_base_bdev;
+ }
+
+ return NULL;
+}
+
+void
+spdk_lvol_close(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ lvol->ref_count--;
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, 0);
+}
+
+bool
+spdk_lvol_deletable(struct spdk_lvol *lvol)
+{
+ return true;
+}
+
+void
+spdk_lvol_destroy(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+	if (lvol->ref_count != 0) {
+		cb_fn(cb_arg, -ENODEV);
+		return;
+	}
+
+ TAILQ_REMOVE(&lvol->lvol_store->lvols, lvol, link);
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, 0);
+
+ g_lvol = NULL;
+ free(lvol);
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+}
+
+struct spdk_io_channel *spdk_lvol_get_io_channel(struct spdk_lvol *lvol)
+{
+ CU_ASSERT(lvol == g_lvol);
+ return g_ch;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ CU_ASSERT(cb == lvol_get_buf_cb);
+}
+
+void
+spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ void *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ void *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+int
+spdk_bdev_register(struct spdk_bdev *vbdev)
+{
+ TAILQ_INIT(&vbdev->aliases);
+
+ g_registered_bdevs++;
+ return 0;
+}
+
+void
+spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
+{
+ SPDK_CU_ASSERT_FATAL(g_examine_done != true);
+ g_examine_done = true;
+}
+
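+/* Test helper: allocate an lvol with one reference taken and link it into the
+ * given lvol store, as the real blobstore-backed create path would. */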
+static struct spdk_lvol *
+_lvol_create(struct spdk_lvol_store *lvs)
+{
+ struct spdk_lvol *lvol = calloc(1, sizeof(*lvol));
+
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+
+ lvol->lvol_store = lvs;
+ lvol->ref_count++;
+ snprintf(lvol->unique_id, sizeof(lvol->unique_id), "%s", "UNIT_TEST_UUID");
+
+ TAILQ_INSERT_TAIL(&lvol->lvol_store->lvols, lvol, link);
+
+ return lvol;
+}
+
+int
+spdk_lvol_create(struct spdk_lvol_store *lvs, const char *name, size_t sz,
+ bool thin_provision, enum lvol_clear_method clear_method, spdk_lvol_op_with_handle_complete cb_fn,
+ void *cb_arg)
+{
+ struct spdk_lvol *lvol;
+
+ lvol = _lvol_create(lvs);
+ snprintf(lvol->name, sizeof(lvol->name), "%s", name);
+ cb_fn(cb_arg, lvol, 0);
+
+ return 0;
+}
+
+void
+spdk_lvol_create_snapshot(struct spdk_lvol *lvol, const char *snapshot_name,
+ spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *snap;
+
+ snap = _lvol_create(lvol->lvol_store);
+ snprintf(snap->name, sizeof(snap->name), "%s", snapshot_name);
+ cb_fn(cb_arg, snap, 0);
+}
+
+void
+spdk_lvol_create_clone(struct spdk_lvol *lvol, const char *clone_name,
+ spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *clone;
+
+ clone = _lvol_create(lvol->lvol_store);
+ snprintf(clone->name, sizeof(clone->name), "%s", clone_name);
+ cb_fn(cb_arg, clone, 0);
+}
+
+static void
+lvol_store_op_complete(void *cb_arg, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+ return;
+}
+
+static void
+lvol_store_op_with_handle_complete(void *cb_arg, struct spdk_lvol_store *lvs, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+ g_lvol_store = lvs;
+ return;
+}
+
+static void
+vbdev_lvol_create_complete(void *cb_arg, struct spdk_lvol *lvol, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+ g_lvol = lvol;
+}
+
+static void
+vbdev_lvol_resize_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
+static void
+vbdev_lvol_set_read_only_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
+static void
+vbdev_lvol_rename_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
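+/* Create an lvol store with a single lvol, then destruct the store and rely
+ * on it to unload the remaining lvol. */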
+static void
+ut_lvs_destroy(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ spdk_uuid_generate(&lvs->uuid);
+
+ /* Successfully create lvol, which should be unloaded with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Unload lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_init(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_snapshot(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+ struct spdk_lvol *lvol = NULL;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ lvol = g_lvol;
+
+ /* Successful snap create */
+ vbdev_lvol_create_snapshot(lvol, "snap", vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Successful snap destroy */
+ g_lvol = lvol;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_clone(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+ struct spdk_lvol *lvol = NULL;
+ struct spdk_lvol *snap = NULL;
+ struct spdk_lvol *clone = NULL;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ lvol = g_lvol;
+
+ /* Successful snap create */
+ vbdev_lvol_create_snapshot(lvol, "snap", vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ snap = g_lvol;
+
+ /* Successful clone create */
+ vbdev_lvol_create_clone(snap, "clone", vbdev_lvol_create_complete, NULL);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ clone = g_lvol;
+
+ /* Successful lvol destroy */
+ g_lvol = lvol;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Successful clone destroy */
+ g_lvol = clone;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Successful lvol destroy */
+ g_lvol = snap;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
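+/* Simulate base bdev hot-removal: a NULL bdev must be tolerated, and removing
+ * the real base bdev must tear down the lvol store. */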
+static void
+ut_lvol_hotremove(void)
+{
+ int rc = 0;
+
+ lvol_store_initialize_fail = false;
+ lvol_store_initialize_cb_fail = false;
+ lvol_already_opened = false;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ /* Hot remove callback with NULL - stability check */
+ vbdev_lvs_hotremove_cb(NULL);
+
+ /* Hot remove lvs on bdev removal */
+ vbdev_lvs_hotremove_cb(&g_bdev);
+
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_lvol_pairs));
+}
+
+static void
+ut_lvs_examine_check(bool success)
+{
+ struct lvol_store_bdev *lvs_bdev;
+
+	/* Examine must have finished regardless of the result */
+ CU_ASSERT(g_examine_done == true);
+ g_examine_done = false;
+
+ if (success) {
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_spdk_lvol_pairs));
+ lvs_bdev = TAILQ_FIRST(&g_spdk_lvol_pairs);
+ SPDK_CU_ASSERT_FATAL(lvs_bdev != NULL);
+ g_lvol_store = lvs_bdev->lvs;
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ } else {
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_spdk_lvol_pairs));
+ g_lvol_store = NULL;
+ }
+}
+
+static void
+ut_lvol_examine(void)
+{
+ /* Examine unsuccessfully - bdev already opened */
+ g_lvserrno = -1;
+ lvol_already_opened = true;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(false);
+
+ /* Examine unsuccessfully - fail on lvol store */
+ g_lvserrno = -1;
+ lvol_already_opened = false;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(false);
+
+ /* Examine successfully
+ * - one lvol fails to load
+ * - lvs is loaded with no lvols present */
+ g_lvserrno = 0;
+ g_lvolerrno = -1;
+ g_num_lvols = 1;
+ lvol_already_opened = false;
+ g_registered_bdevs = 0;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(true);
+ CU_ASSERT(g_registered_bdevs == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_store->lvols));
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Examine successfully */
+ g_lvserrno = 0;
+ g_lvolerrno = 0;
+ g_registered_bdevs = 0;
+ lvol_already_opened = false;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(true);
+ CU_ASSERT(g_registered_bdevs != 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_lvol_store->lvols));
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+}
+
+static void
+ut_lvol_rename(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *lvol2;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvols create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol = g_lvol;
+
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol2", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol2 = g_lvol;
+
+ /* Successful rename lvol */
+ vbdev_lvol_rename(lvol, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "new_lvol_name");
+
+ /* Renaming lvol with name already existing */
+ g_bdev_alias_already_exists = true;
+ vbdev_lvol_rename(lvol2, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ g_bdev_alias_already_exists = false;
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno != 0);
+ CU_ASSERT_STRING_NOT_EQUAL(lvol2->name, "new_lvol_name");
+
+	/* Renaming lvol to its own name */
+ vbdev_lvol_rename(lvol, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "new_lvol_name");
+
+ /* Successful lvols destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ vbdev_lvol_destroy(lvol2, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_destroy(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *lvol2;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvols create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol = g_lvol;
+
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol2", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol2 = g_lvol;
+
+ /* Successful lvols destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Hot remove lvol bdev */
+ vbdev_lvol_unregister(lvol2);
+
+ /* Unload lvol store */
+ vbdev_lvs_unload(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_resize(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ int sz = 10;
+ int rc = 0;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ /* Successful lvol resize */
+ g_lvolerrno = -1;
+ vbdev_lvol_resize(lvol, 20, vbdev_lvol_resize_complete, NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ CU_ASSERT(lvol->bdev->blockcnt == 20 * g_cluster_size / lvol->bdev->blocklen);
+
+ /* Resize with NULL lvol */
+ vbdev_lvol_resize(NULL, 20, vbdev_lvol_resize_complete, NULL);
+ CU_ASSERT(g_lvolerrno != 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_set_read_only(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ int sz = 10;
+ int rc = 0;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ /* Successful set lvol as read only */
+ g_lvolerrno = -1;
+ vbdev_lvol_set_read_only(lvol, vbdev_lvol_set_read_only_complete, NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvs_unload(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ spdk_uuid_generate(&lvs->uuid);
+
+	/* Successfully create lvol; unload only closes it, so it stays allocated */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Unload lvol store */
+ vbdev_lvs_unload(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(g_lvol != NULL);
+}
+
+static void
+ut_lvs_init(void)
+{
+ int rc = 0;
+ struct spdk_lvol_store *lvs;
+
+ /* spdk_lvs_init() fails */
+ lvol_store_initialize_fail = true;
+
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ lvol_store_initialize_fail = false;
+
+ /* spdk_lvs_init_cb() fails */
+ lvol_store_initialize_cb_fail = true;
+
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ lvol_store_initialize_cb_fail = false;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ /* Bdev with lvol store already claimed */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Destruct lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_vbdev_lvol_get_io_channel(void)
+{
+ struct spdk_io_channel *ch;
+
+ g_lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ ch = vbdev_lvol_get_io_channel(g_lvol);
+ CU_ASSERT(ch == g_ch);
+
+ free(g_lvol);
+}
+
+static void
+ut_vbdev_lvol_io_type_supported(void)
+{
+ struct spdk_lvol *lvol;
+ bool ret;
+
+ lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+
+ g_blob_is_read_only = false;
+
+ /* Supported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_RESET);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_UNMAP);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(ret == true);
+
+ /* Unsupported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_FLUSH);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_ADMIN);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_IO);
+ CU_ASSERT(ret == false);
+
+ g_blob_is_read_only = true;
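+	/* A read-only blob narrows support down to READ and RESET */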
+
+ /* Supported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_RESET);
+ CU_ASSERT(ret == true);
+
+ /* Unsupported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_UNMAP);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_FLUSH);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_ADMIN);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_IO);
+ CU_ASSERT(ret == false);
+
+ free(lvol);
+}
+
+static void
+ut_lvol_read_write(void)
+{
+ g_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(g_io != NULL);
+ g_base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+ g_lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ g_io->bdev = g_base_bdev;
+ g_io->bdev->ctxt = g_lvol;
+ g_io->u.bdev.offset_blocks = 20;
+ g_io->u.bdev.num_blocks = 20;
+
+	lvol_read(g_ch, g_io);
+	CU_ASSERT(g_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+	lvol_write(g_lvol, g_ch, g_io);
+	CU_ASSERT(g_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ free(g_io);
+ free(g_base_bdev);
+ free(g_lvol);
+}
+
+static void
+ut_vbdev_lvol_submit_request(void)
+{
+	struct spdk_lvol request_lvol = {};
+
+	g_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(g_io != NULL);
+ g_base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+ g_io->bdev = g_base_bdev;
+
+ g_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_base_bdev->ctxt = &request_lvol;
+ vbdev_lvol_submit_request(g_ch, g_io);
+
+ free(g_io);
+ free(g_base_bdev);
+}
+
+static void
+ut_lvs_rename(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "old_lvs_name", 0, LVS_CLEAR_WITH_UNMAP,
+ lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ g_base_bdev = calloc(1, sizeof(*g_base_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+
+ /* Successfully create lvol, which should be destroyed with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+	/* Rename lvs while lvols exist; their bdev aliases must be updated too */
+ vbdev_lvs_rename(lvs, "new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(TAILQ_FIRST(&g_lvol->bdev->aliases)->alias, "new_lvs_name/lvol");
+
+	/* Rename lvs to a name already used by another lvs */
+	/* This is a bdev_lvol test, so g_lvs_with_name_already_exists simulates an
+	 * existing lvs named 'another_new_lvs_name'; that name is in fact never compared */
+ g_lvs_with_name_already_exists = true;
+ vbdev_lvs_rename(lvs, "another_new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(TAILQ_FIRST(&g_lvol->bdev->aliases)->alias, "new_lvs_name/lvol");
+ g_lvs_with_name_already_exists = false;
+
+ /* Unload lvol store */
+ g_lvol_store = lvs;
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ free(g_base_bdev->name);
+ free(g_base_bdev);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("lvol", NULL, NULL);
+
+ CU_ADD_TEST(suite, ut_lvs_init);
+ CU_ADD_TEST(suite, ut_lvol_init);
+ CU_ADD_TEST(suite, ut_lvol_snapshot);
+ CU_ADD_TEST(suite, ut_lvol_clone);
+ CU_ADD_TEST(suite, ut_lvs_destroy);
+ CU_ADD_TEST(suite, ut_lvs_unload);
+ CU_ADD_TEST(suite, ut_lvol_resize);
+ CU_ADD_TEST(suite, ut_lvol_set_read_only);
+ CU_ADD_TEST(suite, ut_lvol_hotremove);
+ CU_ADD_TEST(suite, ut_vbdev_lvol_get_io_channel);
+ CU_ADD_TEST(suite, ut_vbdev_lvol_io_type_supported);
+ CU_ADD_TEST(suite, ut_lvol_read_write);
+ CU_ADD_TEST(suite, ut_vbdev_lvol_submit_request);
+ CU_ADD_TEST(suite, ut_lvol_examine);
+ CU_ADD_TEST(suite, ut_lvol_rename);
+ CU_ADD_TEST(suite, ut_lvol_destroy);
+ CU_ADD_TEST(suite, ut_lvs_rename);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore
new file mode 100644
index 000000000..a1d7547aa
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore
@@ -0,0 +1 @@
+vbdev_zone_block_ut
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile
new file mode 100644
index 000000000..81a9575d5
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = vbdev_zone_block_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c
new file mode 100644
index 000000000..d0ee553e3
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c
@@ -0,0 +1,1502 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/env.h"
+#include "spdk_internal/mock.h"
+#include "spdk/thread.h"
+#include "common/lib/test_env.c"
+#include "bdev/zone_block/vbdev_zone_block.c"
+#include "bdev/zone_block/vbdev_zone_block_rpc.c"
+
+#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)
+#define BLOCK_SIZE 4096
+
+/* Globals */
+uint64_t g_block_cnt;
+struct io_output *g_io_output = NULL;
+uint32_t g_max_io_size;
+uint32_t g_io_output_index;
+uint32_t g_io_comp_status;
+uint8_t g_rpc_err;
+uint8_t g_json_decode_obj_construct;
+static TAILQ_HEAD(, spdk_bdev) g_bdev_list = TAILQ_HEAD_INITIALIZER(g_bdev_list);
+void *g_rpc_req = NULL;
+static struct spdk_thread *g_thread;
+
+struct io_output {
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *ch;
+ uint64_t offset_blocks;
+ uint64_t num_blocks;
+ spdk_bdev_io_completion_cb cb;
+ void *cb_arg;
+ enum spdk_bdev_io_type iotype;
+};
+
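+/* Stub out SPDK dependencies that are irrelevant to the code under test:
+ * DEFINE_STUB generates a function returning a fixed value, DEFINE_STUB_V a
+ * void no-op. Dependencies that need real behavior are hand-mocked below. */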
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
+DEFINE_STUB(spdk_json_decode_uint64, int, (const struct spdk_json_val *val, void *out), 0);
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
+DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_named_string, int, (struct spdk_json_write_ctx *w,
+ const char *name, const char *val), 0);
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), true);
+DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
+DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
+DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
+ uint32_t state_mask));
+DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
+ struct spdk_json_write_ctx *w));
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
+ (void *)0);
+
+static void
+set_test_opts(void)
+{
+ g_max_io_size = 1024;
+}
+
+static void
+init_test_globals(uint64_t block_cnt)
+{
+ g_io_output = calloc(g_max_io_size, sizeof(struct io_output));
+ SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
+ g_io_output_index = 0;
+ g_block_cnt = block_cnt;
+}
+
+static void
+free_test_globals(void)
+{
+ free(g_io_output);
+ g_io_output = NULL;
+}
+
+void
+spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+ free(bdev_io);
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc)
+{
+ *_desc = (void *)bdev;
+ return 0;
+}
+
+struct spdk_bdev *
+spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
+{
+ return (void *)desc;
+}
+
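+/* Minimal bdev registry mock backed by g_bdev_list; register/unregister also
+ * sanity-check for duplicate or missing names */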
+int
+spdk_bdev_register(struct spdk_bdev *bdev)
+{
+ CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(bdev->name));
+ TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link);
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT_EQUAL(spdk_bdev_get_by_name(bdev->name), bdev);
+ TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
+
+ bdev->fn_table->destruct(bdev->ctxt);
+
+ if (cb_fn) {
+ cb_fn(cb_arg, 0);
+ }
+}
+
+int
+spdk_json_write_named_uint64(struct spdk_json_write_ctx *w, const char *name, uint64_t val)
+{
+	struct rpc_construct_zone_block *req = g_rpc_req;
+
+	if (strcmp(name, "zone_capacity") == 0) {
+ CU_ASSERT(req->zone_capacity == val);
+ } else if (strcmp(name, "optimal_open_zones") == 0) {
+ CU_ASSERT(req->optimal_open_zones == val);
+ }
+
+ return 0;
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return bdev->name;
+}
+
+bool
+spdk_bdev_is_zoned(const struct spdk_bdev *bdev)
+{
+ return bdev->zoned;
+}
+
+int
+spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val)
+{
+ return 0;
+}
+
+int
+spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module)
+{
+ if (bdev->internal.claim_module != NULL) {
+ return -1;
+ }
+ bdev->internal.claim_module = module;
+ return 0;
+}
+
+void
+spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
+{
+ CU_ASSERT(bdev->internal.claim_module != NULL);
+ bdev->internal.claim_module = NULL;
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false);
+}
+
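+/* Mocked JSON decode: instead of parsing JSON, copy the request the test
+ * prepared in g_rpc_req into the handler's output struct.
+ * g_json_decode_obj_construct picks between the create and delete shapes. */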
+int
+spdk_json_decode_object(const struct spdk_json_val *values,
+ const struct spdk_json_object_decoder *decoders, size_t num_decoders,
+ void *out)
+{
+ struct rpc_construct_zone_block *construct, *_construct;
+ struct rpc_delete_zone_block *delete, *_delete;
+
+ if (g_json_decode_obj_construct) {
+ construct = g_rpc_req;
+ _construct = out;
+
+ _construct->name = strdup(construct->name);
+ SPDK_CU_ASSERT_FATAL(_construct->name != NULL);
+ _construct->base_bdev = strdup(construct->base_bdev);
+ SPDK_CU_ASSERT_FATAL(_construct->base_bdev != NULL);
+ _construct->zone_capacity = construct->zone_capacity;
+ _construct->optimal_open_zones = construct->optimal_open_zones;
+ } else {
+ delete = g_rpc_req;
+ _delete = out;
+
+ _delete->name = strdup(delete->name);
+ SPDK_CU_ASSERT_FATAL(_delete->name != NULL);
+ }
+
+ return 0;
+}
+
+struct spdk_json_write_ctx *
+spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
+{
+ return (void *)1;
+}
+
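+/* Test fixture: fabricate a base bdev named "Nvme0n1" and put it on the
+ * global list so the mocked spdk_bdev_get_by_name() can find it */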
+static struct spdk_bdev *
+create_nvme_bdev(void)
+{
+ struct spdk_bdev *base_bdev;
+ char *name = "Nvme0n1";
+ base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
+ base_bdev->name = strdup(name);
+ SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
+ base_bdev->blocklen = BLOCK_SIZE;
+ base_bdev->blockcnt = g_block_cnt;
+ base_bdev->write_unit_size = 1;
+ TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
+
+ return base_bdev;
+}
+
+static void
+base_bdevs_cleanup(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev *bdev_next;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
+ free(bdev->name);
+ TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
+ free(bdev);
+ }
+ }
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ struct spdk_bdev *bdev;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ if (strcmp(bdev_name, bdev->name) == 0) {
+ return bdev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+void
+spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
+ int error_code, const char *msg)
+{
+ g_rpc_err = 1;
+}
+
+void
+spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
+ int error_code, const char *fmt, ...)
+{
+ g_rpc_err = 1;
+}
+
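+/* The read/write/unmap mocks below record each request in g_io_output so
+ * tests can verify how IO was split, then complete the child IO immediately
+ * and synchronously via its completion callback. */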
+static void
+set_io_output(struct io_output *output,
+ struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg,
+ enum spdk_bdev_io_type iotype)
+{
+ output->desc = desc;
+ output->ch = ch;
+ output->offset_blocks = offset_blocks;
+ output->num_blocks = num_blocks;
+ output->cb = cb;
+ output->cb_arg = cb_arg;
+ output->iotype = iotype;
+}
+
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+	struct io_output *output = &g_io_output[g_io_output_index];
+	struct spdk_bdev_io *child_io;
+
+	SPDK_CU_ASSERT_FATAL(g_io_output_index < g_max_io_size);
+	set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+		      SPDK_BDEV_IO_TYPE_UNMAP);
+	g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, true, cb_arg);
+
+ return 0;
+}
+
+int
+spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, void *md,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < g_max_io_size);
+
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_WRITE);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ child_io->internal.desc = desc;
+ child_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ child_io->u.bdev.iovs = iov;
+ child_io->u.bdev.iovcnt = iovcnt;
+ child_io->u.bdev.md_buf = md;
+ child_io->u.bdev.num_blocks = num_blocks;
+ child_io->u.bdev.offset_blocks = offset_blocks;
+ cb(child_io, true, cb_arg);
+
+ return 0;
+}
+
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+			struct iovec *iov, int iovcnt,
+			uint64_t offset_blocks, uint64_t num_blocks,
+			spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks,
+					       cb, cb_arg);
+}
+
+int
+spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, void *md,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < g_max_io_size);
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_READ);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, true, cb_arg);
+
+ return 0;
+}
+
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks,
+ cb, cb_arg);
+}
+
+static void
+verify_config_present(const char *name, bool presence)
+{
+ struct bdev_zone_block_config *cfg;
+ bool cfg_found;
+
+ cfg_found = false;
+
+ TAILQ_FOREACH(cfg, &g_bdev_configs, link) {
+ if (cfg->vbdev_name != NULL) {
+ if (strcmp(name, cfg->vbdev_name) == 0) {
+ cfg_found = true;
+ break;
+ }
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(cfg_found == true);
+ } else {
+ CU_ASSERT(cfg_found == false);
+ }
+}
+
+static void
+verify_bdev_present(const char *name, bool presence)
+{
+ struct bdev_zone_block *bdev;
+ bool bdev_found = false;
+
+ TAILQ_FOREACH(bdev, &g_bdev_nodes, link) {
+ if (strcmp(bdev->bdev.name, name) == 0) {
+ bdev_found = true;
+ break;
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(bdev_found == true);
+ } else {
+ CU_ASSERT(bdev_found == false);
+ }
+}
+
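+/* Build a fake RPC request in g_rpc_req; the mocked spdk_json_decode_object()
+ * above copies it into the handler's decoded struct instead of parsing JSON */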
+static void
+initialize_create_req(const char *vbdev_name, const char *base_name,
+ uint64_t zone_capacity, uint64_t optimal_open_zones, bool create_base_bdev)
+{
+ struct rpc_construct_zone_block *r;
+
+ r = g_rpc_req = calloc(1, sizeof(struct rpc_construct_zone_block));
+ SPDK_CU_ASSERT_FATAL(r != NULL);
+
+ r->name = strdup(vbdev_name);
+ SPDK_CU_ASSERT_FATAL(r->name != NULL);
+ r->base_bdev = strdup(base_name);
+ SPDK_CU_ASSERT_FATAL(r->base_bdev != NULL);
+ r->zone_capacity = zone_capacity;
+ r->optimal_open_zones = optimal_open_zones;
+
+ if (create_base_bdev == true) {
+ create_nvme_bdev();
+ }
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+}
+
+static void
+free_create_req(void)
+{
+ struct rpc_construct_zone_block *r = g_rpc_req;
+
+ free(r->name);
+ free(r->base_bdev);
+ free(r);
+ g_rpc_req = NULL;
+}
+
+static void
+initialize_delete_req(const char *vbdev_name)
+{
+ struct rpc_delete_zone_block *r;
+
+ r = g_rpc_req = calloc(1, sizeof(struct rpc_delete_zone_block));
+ SPDK_CU_ASSERT_FATAL(r != NULL);
+ r->name = strdup(vbdev_name);
+ SPDK_CU_ASSERT_FATAL(r->name != NULL);
+
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+}
+
+static void
+free_delete_req(void)
+{
+ struct rpc_delete_zone_block *r = g_rpc_req;
+
+ free(r->name);
+ free(r);
+ g_rpc_req = NULL;
+}
+
+static void
+verify_zone_config(bool presence)
+{
+ struct rpc_construct_zone_block *r = g_rpc_req;
+ struct bdev_zone_block_config *cfg = NULL;
+
+ TAILQ_FOREACH(cfg, &g_bdev_configs, link) {
+ if (strcmp(r->name, cfg->vbdev_name) == 0) {
+ if (presence == false) {
+ break;
+ }
+ CU_ASSERT(strcmp(r->base_bdev, cfg->bdev_name) == 0);
+ CU_ASSERT(r->zone_capacity == cfg->zone_capacity);
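+			/* A requested optimal_open_zones of 0 is normalized to 1 */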
+ CU_ASSERT(spdk_max(r->optimal_open_zones, 1) == cfg->optimal_open_zones);
+ break;
+ }
+ }
+
+ if (presence) {
+ CU_ASSERT(cfg != NULL);
+ } else {
+ CU_ASSERT(cfg == NULL);
+ }
+}
+
+static void
+verify_zone_bdev(bool presence)
+{
+ struct rpc_construct_zone_block *r = g_rpc_req;
+ struct block_zone *zone;
+ struct bdev_zone_block *bdev;
+ bool bdev_found = false;
+ uint32_t i;
+ uint64_t expected_num_zones;
+ uint64_t expected_optimal_open_zones;
+
+ TAILQ_FOREACH(bdev, &g_bdev_nodes, link) {
+ if (strcmp(bdev->bdev.name, r->name) == 0) {
+ bdev_found = true;
+ if (presence == false) {
+ break;
+ }
+
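+			/* Zone size is the capacity rounded up to a power of two; the
+			 * zone count is trimmed to a multiple of optimal_open_zones */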
+ expected_optimal_open_zones = spdk_max(r->optimal_open_zones, 1);
+ expected_num_zones = g_block_cnt / spdk_align64pow2(r->zone_capacity) / expected_optimal_open_zones;
+ expected_num_zones *= expected_optimal_open_zones;
+
+ CU_ASSERT(bdev->num_zones == expected_num_zones);
+ CU_ASSERT(bdev->bdev.zoned == true);
+ CU_ASSERT(bdev->bdev.blockcnt == expected_num_zones * spdk_align64pow2(r->zone_capacity));
+ CU_ASSERT(bdev->bdev.blocklen == BLOCK_SIZE);
+ CU_ASSERT(bdev->bdev.ctxt == bdev);
+ CU_ASSERT(bdev->bdev.fn_table == &zone_block_fn_table);
+ CU_ASSERT(bdev->bdev.module == &bdev_zoned_if);
+ CU_ASSERT(bdev->bdev.write_unit_size == 1);
+ CU_ASSERT(bdev->bdev.zone_size == spdk_align64pow2(r->zone_capacity));
+ CU_ASSERT(bdev->bdev.optimal_open_zones == expected_optimal_open_zones);
+ CU_ASSERT(bdev->bdev.max_open_zones == 0);
+
+ for (i = 0; i < bdev->num_zones; i++) {
+ zone = &bdev->zones[i];
+ CU_ASSERT(zone->zone_info.state == SPDK_BDEV_ZONE_STATE_FULL);
+ CU_ASSERT(zone->zone_info.capacity == r->zone_capacity);
+ }
+ break;
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(bdev_found == true);
+ } else {
+ CU_ASSERT(bdev_found == false);
+ }
+}
+
+static void
+send_create_vbdev(char *vdev_name, char *name, uint64_t zone_capacity, uint64_t optimal_open_zones,
+ bool create_bdev, bool success)
+{
+ initialize_create_req(vdev_name, name, zone_capacity, optimal_open_zones, create_bdev);
+ rpc_zone_block_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err != success);
+ verify_zone_config(success);
+ verify_zone_bdev(success);
+ free_create_req();
+}
+
+static void
+send_delete_vbdev(char *name, bool success)
+{
+ initialize_delete_req(name);
+ rpc_zone_block_delete(NULL, NULL);
+ verify_config_present(name, false);
+ verify_bdev_present(name, false);
+ CU_ASSERT(g_rpc_err != success);
+ free_delete_req();
+}
+
+static void
+test_cleanup(void)
+{
+ CU_ASSERT(spdk_thread_is_idle(g_thread));
+ zone_block_finish();
+ base_bdevs_cleanup();
+ free_test_globals();
+}
+
+static void
+test_zone_block_create(void)
+{
+ struct spdk_bdev *bdev;
+ char *name = "Nvme0n1";
+ size_t num_zones = 16;
+ size_t zone_capacity = BLOCK_CNT / num_zones;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zoned virtual device before nvme device */
+ verify_config_present("zone_dev1", false);
+ verify_bdev_present("zone_dev1", false);
+ initialize_create_req("zone_dev1", name, zone_capacity, 1, false);
+ rpc_zone_block_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_zone_config(true);
+ verify_zone_bdev(false);
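+	/* The vbdev itself only appears once the base bdev exists and is examined */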
+ bdev = create_nvme_bdev();
+ zone_block_examine(bdev);
+ verify_zone_bdev(true);
+ free_create_req();
+
+ /* Delete bdev */
+ send_delete_vbdev("zone_dev1", true);
+
+ /* Create zoned virtual device and verify its correctness */
+ verify_config_present("zone_dev1", false);
+ send_create_vbdev("zone_dev1", name, zone_capacity, 1, false, true);
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ test_cleanup();
+}
+
+static void
+test_zone_block_create_invalid(void)
+{
+ char *name = "Nvme0n1";
+ size_t num_zones = 8;
+ size_t zone_capacity = BLOCK_CNT / num_zones;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zoned virtual device and verify its correctness */
+ verify_config_present("zone_dev1", false);
+ verify_bdev_present("zone_dev1", false);
+ send_create_vbdev("zone_dev1", name, zone_capacity, 1, true, true);
+
+ /* Try to create another zoned virtual device on the same bdev */
+ send_create_vbdev("zone_dev2", name, zone_capacity, 1, false, false);
+
+ /* Try to create zoned virtual device on the zoned bdev */
+ send_create_vbdev("zone_dev2", "zone_dev1", zone_capacity, 1, false, false);
+
+ /* Unclaim the base bdev */
+ send_delete_vbdev("zone_dev1", true);
+
+	/* Try to create zoned virtual device with zero zone capacity */
+	send_create_vbdev("zone_dev1", name, 0, 1, false, false);
+
+	/* Try to create zoned virtual device with zero optimal open zones */
+	send_create_vbdev("zone_dev1", name, zone_capacity, 0, false, false);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ test_cleanup();
+}
+
+static void
+bdev_io_zone_info_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
+ uint64_t zone_id, uint32_t num_zones)
+{
+ bdev_io->bdev = bdev;
+ bdev_io->type = SPDK_BDEV_IO_TYPE_GET_ZONE_INFO;
+
+	bdev_io->u.zone_mgmt.zone_id = zone_id;
+	bdev_io->u.zone_mgmt.num_zones = num_zones;
+ if (num_zones) {
+ bdev_io->u.zone_mgmt.buf = calloc(num_zones, sizeof(struct spdk_bdev_zone_info));
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.zone_mgmt.buf != NULL);
+ }
+}
+
+static void
+bdev_io_zone_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
+ uint64_t zone_id, uint32_t num_zones, uint8_t zone_action)
+{
+ bdev_io->bdev = bdev;
+ bdev_io->type = SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT;
+
+ bdev_io->u.zone_mgmt.zone_action = zone_action;
+ bdev_io->u.zone_mgmt.zone_id = zone_id;
+}
+
+static void
+bdev_io_zone_cleanup(struct spdk_bdev_io *bdev_io)
+{
+ free(bdev_io->u.zone_mgmt.buf);
+ free(bdev_io);
+}
+
+static void
+bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
+ uint64_t lba, uint64_t blocks, int16_t iotype)
+{
+ bdev_io->bdev = bdev;
+ bdev_io->u.bdev.offset_blocks = lba;
+ bdev_io->u.bdev.num_blocks = blocks;
+ bdev_io->type = iotype;
+
+ if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
+ return;
+ }
+
+ bdev_io->u.bdev.iovcnt = 1;
+ bdev_io->u.bdev.iovs = &bdev_io->iov;
+ bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * BLOCK_SIZE);
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
+ bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_SIZE;
+}
+
+static void
+bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
+{
+ free(bdev_io->iov.iov_base);
+ free(bdev_io);
+}
+
+static struct bdev_zone_block *
+create_and_get_vbdev(char *vdev_name, char *name, uint64_t num_zones, uint64_t optimal_open_zones,
+ bool create_bdev)
+{
+ size_t zone_size = g_block_cnt / num_zones;
+ struct bdev_zone_block *bdev = NULL;
+
+ send_create_vbdev(vdev_name, name, zone_size, optimal_open_zones, create_bdev, true);
+
+ TAILQ_FOREACH(bdev, &g_bdev_nodes, link) {
+ if (strcmp(bdev->bdev.name, vdev_name) == 0) {
+ break;
+ }
+ }
+
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ return bdev;
+}
+
+static void
+test_supported_io_types(void)
+{
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 8;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT) == true);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_APPEND) == true);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_READ) == true);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE) == true);
+
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_RESET) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY) == false);
+
+ send_delete_vbdev("zone_dev1", true);
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ test_cleanup();
+}
+
+static void
+send_zone_info(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint64_t wp,
+ enum spdk_bdev_zone_state state, uint32_t output_index, bool success)
+{
+ struct spdk_bdev_io *bdev_io;
+ struct spdk_bdev_zone_info *info;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, zone_id, 1);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = output_index;
+
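+	/* Pre-set the completion status to the opposite of the expectation so a
+	 * request that never completes fails the assert below */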
+ g_io_comp_status = !success;
+ zone_block_submit_request(ch, bdev_io);
+ CU_ASSERT(g_io_comp_status == success);
+
+ if (success) {
+ info = (struct spdk_bdev_zone_info *)bdev_io->u.zone_mgmt.buf;
+ CU_ASSERT(info->zone_id == zone_id);
+ CU_ASSERT(info->capacity == bdev->zone_capacity);
+ CU_ASSERT(info->write_pointer == wp);
+ CU_ASSERT(info->state == state);
+ }
+
+ bdev_io_zone_cleanup(bdev_io);
+}
+
+static void
+test_get_zone_info(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ struct spdk_bdev_io *bdev_io;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 8, i;
+ struct spdk_bdev_zone_info *info;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
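+	/* All zones start out FULL, so each write pointer should sit at
+	 * zone start + zone capacity */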
+ /* Get info about each zone */
+ for (i = 0; i < num_zones; i++) {
+ send_zone_info(bdev, ch, i * bdev->bdev.zone_size,
+ i * bdev->bdev.zone_size + bdev->zone_capacity, SPDK_BDEV_ZONE_STATE_FULL, 0, true);
+ }
+
+ /* Send info asking for 0 zones */
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, 0);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = 0;
+ zone_block_submit_request(ch, bdev_io);
+ CU_ASSERT(g_io_comp_status);
+ bdev_io_zone_cleanup(bdev_io);
+
+ /* Send info asking for all zones */
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, num_zones);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = 0;
+ zone_block_submit_request(ch, bdev_io);
+ CU_ASSERT(g_io_comp_status);
+
+ for (i = 0; i < num_zones; i++) {
+ info = &(((struct spdk_bdev_zone_info *)bdev_io->u.zone_mgmt.buf)[i]);
+ CU_ASSERT(info->zone_id == i * bdev->bdev.zone_size);
+ CU_ASSERT(info->capacity == bdev->zone_capacity);
+ CU_ASSERT(info->write_pointer == i * bdev->bdev.zone_size + bdev->zone_capacity);
+ CU_ASSERT(info->state == SPDK_BDEV_ZONE_STATE_FULL);
+ }
+ bdev_io_zone_cleanup(bdev_io);
+
+ /* Send info asking for too many zones */
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, num_zones + 1);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = 0;
+ zone_block_submit_request(ch, bdev_io);
+ CU_ASSERT(!g_io_comp_status);
+ bdev_io_zone_cleanup(bdev_io);
+
+ /* Send info with misaligned start LBA */
+ send_zone_info(bdev, ch, 1, 0, SPDK_BDEV_ZONE_STATE_FULL, 0, false);
+
+ /* Send info with too high LBA */
+ send_zone_info(bdev, ch, num_zones * bdev->bdev.zone_size, 0, SPDK_BDEV_ZONE_STATE_FULL, 0,
+ false);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
+static void
+send_zone_management(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint32_t output_index, enum spdk_bdev_zone_action action, bool success)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_zone_initialize(bdev_io, &bdev->bdev, zone_id, 1, action);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = output_index;
+
+ g_io_comp_status = !success;
+ zone_block_submit_request(ch, bdev_io);
+
+ CU_ASSERT(g_io_comp_status == success);
+ bdev_io_zone_cleanup(bdev_io);
+}
+
+static void
+send_reset_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint32_t output_index, bool success)
+{
+ send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_RESET, success);
+}
+
+static void
+send_open_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint32_t output_index, bool success)
+{
+ send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_OPEN, success);
+}
+
+static void
+send_close_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint32_t output_index, bool success)
+{
+ send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_CLOSE, success);
+}
+
+static void
+send_finish_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint32_t output_index, bool success)
+{
+ send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_FINISH, success);
+}
+
+static void
+test_reset_zone(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 16;
+ uint64_t zone_id;
+ uint32_t output_index = 0;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Send reset to zone 0 */
+ zone_id = 0;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+
+ /* Send reset to last zone */
+ zone_id = (num_zones - 1) * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+
+ /* Send reset with misaligned LBA */
+ zone_id = 1;
+ send_reset_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send reset to non-existing zone */
+ zone_id = num_zones * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, false);
+
+	/* Send reset to an already reset zone */
+ zone_id = 0;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
+static void
+send_write_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
+ uint64_t blocks, uint32_t output_index, bool success)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = output_index;
+
+ g_io_comp_status = !success;
+ zone_block_submit_request(ch, bdev_io);
+
+ CU_ASSERT(g_io_comp_status == success);
+ bdev_io_cleanup(bdev_io);
+}
+
+static void
+send_read_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
+ uint64_t blocks, uint32_t output_index, bool success)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = output_index;
+
+ g_io_comp_status = !success;
+ zone_block_submit_request(ch, bdev_io);
+
+ CU_ASSERT(g_io_comp_status == success);
+ bdev_io_cleanup(bdev_io);
+}
+
+static void
+send_append_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
+ uint64_t blocks, uint32_t output_index, bool success, uint64_t wp)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_ZONE_APPEND);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = output_index;
+
+ g_io_comp_status = !success;
+ zone_block_submit_request(ch, bdev_io);
+
+ CU_ASSERT(g_io_comp_status == success);
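+	/* A successful zone append reports the LBA it landed on via
+	 * offset_blocks, which should equal the expected write pointer */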
+ if (success) {
+ CU_ASSERT(bdev_io->u.bdev.offset_blocks == wp);
+ }
+ bdev_io_cleanup(bdev_io);
+}
+
+static void
+test_open_zone(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 16;
+ uint64_t zone_id;
+ uint32_t output_index = 0, i;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+	/* Try to open a full zone (all zones start out FULL) */
+ zone_id = 0;
+ send_open_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Open all zones */
+ for (i = 0; i < num_zones; i++) {
+ zone_id = i * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+ }
+ for (i = 0; i < num_zones; i++) {
+ zone_id = i * bdev->bdev.zone_size;
+ send_open_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+ }
+
+ /* Reset one of the zones and open it again */
+ zone_id = 0;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+ send_open_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Send open with misaligned LBA */
+ zone_id = 0;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ zone_id = 1;
+ send_open_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send open to non-existing zone */
+ zone_id = num_zones * bdev->bdev.zone_size;
+ send_open_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send open to already opened zone */
+ zone_id = bdev->bdev.zone_size;
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+ send_open_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
+static void
+test_zone_write(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 20;
+ uint64_t zone_id, lba, block_len;
+ uint32_t output_index = 0, i;
+
+ init_test_globals(20 * 1024ul);
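+	/* 20 * 1024 blocks split across 20 zones gives 1024-block zones, so the
+	 * fill loops below can cover a whole zone in 16-block chunks */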
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Write to full zone */
+ lba = 0;
+ send_write_zone(bdev, ch, lba, 1, output_index, false);
+
+ /* Write out of device range */
+ lba = g_block_cnt;
+ send_write_zone(bdev, ch, lba, 1, output_index, false);
+
+ /* Write 1 sector to zone 0 */
+ lba = 0;
+ send_reset_zone(bdev, ch, lba, output_index, true);
+ send_write_zone(bdev, ch, lba, 1, output_index, true);
+ send_zone_info(bdev, ch, lba, 1, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Write to another zone */
+ lba = bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, lba, output_index, true);
+ send_write_zone(bdev, ch, lba, 5, output_index, true);
+ send_zone_info(bdev, ch, lba, lba + 5, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Fill zone 0 and verify zone state change */
+ block_len = 15;
+ send_write_zone(bdev, ch, 1, block_len, output_index, true);
+ block_len = 16;
+ for (i = block_len; i < bdev->bdev.zone_size; i += block_len) {
+ send_write_zone(bdev, ch, i, block_len, output_index, true);
+ }
+ send_zone_info(bdev, ch, 0, bdev->bdev.zone_size, SPDK_BDEV_ZONE_STATE_FULL, output_index,
+ true);
+
+ /* Write to wrong write pointer */
+ lba = bdev->bdev.zone_size;
+ send_write_zone(bdev, ch, lba + 7, 1, output_index, false);
+ /* Write to already written sectors */
+ send_write_zone(bdev, ch, lba, 1, output_index, false);
+
+ /* Write to two zones at once */
+ for (i = 0; i < num_zones; i++) {
+ zone_id = i * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+ }
+ block_len = 16;
+ for (i = 0; i < bdev->bdev.zone_size - block_len; i += block_len) {
+ send_write_zone(bdev, ch, i, block_len, output_index, true);
+ }
+ send_write_zone(bdev, ch, bdev->bdev.zone_size - block_len, 32, output_index, false);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
+static void
+test_zone_read(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 20;
+ uint64_t lba, block_len;
+ uint32_t output_index = 0;
+
+ init_test_globals(20 * 1024ul);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Read out of device range */
+ block_len = 16;
+ lba = g_block_cnt - block_len / 2;
+ send_read_zone(bdev, ch, lba, block_len, output_index, false);
+
+ block_len = 1;
+ lba = g_block_cnt;
+ send_read_zone(bdev, ch, lba, block_len, output_index, false);
+
+ /* Read from full zone */
+ lba = 0;
+ send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+ /* Read from empty zone */
+ send_reset_zone(bdev, ch, lba, output_index, true);
+ send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+ /* Read written sectors from open zone */
+ send_write_zone(bdev, ch, lba, 1, output_index, true);
+ send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+ /* Read partially written sectors from open zone */
+ send_read_zone(bdev, ch, lba, 2, output_index, true);
+
+ /* Read unwritten sectors from open zone */
+ lba = 2;
+ send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+ /* Read from two zones at once */
+ block_len = 16;
+ lba = bdev->bdev.zone_size - block_len / 2;
+ send_read_zone(bdev, ch, lba, block_len, output_index, false);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+ test_cleanup();
+}
+
+static void
+test_close_zone(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 20;
+ uint64_t zone_id;
+ uint32_t output_index = 0;
+
+ init_test_globals(20 * 1024ul);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Try to close a full zone */
+ zone_id = 0;
+ send_close_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Try to close an empty zone */
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_close_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Close an open zone */
+ send_open_zone(bdev, ch, zone_id, output_index, true);
+ send_close_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
+
+ /* Close a closed zone */
+ send_close_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
+
+ /* Send close to last zone */
+ zone_id = (num_zones - 1) * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_open_zone(bdev, ch, zone_id, output_index, true);
+ send_close_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
+
+ /* Send close with misaligned LBA */
+ zone_id = 1;
+ send_close_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send close to non-existing zone */
+ zone_id = num_zones * bdev->bdev.zone_size;
+ send_close_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+ test_cleanup();
+}
+
+static void
+test_finish_zone(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 20;
+ uint64_t zone_id, wp;
+ uint32_t output_index = 0;
+
+ init_test_globals(20 * 1024ul);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Reset an unused zone */
+ send_reset_zone(bdev, ch, bdev->bdev.zone_size, output_index, true);
+
+ /* Finish a full zone */
+ zone_id = 0;
+ wp = bdev->bdev.zone_size;
+ send_finish_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
+
+ /* Finish an empty zone */
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_finish_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
+
+ /* Finish an open zone */
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_write_zone(bdev, ch, zone_id, 1, output_index, true);
+ send_finish_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
+
+ /* Send finish with misaligned LBA */
+ zone_id = 1;
+ send_finish_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send finish to non-existing zone */
+ zone_id = num_zones * bdev->bdev.zone_size;
+ send_finish_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Make sure unused zone wasn't written to */
+ zone_id = bdev->bdev.zone_size;
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
+static void
+test_append_zone(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 20;
+ uint64_t zone_id, block_len, i;
+ uint32_t output_index = 0;
+
+ init_test_globals(20 * 1024ul);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Append to full zone */
+ zone_id = 0;
+ send_append_zone(bdev, ch, zone_id, 1, output_index, false, 0);
+
+ /* Append out of device range */
+ zone_id = g_block_cnt;
+ send_append_zone(bdev, ch, zone_id, 1, output_index, false, 0);
+
+ /* Append 1 sector to zone 0 */
+ zone_id = 0;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_append_zone(bdev, ch, zone_id, 1, output_index, true, zone_id);
+ send_zone_info(bdev, ch, zone_id, 1, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Append to another zone */
+ zone_id = bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_append_zone(bdev, ch, zone_id, 5, output_index, true, zone_id);
+ send_zone_info(bdev, ch, zone_id, zone_id + 5, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Fill zone 0 and verify zone state change */
+ zone_id = 0;
+ block_len = 15;
+ send_append_zone(bdev, ch, zone_id, block_len, output_index, true, 1);
+ block_len++;
+ for (i = block_len; i < bdev->zone_capacity; i += block_len) {
+ send_append_zone(bdev, ch, zone_id, block_len, output_index, true, i);
+ }
+ send_zone_info(bdev, ch, zone_id, bdev->bdev.zone_size, SPDK_BDEV_ZONE_STATE_FULL, output_index,
+ true);
+
+ /* Append to two zones at once */
+ for (i = 0; i < num_zones; i++) {
+ zone_id = i * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+ }
+
+ zone_id = 0;
+ block_len = 16;
+ for (i = 0; i < bdev->zone_capacity - block_len; i += block_len) {
+ send_append_zone(bdev, ch, zone_id, block_len, output_index, true, zone_id + i);
+ }
+ send_append_zone(bdev, ch, zone_id, 32, output_index, false, 0);
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("zone_block", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_zone_block_create);
+ CU_ADD_TEST(suite, test_zone_block_create_invalid);
+ CU_ADD_TEST(suite, test_get_zone_info);
+ CU_ADD_TEST(suite, test_supported_io_types);
+ CU_ADD_TEST(suite, test_reset_zone);
+ CU_ADD_TEST(suite, test_open_zone);
+ CU_ADD_TEST(suite, test_zone_write);
+ CU_ADD_TEST(suite, test_zone_read);
+ CU_ADD_TEST(suite, test_close_zone);
+ CU_ADD_TEST(suite, test_finish_zone);
+ CU_ADD_TEST(suite, test_append_zone);
+
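+	/* Run the suite on a dedicated SPDK thread; the vbdev callbacks expect
+	 * a current thread to be set. */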
+ g_thread = spdk_thread_create("test", NULL);
+ spdk_set_thread(g_thread);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ set_test_opts();
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+
+ spdk_thread_exit(g_thread);
+ while (!spdk_thread_is_exited(g_thread)) {
+ spdk_thread_poll(g_thread, 0, 0);
+ }
+ spdk_thread_destroy(g_thread);
+
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blob/Makefile b/src/spdk/test/unit/lib/blob/Makefile
new file mode 100644
index 000000000..a039a423e
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/Makefile
@@ -0,0 +1,49 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+CUNIT_VERSION = $(shell echo "\#include <CUnit/CUnit.h>" | $(CC) -E -dM - | sed -n -e 's/.*VERSION "\([0-9\.\-]*\).*/\1/p')
+ifeq ($(CUNIT_VERSION),2.1-3)
+DIRS-y = blob.c
+else
+$(warning "blob_ut.c compilation skipped, only CUnit version 2.1-3 is supported")
+endif
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/blob/blob.c/.gitignore b/src/spdk/test/unit/lib/blob/blob.c/.gitignore
new file mode 100644
index 000000000..553f54655
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/blob.c/.gitignore
@@ -0,0 +1 @@
+blob_ut
diff --git a/src/spdk/test/unit/lib/blob/blob.c/Makefile b/src/spdk/test/unit/lib/blob/blob.c/Makefile
new file mode 100644
index 000000000..fc449a5c8
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/blob.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = blob_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c b/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c
new file mode 100644
index 000000000..6e51842e3
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c
@@ -0,0 +1,6693 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk/blob.h"
+#include "spdk/string.h"
+#include "spdk_internal/thread.h"
+
+#include "common/lib/ut_multithread.c"
+#include "../bs_dev_common.c"
+#include "blob/blobstore.c"
+#include "blob/request.c"
+#include "blob/zeroes.c"
+#include "blob/blob_bs_dev.c"
+
+struct spdk_blob_store *g_bs;
+spdk_blob_id g_blobid;
+struct spdk_blob *g_blob;
+int g_bserrno;
+struct spdk_xattr_names *g_names;
+int g_done;
+char *g_xattr_names[] = {"first", "second", "third"};
+char *g_xattr_values[] = {"one", "two", "three"};
+uint64_t g_ctx = 1729;
+bool g_use_extent_table = false;
+
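+/* Version 1 of the on-disk super block layout, defined here so the tests can
+ * access the raw fields directly. */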
+struct spdk_bs_super_block_ver1 {
+ uint8_t signature[8];
+ uint32_t version;
+ uint32_t length;
+ uint32_t clean; /* If there was a clean shutdown, this is 1. */
+ spdk_blob_id super_blob;
+
+ uint32_t cluster_size; /* In bytes */
+
+ uint32_t used_page_mask_start; /* Offset from beginning of disk, in pages */
+ uint32_t used_page_mask_len; /* Count, in pages */
+
+ uint32_t used_cluster_mask_start; /* Offset from beginning of disk, in pages */
+ uint32_t used_cluster_mask_len; /* Count, in pages */
+
+ uint32_t md_start; /* Offset from beginning of disk, in pages */
+ uint32_t md_len; /* Count, in pages */
+
+ uint8_t reserved[4036];
+ uint32_t crc;
+} __attribute__((packed));
+SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
+
+static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
+ struct spdk_blob_opts *blob_opts);
+static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
+static void suite_blob_setup(void);
+static void suite_blob_cleanup(void);
+
+static void
+_get_xattr_value(void *arg, const char *name,
+ const void **value, size_t *value_len)
+{
+ uint64_t i;
+
+ SPDK_CU_ASSERT_FATAL(value_len != NULL);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(arg == &g_ctx);
+
+	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
+ if (!strcmp(name, g_xattr_names[i])) {
+ *value_len = strlen(g_xattr_values[i]);
+ *value = g_xattr_values[i];
+ break;
+ }
+ }
+}
+
+static void
+_get_xattr_value_null(void *arg, const char *name,
+ const void **value, size_t *value_len)
+{
+ SPDK_CU_ASSERT_FATAL(value_len != NULL);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(arg == NULL);
+
+ *value_len = 0;
+ *value = NULL;
+}
+
+static int
+_get_snapshots_count(struct spdk_blob_store *bs)
+{
+ struct spdk_blob_list *snapshot = NULL;
+ int count = 0;
+
+ TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
+ count += 1;
+ }
+
+ return count;
+}
+
+static void
+ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
+{
+ spdk_blob_opts_init(opts);
+ opts->use_extent_table = g_use_extent_table;
+}
+
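+/* The completion callbacks below stash their results in globals; tests drive
+ * each async operation with poll_threads() and then assert on g_bserrno and
+ * friends. */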
+static void
+bs_op_complete(void *cb_arg, int bserrno)
+{
+ g_bserrno = bserrno;
+}
+
+static void
+bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
+ int bserrno)
+{
+ g_bs = bs;
+ g_bserrno = bserrno;
+}
+
+static void
+blob_op_complete(void *cb_arg, int bserrno)
+{
+ g_bserrno = bserrno;
+}
+
+static void
+blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
+{
+ g_blobid = blobid;
+ g_bserrno = bserrno;
+}
+
+static void
+blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
+{
+ g_blob = blb;
+ g_bserrno = bserrno;
+}
+
+static void
+ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
+{
+ struct spdk_bs_dev *dev;
+
+ /* Unload the blob store */
+ spdk_bs_unload(*bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ dev = init_dev();
+ /* Load an existing blob store */
+ spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ *bs = g_bs;
+
+ g_bserrno = -1;
+}
+
+static void
+ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
+{
+ struct spdk_bs_dev *dev;
+
+ /* Dirty shutdown */
+ bs_free(*bs);
+
+ dev = init_dev();
+ /* Load an existing blob store */
+ spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ *bs = g_bs;
+
+ g_bserrno = -1;
+}
+
+static void
+blob_init(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+
+ /* should fail for an unsupported blocklen */
+ dev->blocklen = 500;
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ dev = init_dev();
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_super(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ spdk_blob_id blobid;
+ struct spdk_blob_opts blob_opts;
+
+ /* Get the super blob without having set one */
+ spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOENT);
+ CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
+
+ /* Create a blob */
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ /* Set the blob as the super blob */
+ spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Get the super blob */
+ spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(blobid == g_blobid);
+}
+
+static void
+blob_open(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts blob_opts;
+ spdk_blob_id blobid, blobid2;
+
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ blobid2 = spdk_blob_get_id(blob);
+ CU_ASSERT(blobid == blobid2);
+
+	/* Try to open the blob again. It should succeed. */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(blob == g_blob);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+	 * Close the blob a second time, releasing the second reference. This
+ * should succeed.
+ */
+ blob = g_blob;
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+	 * Try to open the blob again. It should succeed. This tests the case
+	 * where the blob is opened, closed, then re-opened.
+ */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_create(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+
+ /* Create blob with 10 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create blob with 0 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 0;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create blob with default options (opts == NULL) */
+
+ spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to create blob with size larger than blobstore */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = bs->total_clusters + 1;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOSPC);
+}
+
+static void
+blob_create_fail(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
+ uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
+
+ /* NULL callback */
+ ut_spdk_blob_opts_init(&opts);
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = NULL;
+ opts.xattrs.count = 1;
+ opts.xattrs.ctx = &g_ctx;
+
+ blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
+ CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOENT);
+ SPDK_CU_ASSERT_FATAL(g_blob == NULL);
+
+ ut_bs_reload(&bs, NULL);
+ CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
+ CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
+
+ spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_blob == NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+}
+
+static void
+blob_create_internal(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ struct spdk_blob_xattr_opts internal_xattrs;
+ const void *value;
+ size_t value_len;
+ spdk_blob_id blobid;
+ int rc;
+
+ /* Create blob with custom xattrs */
+
+ ut_spdk_blob_opts_init(&opts);
+ blob_xattrs_init(&internal_xattrs);
+ internal_xattrs.count = 3;
+ internal_xattrs.names = g_xattr_names;
+ internal_xattrs.get_value = _get_xattr_value;
+ internal_xattrs.ctx = &g_ctx;
+
+ bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create blob with NULL internal options */
+
+ bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
+
+ blob = g_blob;
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+}
+
+static void
+blob_thin_provision(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ struct spdk_bs_opts bs_opts;
+ spdk_blob_id blobid;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ bs = g_bs;
+
+ /* Create blob with thin provisioning enabled */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Do not shut down cleanly. This makes sure that when we load again
+	 * and try to recover a valid used_cluster map, the blobstore ignores
+	 * clusters with index 0, since those are unallocated.
+ */
+ ut_bs_dirty_load(&bs, &bs_opts);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
+
+ ut_blob_close_and_delete(bs, blob);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_snapshot(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob *snapshot, *snapshot2;
+ struct spdk_blob_bs_dev *blob_bs_dev;
+ struct spdk_blob_opts opts;
+ struct spdk_blob_xattr_opts xattrs;
+ spdk_blob_id blobid;
+ spdk_blob_id snapshotid;
+ spdk_blob_id snapshotid2;
+ const void *value;
+ size_t value_len;
+ int rc;
+ spdk_blob_id ids[2];
+ size_t count;
+
+ /* Create blob with 10 clusters */
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ /* Create snapshot from blob */
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+ CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
+ CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
+ blob->active.num_clusters * sizeof(blob->active.clusters[0])));
+
+ /* Try to create snapshot from clone with xattrs */
+ xattrs.names = g_xattr_names;
+ xattrs.get_value = _get_xattr_value;
+ xattrs.count = 3;
+ xattrs.ctx = &g_ctx;
+ spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
+ snapshotid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+ CU_ASSERT(snapshot2->data_ro == true);
+ CU_ASSERT(snapshot2->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
+
+ /* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
+ CU_ASSERT(snapshot->back_bs_dev == NULL);
+ SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
+ SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
+
+ blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
+ CU_ASSERT(blob_bs_dev->blob == snapshot2);
+
+ blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
+ CU_ASSERT(blob_bs_dev->blob == snapshot);
+
+ rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+ /* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == snapshotid2);
+
+ /* Try to create snapshot from snapshot */
+ spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
+
+ /* Delete blob and confirm that it is no longer on snapshot2 clone list */
+ ut_blob_close_and_delete(bs, blob);
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
+ CU_ASSERT(count == 0);
+
+ /* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
+ ut_blob_close_and_delete(bs, snapshot2);
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
+ CU_ASSERT(count == 0);
+
+ ut_blob_close_and_delete(bs, snapshot);
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
+}
+
+static void
+blob_snapshot_freeze_io(void)
+{
+ struct spdk_io_channel *channel;
+ struct spdk_bs_channel *bs_channel;
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint32_t num_of_pages = 10;
+ uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
+ uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
+ uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ memset(payload_read, 0x00, sizeof(payload_read));
+ memset(payload_zero, 0x00, sizeof(payload_zero));
+
+ /* Test freeze I/O during snapshot */
+ channel = spdk_bs_alloc_io_channel(bs);
+ bs_channel = spdk_io_channel_get_ctx(channel);
+
+ /* Create blob with 10 clusters */
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+ opts.thin_provision = false;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+
+ /* This is implementation specific.
+ * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
+ * Four async I/O operations happen before that. */
+ poll_thread_times(0, 3);
+
+ CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
+
+ /* Blob I/O should be frozen here */
+ CU_ASSERT(blob->frozen_refcnt == 1);
+
+ /* Write to the blob */
+ spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
+
+ /* Verify that I/O is queued */
+ CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
+ /* Verify that payload is not written to disk */
+ CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0]*SPDK_BS_PAGE_SIZE],
+ SPDK_BS_PAGE_SIZE) == 0);
+
+ /* Finish all operations including spdk_bs_create_snapshot */
+ poll_threads();
+
+ /* Verify snapshot */
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ /* Verify that blob has unset frozen_io */
+ CU_ASSERT(blob->frozen_refcnt == 0);
+
+ /* Verify that postponed I/O completed successfully by comparing payload */
+ spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_clone(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot, *clone;
+ spdk_blob_id blobid, cloneid, snapshotid;
+ struct spdk_blob_xattr_opts xattrs;
+ const void *value;
+ size_t value_len;
+ int rc;
+
+ /* Create blob with 10 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ /* Create snapshot */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create clone from snapshot with xattrs */
+ xattrs.names = g_xattr_names;
+ xattrs.get_value = _get_xattr_value;
+ xattrs.count = 3;
+ xattrs.ctx = &g_ctx;
+
+ spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+ CU_ASSERT(clone->data_ro == false);
+ CU_ASSERT(clone->md_ro == false);
+ CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
+
+ rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+	/* Try to create a clone from a blob that is not read-only */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
+
+ /* Mark blob as read only */
+ spdk_blob_set_read_only(blob);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create clone from read only blob */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+ CU_ASSERT(clone->data_ro == false);
+ CU_ASSERT(clone->md_ro == false);
+ CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
+
+ ut_blob_close_and_delete(bs, clone);
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+_blob_inflate(bool decouple_parent)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ spdk_blob_id blobid, snapshotid;
+ struct spdk_io_channel *channel;
+ uint64_t free_clusters;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob with 10 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+ opts.thin_provision = true;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
+
+ /* 1) Blob with no parent */
+ if (decouple_parent) {
+ /* Decouple parent of blob with no parent (should fail) */
+ spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+ } else {
+		/* Inflating a thin blob with no parent should make it thick */
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
+ }
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ /* 2) Blob with parent */
+ if (!decouple_parent) {
+ /* Do full blob inflation */
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* all 10 clusters should be allocated */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
+ } else {
+ /* Decouple parent of blob */
+ spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+		/* when only the parent is removed, none of the clusters should be allocated */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
+ }
+
+ /* Now, it should be possible to delete snapshot */
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_inflate(void)
+{
+ _blob_inflate(false);
+ _blob_inflate(true);
+}
+
+static void
+blob_delete(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts blob_opts;
+ spdk_blob_id blobid;
+
+ /* Create a blob and then delete it. */
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid > 0);
+ blobid = g_blobid;
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to open the blob */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOENT);
+}
+
+static void
+blob_resize_test(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ uint64_t free_clusters;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ blob = ut_blob_create_and_open(bs, NULL);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ /* Confirm that resize fails if blob is marked read-only. */
+ blob->md_ro = true;
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EPERM);
+ blob->md_ro = false;
+
+ /* The blob started at 0 clusters. Resize it to be 5. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
+
+ /* Shrink the blob to 3 clusters. This will not actually release
+ * the old clusters until the blob is synced.
+ */
+ spdk_blob_resize(blob, 3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Verify there are still 5 clusters in use */
+ CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Now there are only 3 clusters in use */
+ CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
+
+ /* Resize the blob to be 10 clusters. Growth takes effect immediately. */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
+
+ /* Try to resize the blob to size larger than blobstore. */
+ spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOSPC);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_read_only(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_bs_opts opts;
+ spdk_blob_id blobid;
+ int rc;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ blob = ut_blob_create_and_open(bs, NULL);
+ blobid = spdk_blob_get_id(blob);
+
+ rc = spdk_blob_set_read_only(blob);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(blob->data_ro == false);
+ CU_ASSERT(blob->md_ro == false);
+
+ spdk_blob_sync_md(blob, bs_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(blob->data_ro == true);
+ CU_ASSERT(blob->md_ro == true);
+ CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->data_ro == true);
+ CU_ASSERT(blob->md_ro == true);
+ CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, &opts);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->data_ro == true);
+ CU_ASSERT(blob->md_ro == true);
+ CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
+
+ ut_blob_close_and_delete(bs, blob);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+}
+
+static void
+channel_ops(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_io_channel *channel;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+blob_write(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ struct spdk_io_channel *channel;
+ uint64_t pages_per_cluster;
+ uint8_t payload[10 * 4096];
+
+ pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ /* Write to a blob with 0 size */
+ spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Confirm that write fails if blob is marked read-only. */
+ blob->data_ro = true;
+ spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EPERM);
+ blob->data_ro = false;
+
+ /* Write to the blob */
+ spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Write starting beyond the end */
+ spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
+ NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Write starting at a valid location but going off the end */
+ spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+blob_read(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ struct spdk_io_channel *channel;
+ uint64_t pages_per_cluster;
+ uint8_t payload[10 * 4096];
+
+ pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ /* Read from a blob with 0 size */
+ spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Confirm that read passes if blob is marked read-only. */
+ blob->data_ro = true;
+ spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob->data_ro = false;
+
+ /* Read from the blob */
+ spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read starting beyond the end */
+ spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
+ NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Read starting at a valid location but going off the end */
+ spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+blob_rw_verify(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ struct spdk_io_channel *channel;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_resize(blob, 32, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+blob_rw_verify_iov(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_read[3];
+ struct iovec iov_write[3];
+ void *buf;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ blob = ut_blob_create_and_open(bs, NULL);
+
+ spdk_blob_resize(blob, 2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+ * Manually adjust the offset of the blob's second cluster. This allows
+ * us to make sure that the readv/write code correctly accounts for I/O
+	 * that crosses cluster boundaries. Start by asserting that the allocated
+ * clusters are where we expect before modifying the second cluster.
+ */
+ CU_ASSERT(blob->active.clusters[0] == 1 * 256);
+ CU_ASSERT(blob->active.clusters[1] == 2 * 256);
+ blob->active.clusters[1] = 3 * 256;
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
+ /*
+ * Choose a page offset just before the cluster boundary. The first 6 pages of payload
+ * will get written to the first cluster, the last 4 to the second cluster.
+ */
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ buf = calloc(1, 256 * 4096);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ /* Check that cluster 2 on "disk" was not modified. */
+ CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
+ free(buf);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static uint32_t
+bs_channel_get_req_count(struct spdk_io_channel *_channel)
+{
+ struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
+ struct spdk_bs_request_set *set;
+ uint32_t count = 0;
+
+ TAILQ_FOREACH(set, &channel->reqs, link) {
+ count++;
+ }
+
+ return count;
+}
+
+static void
+blob_rw_verify_iov_nomem(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ struct spdk_io_channel *channel;
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_write[3];
+ uint32_t req_count;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_resize(blob, 2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+ * Choose a page offset just before the cluster boundary. The first 6 pages of payload
+ * will get written to the first cluster, the last 4 to the second cluster.
+ */
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
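+	/* With calloc mocked to return NULL, the write path cannot allocate its
+	 * request set, so the operation must fail with -ENOMEM and leave the
+	 * channel's request count unchanged. */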
+ MOCK_SET(calloc, NULL);
+ req_count = bs_channel_get_req_count(channel);
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+	CU_ASSERT(g_bserrno == -ENOMEM);
+ CU_ASSERT(req_count == bs_channel_get_req_count(channel));
+ MOCK_CLEAR(calloc);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+blob_rw_iov_read_only(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ struct spdk_io_channel *channel;
+ uint8_t payload_read[4096];
+ uint8_t payload_write[4096];
+ struct iovec iov_read;
+ struct iovec iov_write;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_resize(blob, 2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+	/* Verify that writev fails if the read-only flag is set. */
+ blob->data_ro = true;
+ iov_write.iov_base = payload_write;
+ iov_write.iov_len = sizeof(payload_write);
+ spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EPERM);
+
+	/* Verify that readv passes if the data_ro flag is set. */
+ iov_read.iov_base = payload_read;
+ iov_read.iov_len = sizeof(payload_read);
+ spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint8_t *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ uint64_t i;
+ uint8_t *buf;
+ uint64_t page_size = spdk_bs_get_page_size(blob->bs);
+
+ /* To be sure that the operation is NOT split, read one page at a time */
+ buf = payload;
+ for (i = 0; i < length; i++) {
+ spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
+ poll_threads();
+ if (g_bserrno != 0) {
+ /* Pass the error code up */
+ break;
+ }
+ buf += page_size;
+ }
+
+ cb_fn(cb_arg, g_bserrno);
+}
+
+static void
+_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint8_t *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ uint64_t i;
+ uint8_t *buf;
+ uint64_t page_size = spdk_bs_get_page_size(blob->bs);
+
+ /* To be sure that the operation is NOT split, write one page at a time */
+ buf = payload;
+ for (i = 0; i < length; i++) {
+ spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
+ poll_threads();
+ if (g_bserrno != 0) {
+ /* Pass the error code up */
+ break;
+ }
+ buf += page_size;
+ }
+
+ cb_fn(cb_arg, g_bserrno);
+}
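+
+/*
+ * These helpers mirror the shape of spdk_blob_io_read/write but issue
+ * strictly single-page I/Os, so the blobstore never needs to split a
+ * request across a cluster boundary; the last g_bserrno is passed to cb_fn.
+ */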
+
+static void
+blob_operation_split_rw(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ uint64_t cluster_size;
+
+ uint64_t payload_size;
+ uint8_t *payload_read;
+ uint8_t *payload_write;
+ uint8_t *payload_pattern;
+
+ uint64_t page_size;
+ uint64_t pages_per_cluster;
+ uint64_t pages_per_payload;
+
+ uint64_t i;
+
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ page_size = spdk_bs_get_page_size(bs);
+ pages_per_cluster = cluster_size / page_size;
+ pages_per_payload = pages_per_cluster * 5;
+ payload_size = cluster_size * 5;
+
+ payload_read = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_read != NULL);
+
+ payload_write = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_write != NULL);
+
+ payload_pattern = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
+
+ /* Prepare a deterministic pattern to write */
+ memset(payload_pattern, 0xFF, payload_size);
+ for (i = 0; i < pages_per_payload; i++) {
+ *((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
+ }
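+ /*
+ * Each page begins with its 1-based index over an 0xFF fill, so data landing
+ * at the wrong page offset shows up immediately in the memcmp checks below.
+ */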
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = false;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Initial read should return zeroed payload */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
+
+ /* Fill whole blob except last page */
+ spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Write last page with a pattern */
+ spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
+
+ /* Fill whole blob except first page */
+ spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Write first page with a pattern */
+ spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
+
+
+ /* Fill whole blob with a pattern (5 clusters) */
+
+ /* 1. Read test. */
+ _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ /* 2. Write test. */
+ spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ g_blob = NULL;
+ g_blobid = 0;
+
+ free(payload_read);
+ free(payload_write);
+ free(payload_pattern);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_operation_split_rw_iov(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ uint64_t cluster_size;
+
+ uint64_t payload_size;
+ uint8_t *payload_read;
+ uint8_t *payload_write;
+ uint8_t *payload_pattern;
+
+ uint64_t page_size;
+ uint64_t pages_per_cluster;
+ uint64_t pages_per_payload;
+
+ struct iovec iov_read[2];
+ struct iovec iov_write[2];
+
+ uint64_t i, j;
+
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ page_size = spdk_bs_get_page_size(bs);
+ pages_per_cluster = cluster_size / page_size;
+ pages_per_payload = pages_per_cluster * 5;
+ payload_size = cluster_size * 5;
+
+ payload_read = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_read != NULL);
+
+ payload_write = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_write != NULL);
+
+ payload_pattern = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
+
+ /* Prepare a deterministic pattern to write */
+ for (i = 0; i < pages_per_payload; i++) {
+ for (j = 0; j < page_size / sizeof(uint64_t); j++) {
+ uint64_t *tmp;
+
+ tmp = (uint64_t *)payload_pattern;
+ tmp += ((page_size * i) / sizeof(uint64_t)) + j;
+ *tmp = i + 1;
+ }
+ }
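+ /*
+ * Unlike the previous test, every 8-byte word of each page carries the page
+ * index here, so corruption anywhere within a page is detectable as well.
+ */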
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = false;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Initial read should return zeroed payload */
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size * 3;
+ iov_read[1].iov_base = payload_read + cluster_size * 3;
+ iov_read[1].iov_len = cluster_size * 2;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
+
+ /* The first iov fills the whole blob except the last page; the second iov
+ * writes the last page with a pattern. */
+ iov_write[0].iov_base = payload_pattern;
+ iov_write[0].iov_len = payload_size - page_size;
+ iov_write[1].iov_base = payload_pattern;
+ iov_write[1].iov_len = page_size;
+ spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size * 2;
+ iov_read[1].iov_base = payload_read + cluster_size * 2;
+ iov_read[1].iov_len = cluster_size * 3;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
+
+ /* The first iov fills only the first page; the second iov writes the whole
+ * blob except the first page with a pattern. */
+ iov_write[0].iov_base = payload_pattern;
+ iov_write[0].iov_len = page_size;
+ iov_write[1].iov_base = payload_pattern;
+ iov_write[1].iov_len = payload_size - page_size;
+ spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size * 4;
+ iov_read[1].iov_base = payload_read + cluster_size * 4;
+ iov_read[1].iov_len = cluster_size;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
+
+
+ /* Fill whole blob with a pattern (5 clusters) */
+
+ /* 1. Read test. */
+ _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size;
+ iov_read[1].iov_base = payload_read + cluster_size;
+ iov_read[1].iov_len = cluster_size * 4;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ /* 2. Write test. */
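+ /* payload_read still holds the full pattern from the read test above, so
+ * using it as the writev source rewrites the same pattern. */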
+ iov_write[0].iov_base = payload_read;
+ iov_write[0].iov_len = cluster_size * 2;
+ iov_write[1].iov_base = payload_read + cluster_size * 2;
+ iov_write[1].iov_len = cluster_size * 3;
+ spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ g_blob = NULL;
+ g_blobid = 0;
+
+ free(payload_read);
+ free(payload_write);
+ free(payload_pattern);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_unmap(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ uint8_t payload[4096];
+ int i;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload, 0, sizeof(payload));
+ payload[0] = 0xFF;
+
+ /*
+ * Set the first byte of every cluster to 0xFF.
+ * The first cluster on the device is reserved, so start from cluster number 1.
+ */
+ for (i = 1; i < 11; i++) {
+ g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
+ }
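+ /*
+ * This assumes the unit-test setup allocates clusters sequentially, so blob
+ * cluster i maps to device cluster i + 1 (device cluster 0 is reserved).
+ */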
+
+ /* Confirm writes */
+ for (i = 0; i < 10; i++) {
+ payload[0] = 0;
+ spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(payload[0] == 0xFF);
+ }
+
+ /* Mark some clusters as unallocated */
+ blob->active.clusters[1] = 0;
+ blob->active.clusters[2] = 0;
+ blob->active.clusters[3] = 0;
+ blob->active.clusters[6] = 0;
+ blob->active.clusters[8] = 0;
+
+ /* Unmap clusters by resizing to 0 */
+ spdk_blob_resize(blob, 0, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Confirm that only 'allocated' clusters were unmapped */
+ for (i = 1; i < 11; i++) {
+ switch (i) {
+ case 2:
+ case 3:
+ case 4:
+ case 7:
+ case 9:
+ CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
+ break;
+ default:
+ CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
+ break;
+ }
+ }
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_iter(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid;
+ struct spdk_blob_opts blob_opts;
+
+ spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_blob == NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_blob != NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_id(blob) == blobid);
+
+ spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_blob == NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+}
+
+static void
+blob_xattr(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ spdk_blob_id blobid = spdk_blob_get_id(blob);
+ uint64_t length;
+ int rc;
+ const char *name1, *name2;
+ const void *value;
+ size_t value_len;
+ struct spdk_xattr_names *names;
+
+ /* Test that set_xattr fails if md_ro flag is set. */
+ blob->md_ro = true;
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == -EPERM);
+
+ blob->md_ro = false;
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 2345;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Overwrite "length" xattr. */
+ length = 3456;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* get_xattr should still work even if md_ro flag is set. */
+ value = NULL;
+ blob->md_ro = true;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+ blob->md_ro = false;
+
+ rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
+ CU_ASSERT(rc == -ENOENT);
+
+ names = NULL;
+ rc = spdk_blob_get_xattr_names(blob, &names);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(names != NULL);
+ CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
+ name1 = spdk_xattr_names_get_name(names, 0);
+ SPDK_CU_ASSERT_FATAL(name1 != NULL);
+ CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
+ name2 = spdk_xattr_names_get_name(names, 1);
+ SPDK_CU_ASSERT_FATAL(name2 != NULL);
+ CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
+ CU_ASSERT(strcmp(name1, name2));
+ spdk_xattr_names_free(names);
+
+ /* Confirm that remove_xattr fails if md_ro is set to true. */
+ blob->md_ro = true;
+ rc = spdk_blob_remove_xattr(blob, "name");
+ CU_ASSERT(rc == -EPERM);
+
+ blob->md_ro = false;
+ rc = spdk_blob_remove_xattr(blob, "name");
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_blob_remove_xattr(blob, "foobar");
+ CU_ASSERT(rc == -ENOENT);
+
+ /* Set internal xattr */
+ length = 7898;
+ rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
+ CU_ASSERT(rc == 0);
+ rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(*(uint64_t *)value == length);
+ /* Try to get a public xattr with the same name */
+ rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
+ CU_ASSERT(rc != 0);
+ rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
+ CU_ASSERT(rc != 0);
+ /* Check if SPDK_BLOB_INTERNAL_XATTR is set */
+ CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
+ SPDK_BLOB_INTERNAL_XATTR);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+
+ /* Check if xattrs are persisted */
+ ut_bs_reload(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(*(uint64_t *)value == length);
+
+ /* Try to get the internal xattr through the public call */
+ rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ rc = blob_remove_xattr(blob, "internal", true);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
+}
+
+static void
+bs_load(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ spdk_blob_id blobid;
+ struct spdk_blob *blob;
+ struct spdk_bs_super_block *super_block;
+ uint64_t length;
+ int rc;
+ const void *value;
+ size_t value_len;
+ struct spdk_bs_opts opts;
+ struct spdk_blob_opts blob_opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Try to open a blobid that does not exist */
+ spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOENT);
+ CU_ASSERT(g_blob == NULL);
+
+ /* Create a blob */
+ blob = ut_blob_create_and_open(bs, NULL);
+ blobid = spdk_blob_get_id(blob);
+
+ /* Try again to open a valid blobid, but with the upper 32 bits cleared */
+ spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOENT);
+ CU_ASSERT(g_blob == NULL);
+
+ /* Set some xattrs */
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 2345;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+
+ /* Load should fail for device with an unsupported blocklen */
+ dev = init_dev();
+ dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Load should fail when max_md_ops is set to zero */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.max_md_ops = 0;
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Load should fail when max_channel_ops is set to zero */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.max_channel_ops = 0;
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+ CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Sync the blob metadata; the super block on disk should still be marked clean */
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(super_block->clean == 1);
+
+ /* Get the xattrs */
+ value = NULL;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+
+ rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
+ CU_ASSERT(rc == -ENOENT);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Load should fail: bdev size < saved size */
+ dev = init_dev();
+ dev->blockcnt /= 2;
+
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(g_bserrno == -EILSEQ);
+
+ /* Load should succeed: bdev size > saved size */
+ dev = init_dev();
+ dev->blockcnt *= 4;
+
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(g_bserrno == 0);
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+
+
+ /* Test compatibility mode */
+
+ dev = init_dev();
+ super_block->size = 0;
+ super_block->crc = blob_md_page_calc_crc(super_block);
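+ /*
+ * Setting size to 0 presumably emulates a super block written before the
+ * size field existed; on load the blobstore should derive the size from
+ * the device and persist it (verified below).
+ */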
+
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create a blob */
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ /* Blobstore should update number of blocks in super_block */
+ CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
+ CU_ASSERT(super_block->clean == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(super_block->clean == 1);
+ g_bs = NULL;
+
+}
+
+static void
+bs_load_pending_removal(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ spdk_blob_id blobid, snapshotid;
+ const void *value;
+ size_t value_len;
+ int rc;
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ /* Create snapshot */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+
+ /* Set SNAPSHOT_PENDING_REMOVAL xattr */
+ snapshot->md_ro = false;
+ rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
+ CU_ASSERT(rc == 0);
+ snapshot->md_ro = true;
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Reload blobstore */
+ ut_bs_reload(&bs, NULL);
+
+ /* Snapshot should not be removed as blob is still pointing to it */
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+
+ /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
+ rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ /* Set SNAPSHOT_PENDING_REMOVAL xattr again */
+ snapshot->md_ro = false;
+ rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
+ CU_ASSERT(rc == 0);
+ snapshot->md_ro = true;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ /* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
+ blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Reload blobstore */
+ ut_bs_reload(&bs, NULL);
+
+ /* Snapshot should be removed as blob is not pointing to it anymore */
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+}
+
+static void
+bs_load_custom_cluster_size(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super_block;
+ struct spdk_bs_opts opts;
+ uint32_t custom_cluster_size = 4194304; /* 4MiB */
+ uint32_t cluster_sz;
+ uint64_t total_clusters;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = custom_cluster_size;
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ cluster_sz = bs->cluster_sz;
+ total_clusters = bs->total_clusters;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ /* Compare cluster size and number to one after initialization */
+ CU_ASSERT(cluster_sz == bs->cluster_sz);
+ CU_ASSERT(total_clusters == bs->total_clusters);
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+ CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(super_block->clean == 1);
+ g_bs = NULL;
+}
+
+static void
+bs_type(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ /* Load a non-existing blobstore type */
+ dev = init_dev();
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Load with empty blobstore type */
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Initialize a new blob store with empty bstype */
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Load a non-existing blobstore type */
+ dev = init_dev();
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Load with empty blobstore type */
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+bs_super_block(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super_block;
+ struct spdk_bs_opts opts;
+ struct spdk_bs_super_block_ver1 super_block_v1;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ /* Load an existing blob store with version newer than supported */
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ super_block->version++;
+
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Create a new blob store with super block version 1 */
+ dev = init_dev();
+ super_block_v1.version = 1;
+ memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
+ super_block_v1.length = 0x1000;
+ super_block_v1.clean = 1;
+ super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
+ super_block_v1.cluster_size = 0x100000;
+ super_block_v1.used_page_mask_start = 0x01;
+ super_block_v1.used_page_mask_len = 0x01;
+ super_block_v1.used_cluster_mask_start = 0x02;
+ super_block_v1.used_cluster_mask_len = 0x01;
+ super_block_v1.md_start = 0x03;
+ super_block_v1.md_len = 0x40;
+ memset(super_block_v1.reserved, 0, 4036);
+ super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
+ memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
+
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+/*
+ * Try to unload a blobstore while a blob is still open, then close the blob.
+ */
+static void
+bs_unload(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+
+ /* Create a blob and open it. */
+ blob = ut_blob_create_and_open(bs, NULL);
+
+ /* Try to unload blobstore, should fail with open blob */
+ g_bserrno = -1;
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EBUSY);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Close the blob so the blobstore can be unloaded cleanly afterwards */
+ g_bserrno = -1;
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+}
+
+/*
+ * Create a blobstore with a cluster size different than the default, and ensure it is
+ * persisted.
+ */
+static void
+bs_cluster_sz(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ uint32_t cluster_sz;
+
+ /* Set cluster size to zero */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = 0;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ SPDK_CU_ASSERT_FATAL(g_bs == NULL);
+
+ /*
+ * Set cluster size equal to the blobstore page size;
+ * a valid cluster size must be at least twice the blobstore page size.
+ */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = SPDK_BS_PAGE_SIZE;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOMEM);
+ SPDK_CU_ASSERT_FATAL(g_bs == NULL);
+
+ /*
+ * Set cluster size lower than the page size;
+ * a valid cluster size must be at least twice the blobstore page size.
+ */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ SPDK_CU_ASSERT_FATAL(g_bs == NULL);
+
+ /* Set cluster size to twice the default */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz *= 2;
+ cluster_sz = opts.cluster_sz;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
+
+ ut_bs_reload(&bs, &opts);
+
+ CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+/*
+ * Create a blobstore, reload it and ensure total usable cluster count
+ * stays the same.
+ */
+static void
+bs_usable_clusters(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ uint32_t clusters;
+ int i;
+
+
+ clusters = spdk_bs_total_data_cluster_count(bs);
+
+ ut_bs_reload(&bs, NULL);
+
+ CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
+
+ /* Create and resize blobs to make sure that the usable cluster count won't change */
+ for (i = 0; i < 4; i++) {
+ g_bserrno = -1;
+ g_blobid = SPDK_BLOBID_INVALID;
+ blob = ut_blob_create_and_open(bs, NULL);
+
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bserrno = -1;
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
+ }
+
+ /* Reload the blob store to make sure that nothing changed */
+ ut_bs_reload(&bs, NULL);
+
+ CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
+}
+
+/*
+ * Test resizing of the metadata blob. This requires creating enough blobs
+ * so that one cluster is not enough to fit the metadata for those blobs.
+ * To induce this condition more quickly, we reduce the cluster
+ * size to 16KiB, so only four 4KiB blob metadata pages can fit.
+ */
+static void
+bs_resize_md(void)
+{
+ struct spdk_blob_store *bs;
+ const int CLUSTER_PAGE_COUNT = 4;
+ const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts blob_opts;
+ uint32_t cluster_sz;
+ spdk_blob_id blobids[NUM_BLOBS];
+ int i;
+
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
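+ /* Sketch of the arithmetic (assuming at least one md page per blob):
+ * 4 pages * 4 KiB = 16 KiB clusters, and 16 blobs need at least 16 md
+ * pages, overflowing the 4-page md cluster and forcing the md blob to grow. */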
+ cluster_sz = opts.cluster_sz;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
+
+ ut_spdk_blob_opts_init(&blob_opts);
+
+ for (i = 0; i < NUM_BLOBS; i++) {
+ g_bserrno = -1;
+ g_blobid = SPDK_BLOBID_INVALID;
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobids[i] = g_blobid;
+ }
+
+ ut_bs_reload(&bs, &opts);
+
+ CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
+
+ for (i = 0; i < NUM_BLOBS; i++) {
+ g_bserrno = -1;
+ g_blob = NULL;
+ spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+ g_bserrno = -1;
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+bs_destroy(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+
+ /* Initialize a new blob store */
+ dev = init_dev();
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Destroy the blob store */
+ g_bserrno = -1;
+ spdk_bs_destroy(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Loading a non-existent blob store should fail. */
+ g_bs = NULL;
+ dev = init_dev();
+
+ g_bserrno = 0;
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+}
+
+/* Try to hit all of the corner cases associated with serializing
+ * a blob to disk
+ */
+static void
+blob_serialize_test(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ struct spdk_blob_store *bs;
+ spdk_blob_id blobid[2];
+ struct spdk_blob *blob[2];
+ uint64_t i;
+ char *value;
+ int rc;
+
+ dev = init_dev();
+
+ /* Initialize a new blobstore with very small clusters */
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = dev->blocklen * 8;
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create and open two blobs */
+ for (i = 0; i < 2; i++) {
+ blob[i] = ut_blob_create_and_open(bs, NULL);
+ blobid[i] = spdk_blob_get_id(blob[i]);
+
+ /* Set a fairly large xattr on both blobs to eat up
+ * metadata space
+ */
+ value = calloc(dev->blocklen - 64, sizeof(char));
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ memset(value, i, dev->blocklen / 2);
+ rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
+ CU_ASSERT(rc == 0);
+ free(value);
+ }
+
+ /* Resize the blobs, alternating 1 cluster at a time.
+ * This thwarts run-length encoding and will cause the
+ * extents to spill over.
+ */
+ for (i = 0; i < 6; i++) {
+ spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ for (i = 0; i < 2; i++) {
+ spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ /* Close the blobs */
+ for (i = 0; i < 2; i++) {
+ spdk_blob_close(blob[i], blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ ut_bs_reload(&bs, &opts);
+
+ for (i = 0; i < 2; i++) {
+ blob[i] = NULL;
+
+ spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob[i] = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
+
+ spdk_blob_close(blob[i], blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_crc(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid;
+ uint32_t page_num;
+ int index;
+ struct spdk_blob_md_page *page;
+
+ blob = ut_blob_create_and_open(bs, NULL);
+ blobid = spdk_blob_get_id(blob);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ page_num = bs_blobid_to_page(blobid);
+ index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
+ page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
+ page->crc = 0;
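+ /* Zeroing the stored CRC invalidates this md page, so both opening and
+ * deleting the blob below are expected to fail with -EINVAL. */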
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blob == NULL);
+ g_bserrno = 0;
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+}
+
+static void
+super_block_crc(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super_block;
+
+ dev = init_dev();
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ super_block->crc = 0;
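+ /* Note: a corrupt super block CRC surfaces as -EILSEQ on load, whereas a
+ * corrupt blob md page CRC surfaces as -EINVAL (see blob_crc above). */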
+ dev = init_dev();
+
+ /* Load an existing blob store */
+ g_bserrno = 0;
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EILSEQ);
+}
+
+/* For the blob dirty shutdown test case we run the following sub-tests:
+ * 1 Initialize a new blob store and create one super blob with some xattrs,
+ *   then dirty shutdown, reload the blob store and verify the xattrs.
+ * 2 Resize the blob from 10 clusters to 20 clusters, then dirty shutdown,
+ *   reload the blob store and verify the cluster count.
+ * 3 Create a second blob, then dirty shutdown, reload the blob store
+ *   and verify the second blob.
+ * 4 Delete the second blob, then dirty shutdown, reload the blob store
+ *   and verify that the second blob is invalid.
+ * 5 Create the second blob again along with a third blob, corrupt the md of
+ *   the second blob to make it invalid, then dirty shutdown, reload the
+ *   blob store and verify that the second blob is invalid while the third
+ *   blob is correct.
+ */
+static void
+blob_dirty_shutdown(void)
+{
+ int rc;
+ int index;
+ struct spdk_blob_store *bs = g_bs;
+ spdk_blob_id blobid1, blobid2, blobid3;
+ struct spdk_blob *blob = g_blob;
+ uint64_t length;
+ uint64_t free_clusters;
+ const void *value;
+ size_t value_len;
+ uint32_t page_num;
+ struct spdk_blob_md_page *page;
+ struct spdk_blob_opts blob_opts;
+
+ /* Create first blob */
+ blobid1 = spdk_blob_get_id(blob);
+
+ /* Set some xattrs */
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 2345;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Put an xattr that fits exactly in a single page.
+ * This results in additional pages being added to the MD:
+ * the first holds the flags and the smaller xattrs, the second
+ * the large xattr, and the third just the extents.
+ */
+ size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
+ strlen("large_xattr");
+ char *xattr = calloc(xattr_length, sizeof(char));
+ SPDK_CU_ASSERT_FATAL(xattr != NULL);
+ rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
+ free(xattr);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
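+ /* Sizing sketch: the test treats 4072 bytes as the per-page descriptor
+ * capacity (presumably the 4 KiB page minus its header), so a value of
+ * 4072 - sizeof(descriptor) - strlen(name) fills one page exactly. */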
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Set the blob as the super blob */
+ spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ ut_bs_dirty_load(&bs, NULL);
+
+ /* Get the super blob */
+ spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(blobid1 == g_blobid);
+
+ spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ /* Get the xattrs */
+ value = NULL;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 20, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ ut_bs_dirty_load(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Create second blob */
+ blob = ut_blob_create_and_open(bs, NULL);
+ blobid2 = spdk_blob_get_id(blob);
+
+ /* Set some xattrs */
+ rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 5432;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ ut_bs_dirty_load(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Get the xattrs */
+ value = NULL;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ ut_blob_close_and_delete(bs, blob);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ ut_bs_dirty_load(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+ CU_ASSERT(g_blob == NULL);
+
+ spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, NULL);
+
+ /* Create second blob */
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid2 = g_blobid;
+
+ /* Create third blob */
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid3 = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Set some xattrs for second blob */
+ rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 5432;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Set some xattrs for third blob */
+ rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 5432;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Mark second blob as invalid */
+ page_num = bs_blobid_to_page(blobid2);
+
+ index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
+ page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
+ page->sequence_num = 1;
+ page->crc = blob_md_page_calc_crc(page);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ ut_bs_dirty_load(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+ CU_ASSERT(g_blob == NULL);
+
+ spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+}
+
+static void
+blob_flags(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
+ struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
+ struct spdk_blob_opts blob_opts;
+ int rc;
+
+ /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
+ blob_invalid = ut_blob_create_and_open(bs, NULL);
+ blobid_invalid = spdk_blob_get_id(blob_invalid);
+
+ blob_data_ro = ut_blob_create_and_open(bs, NULL);
+ blobid_data_ro = spdk_blob_get_id(blob_data_ro);
+
+ ut_spdk_blob_opts_init(&blob_opts);
+ blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
+ blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
+ blobid_md_ro = spdk_blob_get_id(blob_md_ro);
+ CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
+
+ /* Change the size of blob_data_ro to check if flags are serialized
+ * when the blob has a non-zero number of extents */
+ spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Set the xattr to check if flags are serialized
+ * when the blob has a non-zero number of xattrs */
+ rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ blob_invalid->invalid_flags = (1ULL << 63);
+ blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
+ blob_data_ro->data_ro_flags = (1ULL << 62);
+ blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
+ blob_md_ro->md_ro_flags = (1ULL << 61);
+ blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
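+
+ /* Expected semantics for unknown flag bits, verified after the reload
+ * below: unknown invalid_flags make the blob unopenable; unknown
+ * data_ro_flags force both data and md read-only; unknown md_ro_flags
+ * force only the metadata read-only. */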
+
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bserrno = -1;
+ spdk_blob_close(blob_invalid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob_invalid = NULL;
+ g_bserrno = -1;
+ spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob_data_ro = NULL;
+ g_bserrno = -1;
+ spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob_md_ro = NULL;
+
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ ut_bs_reload(&bs, NULL);
+
+ g_blob = NULL;
+ g_bserrno = 0;
+ spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+ CU_ASSERT(g_blob == NULL);
+
+ g_blob = NULL;
+ g_bserrno = -1;
+ spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob_data_ro = g_blob;
+ /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
+ CU_ASSERT(blob_data_ro->data_ro == true);
+ CU_ASSERT(blob_data_ro->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
+
+ g_blob = NULL;
+ g_bserrno = -1;
+ spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob_md_ro = g_blob;
+ CU_ASSERT(blob_md_ro->data_ro == false);
+ CU_ASSERT(blob_md_ro->md_ro == true);
+
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_blob_close_and_delete(bs, blob_data_ro);
+ ut_blob_close_and_delete(bs, blob_md_ro);
+}
+
+static void
+bs_version(void)
+{
+ struct spdk_bs_super_block *super;
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts blob_opts;
+ spdk_blob_id blobid;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /*
+ * Change the bs version on disk. This will allow us to
+ * test that the version does not get modified automatically
+ * when loading and unloading the blobstore.
+ */
+ super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
+ CU_ASSERT(super->version == SPDK_BS_VERSION);
+ CU_ASSERT(super->clean == 1);
+ super->version = 2;
+ /*
+ * Version 2 metadata does not have a used blobid mask, so clear
+ * those fields in the super block and zero the corresponding
+ * region on "disk". We will use this to ensure blob IDs are
+ * correctly reconstructed.
+ */
+ memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
+ super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
+ super->used_blobid_mask_start = 0;
+ super->used_blobid_mask_len = 0;
+ super->crc = blob_md_page_calc_crc(super);
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ CU_ASSERT(super->clean == 1);
+ bs = g_bs;
+
+ /*
+ * Create a blob, just to make sure that unloading it
+ * results in writing the super block (since metadata pages
+ * were allocated).
+ */
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ CU_ASSERT(super->version == 2);
+ CU_ASSERT(super->used_blobid_mask_start == 0);
+ CU_ASSERT(super->used_blobid_mask_len == 0);
+
+ dev = init_dev();
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ g_blob = NULL;
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ ut_blob_close_and_delete(bs, blob);
+
+ CU_ASSERT(super->version == 2);
+ CU_ASSERT(super->used_blobid_mask_start == 0);
+ CU_ASSERT(super->used_blobid_mask_len == 0);
+}
+
+static void
+blob_set_xattrs_test(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ const void *value;
+ size_t value_len;
+ char *xattr;
+ size_t xattr_length;
+ int rc;
+
+ /* Create blob with extra attributes */
+ ut_spdk_blob_opts_init(&opts);
+
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = _get_xattr_value;
+ opts.xattrs.count = 3;
+ opts.xattrs.ctx = &g_ctx;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+
+ /* Get the xattrs */
+ value = NULL;
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+	/* Try to get a non-existent attribute */
+
+ rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
+ CU_ASSERT(rc == -ENOENT);
+
+	/* Try an xattr exceeding the maximum descriptor length within a single page */
+ xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
+ strlen("large_xattr") + 1;
+ xattr = calloc(xattr_length, sizeof(char));
+ SPDK_CU_ASSERT_FATAL(xattr != NULL);
+ rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
+ free(xattr);
+ SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
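+	/* The xattr descriptor must fit in a single metadata page together with
+	 * its header and the attribute name, so (roughly) the largest value is
+	 * SPDK_BS_MAX_DESC_SIZE - sizeof(descriptor) - strlen(name). The "+ 1"
+	 * above makes the value one byte too large, hence the -ENOMEM. */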
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* NULL callback */
+ ut_spdk_blob_opts_init(&opts);
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = NULL;
+ opts.xattrs.count = 1;
+ opts.xattrs.ctx = &g_ctx;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
+
+ /* NULL values */
+ ut_spdk_blob_opts_init(&opts);
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = _get_xattr_value_null;
+ opts.xattrs.count = 1;
+ opts.xattrs.ctx = NULL;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+}
+
+static void
+blob_thin_prov_alloc(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ /* Set blob as thin provisioned */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(blob->active.num_clusters == 0);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
+
+ /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Grow it to 1TB - still unallocated */
+ spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 262144);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 262144);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
+	/* Since no clusters are allocated,
+	 * the number of metadata pages is expected to be minimal.
+	 */
+ CU_ASSERT(blob->active.num_pages == 1);
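+	/* The cluster map is serialized with run-length encoding (or, with
+	 * extent tables, unallocated extent pages are simply not written), so a
+	 * fully unallocated run - even 262144 clusters - collapses into a
+	 * single descriptor and one metadata page suffices. */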
+
+ /* Shrink the blob to 3 clusters - still unallocated */
+ spdk_blob_resize(blob, 3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 3);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 3);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ /* Check that clusters allocation and size is still the same */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 3);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_insert_cluster_msg_test(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+ uint64_t new_cluster = 0;
+ uint32_t cluster_num = 3;
+ uint32_t extent_page = 0;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ /* Set blob as thin provisioned */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 4;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(blob->active.num_clusters == 4);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
+ CU_ASSERT(blob->active.clusters[cluster_num] == 0);
+
+	/* Specify cluster_num to allocate; new_cluster will be returned and then
+	 * inserted on the md_thread. This simulates the behaviour when a cluster
+	 * is allocated after blob creation, e.g. in _spdk_bs_allocate_and_copy_cluster(). */
+ bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
+ CU_ASSERT(blob->active.clusters[cluster_num] == 0);
+
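+	/* The call above only claimed the cluster in the allocation bitmap; the
+	 * blob's cluster map is updated on the md thread, so the entry stays 0
+	 * until the message sent below is processed. */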
+ blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page,
+ blob_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(blob->active.clusters[cluster_num] != 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->active.clusters[cluster_num] != 0);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_thin_prov_rw(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel, *channel_thread1;
+ struct spdk_blob_opts opts;
+ uint64_t free_clusters;
+ uint64_t page_size;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ uint64_t write_bytes;
+ uint64_t read_bytes;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ page_size = spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(blob->active.num_clusters == 0);
+
+ /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xFF, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ write_bytes = g_dev_write_bytes;
+ read_bytes = g_dev_read_bytes;
+
+	/* Perform a write on thread 1. That will allocate a cluster on thread 0 via send_msg */
+ set_thread(1);
+ channel_thread1 = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel_thread1 != NULL);
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
+	/* Perform a write on thread 0. It will try to allocate a cluster,
+	 * but fail because the other thread issued the cluster allocation first. */
+ set_thread(0);
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
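+	/* Each thread reserves its own cluster at submission time, so two
+	 * clusters are temporarily claimed. Polling below resolves the race and
+	 * the losing thread returns its cluster, leaving exactly one in use. */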
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
+	/* For a thin-provisioned blob we need to write 20 pages plus one page of
+	 * metadata, and read 0 bytes */
+ if (g_use_extent_table) {
+ /* Add one more page for EXTENT_PAGE write */
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
+ } else {
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
+ }
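+	/* Accounting sketch, assuming the default 4 KiB page size: two 10-page
+	 * payload writes (20 pages) plus 1 metadata page is 21 * 4096 = 86016
+	 * bytes; the extent-table variant adds one extent page for
+	 * 22 * 4096 = 90112 bytes. */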
+ CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
+
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ ut_blob_close_and_delete(bs, blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ set_thread(1);
+ spdk_bs_free_io_channel(channel_thread1);
+ set_thread(0);
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_thin_prov_rle(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+ uint64_t page_size;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ uint64_t write_bytes;
+ uint64_t read_bytes;
+ uint64_t io_unit;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ page_size = spdk_bs_get_page_size(bs);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+	/* Specifically target the second cluster in the blob as the first allocation */
+ io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
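+	/* Worked example, assuming the defaults used by these UTs (1 MiB
+	 * clusters, 4 KiB pages, io unit == page): cluster 1 starts at page
+	 * 1 MiB / 4 KiB = 256, and with one io unit per page io_unit == 256. */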
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xFF, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ write_bytes = g_dev_write_bytes;
+ read_bytes = g_dev_read_bytes;
+
+ /* Issue write to second cluster in a blob */
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
+	/* For a thin-provisioned blob we need to write 10 pages plus one page of
+	 * metadata, and read 0 bytes */
+ if (g_use_extent_table) {
+ /* Add one more page for EXTENT_PAGE write */
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
+ } else {
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
+ }
+ CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
+
+ spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ /* Read second cluster after blob reload to confirm data written */
+ spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_thin_prov_rw_iov(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ uint64_t free_clusters;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_read[3];
+ struct iovec iov_write[3];
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(blob->active.num_clusters == 0);
+
+ /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
+
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+struct iter_ctx {
+ int current_iter;
+ spdk_blob_id blobid[4];
+};
+
+static void
+test_iter(void *arg, struct spdk_blob *blob, int bserrno)
+{
+ struct iter_ctx *iter_ctx = arg;
+ spdk_blob_id blobid;
+
+ CU_ASSERT(bserrno == 0);
+ blobid = spdk_blob_get_id(blob);
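+	/* Blobs are expected to be iterated in creation (ascending blobid)
+	 * order, so each callback invocation should see the next saved id. */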
+ CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
+}
+
+static void
+bs_load_iter_test(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct iter_ctx iter_ctx = { 0 };
+ struct spdk_blob *blob;
+ int i, rc;
+ struct spdk_bs_opts opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ for (i = 0; i < 4; i++) {
+ blob = ut_blob_create_and_open(bs, NULL);
+ iter_ctx.blobid[i] = spdk_blob_get_id(blob);
+
+ /* Just save the blobid as an xattr for testing purposes. */
+ rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
+ CU_ASSERT(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, i, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ g_bserrno = -1;
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ opts.iter_cb_fn = test_iter;
+ opts.iter_cb_arg = &iter_ctx;
+
+ /* Test blob iteration during load after a clean shutdown. */
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Dirty shutdown */
+ bs_free(bs);
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ opts.iter_cb_fn = test_iter;
+ iter_ctx.current_iter = 0;
+ opts.iter_cb_arg = &iter_ctx;
+
+ /* Test blob iteration during load after a dirty shutdown. */
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_snapshot_rw(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob, *snapshot;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid, snapshotid;
+ uint64_t free_clusters;
+ uint64_t cluster_size;
+ uint64_t page_size;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ uint64_t write_bytes;
+ uint64_t read_bytes;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ page_size = spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ memset(payload_read, 0xFF, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* Create snapshot from blob */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
+
+ write_bytes = g_dev_write_bytes;
+ read_bytes = g_dev_read_bytes;
+
+ memset(payload_write, 0xAA, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* For a clone we need to allocate and copy one cluster, update one page of metadata
+ * and then write 10 pages of payload.
+ */
+ if (g_use_extent_table) {
+ /* Add one more page for EXTENT_PAGE write */
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size);
+ } else {
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
+ }
+ CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
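+	/* Copy-on-write sketch: the first write to a cluster shared with the
+	 * snapshot reads the whole backing cluster (read delta == cluster_size),
+	 * writes it to a freshly allocated cluster, then applies the 10 payload
+	 * pages and the metadata update counted above. */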
+
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ /* Data on snapshot should not change after write to clone */
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ ut_blob_close_and_delete(bs, blob);
+ ut_blob_close_and_delete(bs, snapshot);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_snapshot_rw_iov(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob, *snapshot;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid, snapshotid;
+ uint64_t free_clusters;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_read[3];
+ struct iovec iov_write[3];
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Create snapshot from blob */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
+
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+ ut_blob_close_and_delete(bs, snapshot);
+}
+
+/**
+ * Inflate / decouple parent rw unit tests.
+ *
+ * --------------
+ * original blob: 0 1 2 3 4
+ * ,---------+---------+---------+---------+---------.
+ * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - |
+ * +---------+---------+---------+---------+---------+
+ * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - |
+ * +---------+---------+---------+---------+---------+
+ * blob | - |zzzzzzzzz| - | - | - |
+ * '---------+---------+---------+---------+---------'
+ * . . . . . .
+ * -------- . . . . . .
+ * inflate: . . . . . .
+ * ,---------+---------+---------+---------+---------.
+ * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
+ * '---------+---------+---------+---------+---------'
+ *
+ * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
+ * on snapshot2 and snapshot removed . . .
+ * . . . . . .
+ * ---------------- . . . . . .
+ * decouple parent: . . . . . .
+ * ,---------+---------+---------+---------+---------.
+ * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - |
+ * +---------+---------+---------+---------+---------+
+ * blob | - |zzzzzzzzz| - |yyyyyyyyy| - |
+ * '---------+---------+---------+---------+---------'
+ *
+ * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
+ * on snapshot2 removed and on snapshot still exists. Snapshot2
+ * should remain a clone of snapshot.
+ */
+static void
+_blob_inflate_rw(bool decouple_parent)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob, *snapshot, *snapshot2;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid, snapshotid, snapshot2id;
+ uint64_t free_clusters;
+ uint64_t cluster_size;
+
+ uint64_t payload_size;
+ uint8_t *payload_read;
+ uint8_t *payload_write;
+ uint8_t *payload_clone;
+
+ uint64_t pages_per_cluster;
+ uint64_t pages_per_payload;
+
+ int i;
+ spdk_blob_id ids[2];
+ size_t count;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
+ pages_per_payload = pages_per_cluster * 5;
+
+ payload_size = cluster_size * 5;
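+	/* With the defaults assumed by these UTs (1 MiB clusters, 4 KiB pages)
+	 * this works out to pages_per_cluster = 256, pages_per_payload = 1280
+	 * and a 5 MiB payload buffer. */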
+
+ payload_read = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_read != NULL);
+
+ payload_write = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_write != NULL);
+
+ payload_clone = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* 1) Initial read should return zeroed payload */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
+
+ /* Fill whole blob with a pattern, except last cluster (to be sure it
+ * isn't allocated) */
+ memset(payload_write, 0xE5, payload_size - cluster_size);
+ spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
+ pages_per_cluster, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* 2) Create snapshot from blob (first level) */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
+
+ /* Write every second cluster with a pattern.
+ *
+	 * The last cluster shouldn't be written, to be sure that neither the
+	 * snapshot nor the clone allocates it.
+	 *
+	 * payload_clone stores the expected result of reading "blob" at this
+	 * point and is used only to check data consistency on the clone before
+	 * and after inflation. Initially we fill it with the backing snapshot's
+	 * pattern used before.
+ */
+ memset(payload_clone, 0xE5, payload_size - cluster_size);
+ memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
+ memset(payload_write, 0xAA, payload_size);
+ for (i = 1; i < 5; i += 2) {
+ spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
+ pages_per_cluster, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Update expected result */
+ memcpy(payload_clone + (cluster_size * i), payload_write,
+ cluster_size);
+ }
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* Check data consistency on clone */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
+
+	/* 3) Create a second-level snapshot from the blob */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshot2id = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+ CU_ASSERT(snapshot2->data_ro == true);
+ CU_ASSERT(snapshot2->md_ro == true);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
+
+ CU_ASSERT(snapshot2->parent_id == snapshotid);
+
+	/* Write one cluster on the top level blob. This cluster (1) covers an
+	 * already allocated cluster in snapshot2, so it shouldn't be inflated
+	 * at all */
+ spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
+ pages_per_cluster, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Update expected result */
+ memcpy(payload_clone + cluster_size, payload_write, cluster_size);
+
+ /* Check data consistency on clone */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
+
+
+ /* Close all blobs */
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Check snapshot-clone relations */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == snapshot2id);
+
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ if (!decouple_parent) {
+ /* Do full blob inflation */
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+		/* All clusters should be inflated (except the one already allocated
+		 * in the top level blob) */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
+
+ /* Check if relation tree updated correctly */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+
+		/* snapshotid has one clone */
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == snapshot2id);
+
+		/* snapshot2id has no clones */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
+ CU_ASSERT(count == 0);
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
+ } else {
+ /* Decouple parent of blob */
+ spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+		/* Only one cluster from the parent should be inflated (the second
+		 * one is covered by a cluster already written on the top level
+		 * blob, and thus already allocated) */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
+
+ /* Check if relation tree updated correctly */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+
+		/* snapshotid has two clones now */
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
+
+		/* snapshot2id has no clones */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
+ CU_ASSERT(count == 0);
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+ }
+
+ /* Try to delete snapshot2 (should pass) */
+ spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to delete base snapshot */
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Reopen blob after snapshot deletion */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Check data consistency on inflated blob */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ free(payload_read);
+ free(payload_write);
+ free(payload_clone);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_inflate_rw(void)
+{
+ _blob_inflate_rw(false);
+ _blob_inflate_rw(true);
+}
+
+/**
+ * Snapshot-clones relation test
+ *
+ * snapshot
+ * |
+ * +-----+-----+
+ * | |
+ * blob(ro) snapshot2
+ * | |
+ * clone2 clone
+ */
+static void
+blob_relations(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
+ spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
+ int rc;
+ size_t count;
+ spdk_blob_id ids[10] = {};
+
+ dev = init_dev();
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
+
+ spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* 1. Create blob with 10 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ CU_ASSERT(!spdk_blob_is_read_only(blob));
+ CU_ASSERT(!spdk_blob_is_snapshot(blob));
+ CU_ASSERT(!spdk_blob_is_clone(blob));
+ CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
+
+ /* blob should not have underlying snapshot nor clones */
+ CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+
+ /* 2. Create snapshot */
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+
+ CU_ASSERT(spdk_blob_is_read_only(snapshot));
+ CU_ASSERT(spdk_blob_is_snapshot(snapshot));
+ CU_ASSERT(!spdk_blob_is_clone(snapshot));
+ CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
+
+ /* Check if original blob is converted to the clone of snapshot */
+ CU_ASSERT(!spdk_blob_is_read_only(blob));
+ CU_ASSERT(!spdk_blob_is_snapshot(blob));
+ CU_ASSERT(spdk_blob_is_clone(blob));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
+ CU_ASSERT(blob->parent_id == snapshotid);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+
+ /* 3. Create clone from snapshot */
+
+ spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+
+ CU_ASSERT(!spdk_blob_is_read_only(clone));
+ CU_ASSERT(!spdk_blob_is_snapshot(clone));
+ CU_ASSERT(spdk_blob_is_clone(clone));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
+ CU_ASSERT(clone->parent_id == snapshotid);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* Check if clone is on the snapshot's list */
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
+
+
+ /* 4. Create snapshot of the clone */
+
+ spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+
+ CU_ASSERT(spdk_blob_is_read_only(snapshot2));
+ CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
+ CU_ASSERT(spdk_blob_is_clone(snapshot2));
+ CU_ASSERT(snapshot2->parent_id == snapshotid);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
+
+ /* Check if clone is converted to the clone of snapshot2 and snapshot2
+ * is a child of snapshot */
+ CU_ASSERT(!spdk_blob_is_read_only(clone));
+ CU_ASSERT(!spdk_blob_is_snapshot(clone));
+ CU_ASSERT(spdk_blob_is_clone(clone));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
+ CU_ASSERT(clone->parent_id == snapshotid2);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+
+ /* 5. Try to create clone from read only blob */
+
+ /* Mark blob as read only */
+ spdk_blob_set_read_only(blob);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Check if previously created blob is read only clone */
+ CU_ASSERT(spdk_blob_is_read_only(blob));
+ CU_ASSERT(!spdk_blob_is_snapshot(blob));
+ CU_ASSERT(spdk_blob_is_clone(blob));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
+
+ /* Create clone from read only blob */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone2 = g_blob;
+
+ CU_ASSERT(!spdk_blob_is_read_only(clone2));
+ CU_ASSERT(!spdk_blob_is_snapshot(clone2));
+ CU_ASSERT(spdk_blob_is_clone(clone2));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid2);
+
+ /* Close blobs */
+
+ spdk_blob_close(clone2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to delete snapshot with more than 1 clone */
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ ut_bs_reload(&bs, &bs_opts);
+
+	/* A NULL ids array should return the number of clones in count */
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
+ CU_ASSERT(rc == -ENOMEM);
+ CU_ASSERT(count == 2);
+
+ /* incorrect array size */
+ count = 1;
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == -ENOMEM);
+ CU_ASSERT(count == 2);
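+	/* spdk_blob_get_clones() follows the usual two-call sizing pattern:
+	 * query with a NULL (or too small) array to learn the required count,
+	 * then call again with an array of that size. */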
+
+
+ /* Verify structure of loaded blob store */
+
+ /* snapshot */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
+
+ /* blob */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid2);
+
+ /* clone */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* snapshot2 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* clone2 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* Try to delete blob that user should not be able to remove */
+
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Remove all blobs */
+
+ spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bs = NULL;
+}
+
+/**
+ * Snapshot-clones relation test 2
+ *
+ * snapshot1
+ * |
+ * snapshot2
+ * |
+ * +-----+-----+
+ * | |
+ * blob(ro) snapshot3
+ * | |
+ * | snapshot4
+ * | | |
+ * clone2 clone clone3
+ */
+static void
+blob_relations2(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
+ spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
+ cloneid3;
+ int rc;
+ size_t count;
+ spdk_blob_id ids[10] = {};
+
+ dev = init_dev();
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
+
+ spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* 1. Create blob with 10 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ /* 2. Create snapshot1 */
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid1 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot1 = g_blob;
+
+ CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
+
+ CU_ASSERT(blob->parent_id == snapshotid1);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
+
+ /* Check if blob is the clone of snapshot1 */
+ CU_ASSERT(blob->parent_id == snapshotid1);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+ /* 3. Create another snapshot */
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+
+ CU_ASSERT(spdk_blob_is_clone(snapshot2));
+ CU_ASSERT(snapshot2->parent_id == snapshotid1);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
+
+ /* Check if snapshot2 is the clone of snapshot1 and blob
+ * is a child of snapshot2 */
+ CU_ASSERT(blob->parent_id == snapshotid2);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+ /* 4. Create clone from snapshot */
+
+ spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+
+ CU_ASSERT(clone->parent_id == snapshotid2);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
+
+ /* Check if clone is on the snapshot's list */
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
+
+ /* 5. Create snapshot of the clone */
+
+ spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid3 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot3 = g_blob;
+
+ CU_ASSERT(snapshot3->parent_id == snapshotid2);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
+
+ /* Check if clone is converted to the clone of snapshot3 and snapshot3
+ * is a child of snapshot2 */
+ CU_ASSERT(clone->parent_id == snapshotid3);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* 6. Create another snapshot of the clone */
+
+ spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid4 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot4 = g_blob;
+
+ CU_ASSERT(snapshot4->parent_id == snapshotid3);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
+
+ /* Check if clone is converted to the clone of snapshot4 and snapshot4
+ * is a child of snapshot3 */
+ CU_ASSERT(clone->parent_id == snapshotid4);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* 7. Remove snapshot 4 */
+
+ ut_blob_close_and_delete(bs, snapshot4);
+
+ /* Check if relations are back to state from before creating snapshot 4 */
+ CU_ASSERT(clone->parent_id == snapshotid3);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
+
+ spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid3 = g_blobid;
+
+ spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ /* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
+
+ spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot3 = g_blob;
+
+ spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_blob_close(snapshot3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* 10. Remove snapshot 1 */
+
+ ut_blob_close_and_delete(bs, snapshot1);
+
+ /* Check if relations are back to state from before creating snapshot 4 (before step 6) */
+ CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
+
+ /* 11. Try to create clone from read only blob */
+
+ /* Mark blob as read only */
+ spdk_blob_set_read_only(blob);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create clone from read only blob */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone2 = g_blob;
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid2);
+
+ /* Close blobs */
+
+ spdk_blob_close(clone2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, &bs_opts);
+
+ /* Verify structure of loaded blob store */
+
+ /* snapshot2 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
+
+ /* blob */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid2);
+
+ /* clone */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* snapshot3 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* clone2 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+	/* Try to delete all blobs in the worst possible order */
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bs = NULL;
+}
+
+static void
+blobstore_clean_power_failure(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_blob *blob;
+ struct spdk_power_failure_thresholds thresholds = {};
+ bool clean = false;
+ struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
+ struct spdk_bs_super_block super_copy = {};
+
+ thresholds.general_threshold = 1;
+ while (!clean) {
+ /* Create bs and blob */
+ suite_blob_setup();
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ bs = g_bs;
+ blob = g_blob;
+
+ /* The super block should not change for the rest of the UT;
+ * save it and compare against it later. */
+ memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
+ SPDK_CU_ASSERT_FATAL(super->clean == 0);
+ SPDK_CU_ASSERT_FATAL(bs->clean == 0);
+
+ /* Force the bs/super block into a clean state,
+ * and mark the blob dirty to force a blob persist. */
+ blob->state = SPDK_BLOB_STATE_DIRTY;
+ bs->clean = 1;
+ super->clean = 1;
+ super->crc = blob_md_page_calc_crc(super);
+
+ g_bserrno = -1;
+ dev_set_power_failure_thresholds(thresholds);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ dev_reset_power_failure_event();
+
+ if (g_bserrno == 0) {
+ /* After successful md sync, both bs and super block
+ * should be marked as not clean. */
+ SPDK_CU_ASSERT_FATAL(bs->clean == 0);
+ SPDK_CU_ASSERT_FATAL(super->clean == 0);
+ clean = true;
+ }
+
+ /* Depending on the point of failure, super block was either updated or not. */
+ super_copy.clean = super->clean;
+ super_copy.crc = blob_md_page_calc_crc(&super_copy);
+ /* Compare that the values in super block remained unchanged. */
+ SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
+
+ /* Delete blob and unload bs */
+ suite_blob_cleanup();
+
+ thresholds.general_threshold++;
+ }
+}
+
+static void
+blob_delete_snapshot_power_failure(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_blob_store *bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ struct spdk_power_failure_thresholds thresholds = {};
+ spdk_blob_id blobid, snapshotid;
+ const void *value;
+ size_t value_len;
+ size_t count;
+ spdk_blob_id ids[3] = {};
+ int rc;
+ bool deleted = false;
+ int delete_snapshot_bserrno = -1;
+
+ thresholds.general_threshold = 1;
+ while (!deleted) {
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ /* Create snapshot */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
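+ /* Cluster 0 holds blobstore metadata and clusters 1-10 back the blob, so cluster 11 must remain free. */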
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1));
+ SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11));
+
+ dev_set_power_failure_thresholds(thresholds);
+
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ delete_snapshot_bserrno = g_bserrno;
+
+ /* Do not shut down cleanly. The assumption is that once snapshot
+ * deletion reports success, the changes to both blobs are already persisted. */
+ dev_reset_power_failure_event();
+ ut_bs_dirty_load(&bs, NULL);
+
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1));
+ SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11));
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
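+ /* Creating the snapshot earlier turned the blob into a thin-provisioned clone of it. */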
+ SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+
+ if (g_bserrno == 0) {
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+ rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
+ CU_ASSERT(rc != 0);
+ SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ } else {
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
+ /* The snapshot might have been left in an unrecoverable state, so it does not open.
+ * Deletion may still make further changes to the clone after that point.
+ * This UT keeps iterating until the snapshot is gone and the delete call succeeds. */
+ if (delete_snapshot_bserrno == 0) {
+ deleted = true;
+ }
+ }
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ thresholds.general_threshold++;
+ }
+}
+
+static void
+blob_create_snapshot_power_failure(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ struct spdk_power_failure_thresholds thresholds = {};
+ spdk_blob_id blobid, snapshotid;
+ const void *value;
+ size_t value_len;
+ size_t count;
+ spdk_blob_id ids[3] = {};
+ int rc;
+ bool created = false;
+ int create_snapshot_bserrno = -1;
+
+ thresholds.general_threshold = 1;
+ while (!created) {
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1));
+ SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11));
+
+ dev_set_power_failure_thresholds(thresholds);
+
+ /* Create snapshot */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ create_snapshot_bserrno = g_bserrno;
+ snapshotid = g_blobid;
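+ /* If creation failed early, snapshotid may still be SPDK_BLOBID_INVALID. */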
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1));
+ SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11));
+
+ /* Do not shut down cleanly. The assumption is that once snapshot
+ * creation reports success, both blobs are power-fail safe. */
+ dev_reset_power_failure_event();
+ ut_bs_dirty_load(&bs, NULL);
+
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1));
+ SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11));
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ if (snapshotid != SPDK_BLOBID_INVALID) {
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ }
+
+ if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
+ SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+ rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ if (create_snapshot_bserrno == 0) {
+ created = true;
+ }
+ } else {
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
+ SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
+ }
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ thresholds.general_threshold++;
+ }
+}
+
+static void
+test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Try to perform I/O with io unit = 512 */
+ spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* If thin provisioning is set, the cluster should be allocated by now */
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+
+ /* Each character 0-F represents a single 512-byte io_unit filled with that character.
+ * Pages are separated by '|' and a whole [...] block represents one cluster (4 pages),
+ * so "F0F0" means io_units 0 and 2 hold 0xFF while io_units 1 and 3 hold zeroes. */
+ /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Verify write with offset on first page */
+ spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify a longer write, still within the first page */
+ spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Verify write with offset on second page */
+ spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple pages */
+ spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple clusters */
+ spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
+ poll_threads();
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify write to second cluster */
+ spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
+ poll_threads();
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
+}
+
+static void
+test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_read[64 * 512];
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Read only first io unit */
+ /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F000 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Read four io_units starting from offset = 2
+ * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F0AA 0000 | 0000 0000 ... */
+
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read eight io_units across multiple pages
+ * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: AAAA AAAA | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read eight io_units across multiple clusters
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
+ * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: FFFF FFFF | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read four io_units from second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
+ * payload_read: 00FF 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
+ * payload_read: FFFF 0000 | 0000 FF00 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
+
+ /* Read whole two clusters
+ * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
+}
+
+
+static void
+test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* Unmap */
+ spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
+}
+
+static void
+test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* Write zeroes */
+ spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
+}
+
+
+static void
+test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+ struct iovec iov[4];
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Try to perform I/O with io unit = 512 */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 1 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* If thin provisioning is set, the cluster should be allocated by now */
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+
+ /* Each character 0-F represents a single 512-byte io_unit filled with that character.
+ * Pages are separated by '|' and a whole [...] block represents one cluster (4 pages),
+ * so "F0F0" means io_units 0 and 2 hold 0xFF while io_units 1 and 3 hold zeroes. */
+ /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Verify write with offset on first page */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 1 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify a longer write, still within the first page */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 4 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Verify write with offset on second page */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 4 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple pages */
+ iov[0].iov_base = payload_aa;
+ iov[0].iov_len = 8 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple clusters */
+
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 8 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL);
+ poll_threads();
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify write to second cluster */
+
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 2 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL);
+ poll_threads();
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
+}
+
+static void
+test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_read[64 * 512];
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ struct iovec iov[4];
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Read only first io unit */
+ /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F000 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Read four io_units starting from offset = 2
+ * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F0AA 0000 | 0000 0000 ... */
+
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 4 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read eight io_units across multiple pages
+ * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: AAAA AAAA | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 4 * 512;
+ iov[1].iov_base = payload_read + 4 * 512;
+ iov[1].iov_len = 4 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read eight io_units across multiple clusters
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
+ * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: FFFF FFFF | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 2 * 512;
+ iov[1].iov_base = payload_read + 2 * 512;
+ iov[1].iov_len = 2 * 512;
+ iov[2].iov_base = payload_read + 4 * 512;
+ iov[2].iov_len = 2 * 512;
+ iov[3].iov_base = payload_read + 6 * 512;
+ iov[3].iov_len = 2 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read four io_units from second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
+ * payload_read: 00FF 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ iov[1].iov_base = payload_read + 1 * 512;
+ iov[1].iov_len = 3 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
+ * payload_read: FFFF 0000 | 0000 FF00 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ iov[1].iov_base = payload_read + 1 * 512;
+ iov[1].iov_len = 2 * 512;
+ iov[2].iov_base = payload_read + 3 * 512;
+ iov[2].iov_len = 4 * 512;
+ iov[3].iov_base = payload_read + 7 * 512;
+ iov[3].iov_len = 25 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
+
+ /* Read whole two clusters
+ * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ iov[1].iov_base = payload_read + 1 * 512;
+ iov[1].iov_len = 8 * 512;
+ iov[2].iov_base = payload_read + 9 * 512;
+ iov[2].iov_len = 16 * 512;
+ iov[3].iov_base = payload_read + 25 * 512;
+ iov[3].iov_len = 39 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
+}
+
+static void
+blob_io_unit(void)
+{
+ struct spdk_bs_opts bsopts;
+ struct spdk_blob_opts opts;
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob, *snapshot, *clone;
+ spdk_blob_id blobid;
+ struct spdk_io_channel *channel;
+
+ /* Create a dev with a 512-byte io unit size */
+
+ spdk_bs_opts_init(&bsopts);
+ bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units per page = 32 io_units per cluster */
+ snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
+
+ /* Back the blob store with a device that uses 512-byte blocks */
+ dev = init_dev();
+ dev->blocklen = 512;
+ dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
+ channel = spdk_bs_alloc_io_channel(bs);
+
+ /* Create thick provisioned blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = false;
+ opts.num_clusters = 32;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ test_io_write(dev, blob, channel);
+ test_io_read(dev, blob, channel);
+ test_io_zeroes(dev, blob, channel);
+
+ test_iov_write(dev, blob, channel);
+ test_iov_read(dev, blob, channel);
+
+ test_io_unmap(dev, blob, channel);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+
+ /* Create thin provisioned blob */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 32;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ test_io_write(dev, blob, channel);
+ test_io_read(dev, blob, channel);
+
+ test_io_zeroes(dev, blob, channel);
+
+ test_iov_write(dev, blob, channel);
+ test_iov_read(dev, blob, channel);
+
+ /* Create snapshot */
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ snapshot = g_blob;
+
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ clone = g_blob;
+
+ test_io_read(dev, blob, channel);
+ test_io_read(dev, snapshot, channel);
+ test_io_read(dev, clone, channel);
+
+ test_iov_read(dev, blob, channel);
+ test_iov_read(dev, snapshot, channel);
+ test_iov_read(dev, clone, channel);
+
+ /* Inflate clone */
+
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(g_bserrno == 0);
+
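+ /* After inflation the clone is thick-provisioned and no longer shares data with the snapshot. */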
+ test_io_read(dev, clone, channel);
+
+ test_io_unmap(dev, clone, channel);
+
+ test_iov_write(dev, clone, channel);
+ test_iov_read(dev, clone, channel);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_io_unit_compatibility(void)
+{
+ struct spdk_bs_opts bsopts;
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super;
+
+ /* Create a dev with a 512-byte io unit size */
+
+ spdk_bs_opts_init(&bsopts);
+ bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units per page = 32 io_units per cluster */
+ snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
+
+ /* Back the blob store with a device that uses 512-byte blocks */
+ dev = init_dev();
+ dev->blocklen = 512;
+ dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Modify the super block to mimic an older version that did not record
+ * io_unit_size, and check that the loaded io unit size falls back to SPDK_BS_PAGE_SIZE. */
+ super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
+ super->io_unit_size = 0;
+ super->crc = blob_md_page_calc_crc(super);
+
+ dev = init_dev();
+ dev->blocklen = 512;
+ dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
+
+ spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_simultaneous_operations(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ spdk_blob_id blobid, snapshotid;
+ struct spdk_io_channel *channel;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ /* Create a snapshot and try to delete the blob at the same time:
+ * - the snapshot should be created successfully
+ * - the delete operation should fail with -EBUSY */
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ /* Deletion failure */
+ CU_ASSERT(g_bserrno == -EBUSY);
+ poll_threads();
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ /* Snapshot creation success */
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+
+ /* Inflate the blob and try to delete it at the same time:
+ * - the blob should be inflated successfully
+ * - the delete operation should fail with -EBUSY */
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ /* Deletion failure */
+ CU_ASSERT(g_bserrno == -EBUSY);
+ poll_threads();
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ /* Inflation success */
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Clone the snapshot and try to delete the snapshot at the same time:
+ * - the snapshot should be cloned successfully
+ * - the delete operation should fail with -EBUSY */
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ /* Deletion failure */
+ CU_ASSERT(g_bserrno == -EBUSY);
+ poll_threads();
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ /* Clone created */
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Resize the blob and try to delete it at the same time:
+ * - the blob should be resized successfully
+ * - the delete operation should fail with -EBUSY */
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ spdk_blob_resize(blob, 50, blob_op_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ /* Deletion failure */
+ CU_ASSERT(g_bserrno == -EBUSY);
+ poll_threads();
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ /* Blob resized successfully */
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Issue two consecutive blob syncs; neither should fail.
+ * Force each sync to actually occur by marking the blob dirty first.
+ * Merely calling sync is not enough to complete the operation,
+ * since disk I/O is required to finish it. */
+ g_bserrno = -1;
+
+ blob->state = SPDK_BLOB_STATE_DIRTY;
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_bserrno == -1);
+
+ blob->state = SPDK_BLOB_STATE_DIRTY;
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_bserrno == -1);
+
+ uint32_t completions = 0;
+ while (completions < 2) {
+ SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1));
+ if (g_bserrno == 0) {
+ g_bserrno = -1;
+ completions++;
+ }
+ /* g_bserrno should never be anything other than -1 here;
+ * any other value would mean one of the syncs failed. */
+ SPDK_CU_ASSERT_FATAL(g_bserrno == -1);
+ }
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, snapshot);
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_persist_test(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid;
+ struct spdk_io_channel *channel;
+ char *xattr;
+ size_t xattr_length;
+ int rc;
+ uint32_t page_count_clear, page_count_xattr;
+ uint64_t poller_iterations;
+ bool run_poller;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ /* Save the number of md pages used right after blob creation;
+ * it should be the same again once the xattr is removed. */
+ page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
+ SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
+ SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
+
+ /* Add an xattr with the maximum descriptor length, so the blob metadata no longer fits in a single page. */
+ xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
+ strlen("large_xattr");
+ xattr = calloc(xattr_length, sizeof(char));
+ SPDK_CU_ASSERT_FATAL(xattr != NULL);
+
+ rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
+
+ /* Save the amount of md pages used after adding the large xattr */
+ page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
+ SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
+ SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
+
+ /* Add an xattr to the blob and sync it. While the sync is in flight, remove the xattr and sync again.
+ * Interrupt the first sync after an increasing number of poller iterations, until it finally completes.
+ * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
+ poller_iterations = 1;
+ run_poller = true;
+ while (run_poller) {
+ rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_thread_times(0, poller_iterations);
+ if (g_bserrno == 0) {
+ /* The poller iteration count was high enough for the first sync to complete.
+ * Verify that the blob occupies enough md_pages to store the xattr. */
+ SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
+ SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
+ run_poller = false;
+ }
+ rc = spdk_blob_remove_xattr(blob, "large_xattr");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
+ SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
+
+ /* Reload bs and re-open blob to verify that xattr was not persisted. */
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
+ SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
+
+ poller_iterations++;
+ /* Stop at a high iteration count to prevent an infinite loop.
+ * This value should always be enough for the first md sync to complete. */
+ SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
+ }
+
+ free(xattr);
+
+ ut_blob_close_and_delete(bs, blob);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+suite_bs_setup(void)
+{
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+ memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_bs != NULL);
+}
+
+static void
+suite_bs_cleanup(void)
+{
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
+}
+
+static struct spdk_blob *
+ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
+{
+ struct spdk_blob *blob;
+ struct spdk_blob_opts create_blob_opts;
+ spdk_blob_id blobid;
+
+ if (blob_opts == NULL) {
+ ut_spdk_blob_opts_init(&create_blob_opts);
+ blob_opts = &create_blob_opts;
+ }
+
+ spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+ g_blobid = -1;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ g_blob = NULL;
+ g_bserrno = -1;
+
+ return blob;
+}
+
+static void
+ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
+{
+ spdk_blob_id blobid = spdk_blob_get_id(blob);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_blob = NULL;
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bserrno = -1;
+}
+
+static void
+suite_blob_setup(void)
+{
+ suite_bs_setup();
+ CU_ASSERT(g_bs != NULL);
+
+ g_blob = ut_blob_create_and_open(g_bs, NULL);
+ CU_ASSERT(g_blob != NULL);
+}
+
+static void
+suite_blob_cleanup(void)
+{
+ ut_blob_close_and_delete(g_bs, g_blob);
+ CU_ASSERT(g_blob == NULL);
+
+ suite_bs_cleanup();
+ CU_ASSERT(g_bs == NULL);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite, suite_bs, suite_blob;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("blob", NULL, NULL);
+ suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
+ suite_bs_setup, suite_bs_cleanup);
+ suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
+ suite_blob_setup, suite_blob_cleanup);
+
+ CU_ADD_TEST(suite, blob_init);
+ CU_ADD_TEST(suite_bs, blob_open);
+ CU_ADD_TEST(suite_bs, blob_create);
+ CU_ADD_TEST(suite_bs, blob_create_fail);
+ CU_ADD_TEST(suite_bs, blob_create_internal);
+ CU_ADD_TEST(suite, blob_thin_provision);
+ CU_ADD_TEST(suite_bs, blob_snapshot);
+ CU_ADD_TEST(suite_bs, blob_clone);
+ CU_ADD_TEST(suite_bs, blob_inflate);
+ CU_ADD_TEST(suite_bs, blob_delete);
+ CU_ADD_TEST(suite_bs, blob_resize_test);
+ CU_ADD_TEST(suite, blob_read_only);
+ CU_ADD_TEST(suite_bs, channel_ops);
+ CU_ADD_TEST(suite_bs, blob_super);
+ CU_ADD_TEST(suite_blob, blob_write);
+ CU_ADD_TEST(suite_blob, blob_read);
+ CU_ADD_TEST(suite_blob, blob_rw_verify);
+ CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
+ CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
+ CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
+ CU_ADD_TEST(suite_bs, blob_unmap);
+ CU_ADD_TEST(suite_bs, blob_iter);
+ CU_ADD_TEST(suite_blob, blob_xattr);
+ CU_ADD_TEST(suite, bs_load);
+ CU_ADD_TEST(suite_bs, bs_load_pending_removal);
+ CU_ADD_TEST(suite, bs_load_custom_cluster_size);
+ CU_ADD_TEST(suite_bs, bs_unload);
+ CU_ADD_TEST(suite, bs_cluster_sz);
+ CU_ADD_TEST(suite_bs, bs_usable_clusters);
+ CU_ADD_TEST(suite, bs_resize_md);
+ CU_ADD_TEST(suite, bs_destroy);
+ CU_ADD_TEST(suite, bs_type);
+ CU_ADD_TEST(suite, bs_super_block);
+ CU_ADD_TEST(suite, blob_serialize_test);
+ CU_ADD_TEST(suite_bs, blob_crc);
+ CU_ADD_TEST(suite, super_block_crc);
+ CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
+ CU_ADD_TEST(suite_bs, blob_flags);
+ CU_ADD_TEST(suite_bs, bs_version);
+ CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
+ CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
+ CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
+ CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
+ CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
+ CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
+ CU_ADD_TEST(suite, bs_load_iter_test);
+ CU_ADD_TEST(suite_bs, blob_snapshot_rw);
+ CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
+ CU_ADD_TEST(suite, blob_relations);
+ CU_ADD_TEST(suite, blob_relations2);
+ CU_ADD_TEST(suite, blobstore_clean_power_failure);
+ CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
+ CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
+ CU_ADD_TEST(suite_bs, blob_inflate_rw);
+ CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
+ CU_ADD_TEST(suite_bs, blob_operation_split_rw);
+ CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
+ CU_ADD_TEST(suite, blob_io_unit);
+ CU_ADD_TEST(suite, blob_io_unit_compatibility);
+ CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
+ CU_ADD_TEST(suite_bs, blob_persist_test);
+
+ allocate_threads(2);
+ set_thread(0);
+
+ g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
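+ /* Run the full suite twice: first with the extent-table descriptor disabled, then enabled. */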
+ g_use_extent_table = false;
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ g_use_extent_table = true;
+ CU_basic_run_tests();
+ num_failures += CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free(g_dev_buffer);
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blob/bs_dev_common.c b/src/spdk/test/unit/lib/blob/bs_dev_common.c
new file mode 100644
index 000000000..4e94fef8b
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/bs_dev_common.c
@@ -0,0 +1,395 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/thread.h"
+#include "bs_scheduler.c"
+
+
+#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
+#define DEV_BUFFER_BLOCKLEN (4096)
+#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
+uint8_t *g_dev_buffer;
+uint64_t g_dev_write_bytes;
+uint64_t g_dev_read_bytes;
+
+struct spdk_power_failure_counters {
+ uint64_t general_counter;
+ uint64_t read_counter;
+ uint64_t write_counter;
+ uint64_t unmap_counter;
+ uint64_t write_zero_counter;
+ uint64_t flush_counter;
+};
+
+static struct spdk_power_failure_counters g_power_failure_counters = {};
+
+struct spdk_power_failure_thresholds {
+ uint64_t general_threshold;
+ uint64_t read_threshold;
+ uint64_t write_threshold;
+ uint64_t unmap_threshold;
+ uint64_t write_zero_threshold;
+ uint64_t flush_threshold;
+};
+
+static struct spdk_power_failure_thresholds g_power_failure_thresholds = {};
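+
+/* The counters track I/Os issued since the last reset; once a non-zero
+ * threshold is reached, that I/O and every subsequent one fails with -EIO. */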
+
+static uint64_t g_power_failure_rc;
+
+void dev_reset_power_failure_event(void);
+void dev_reset_power_failure_counters(void);
+void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds);
+
+void
+dev_reset_power_failure_event(void)
+{
+ memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
+ memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds));
+ g_power_failure_rc = 0;
+}
+
+void
+dev_reset_power_failure_counters(void)
+{
+ memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
+ g_power_failure_rc = 0;
+}
+
+/**
+ * Set a power failure event. The power failure will occur after a given
+ * number of IO operations: either after a number of operations of a
+ * particular type (read, write, unmap, write zero or flush) or after a
+ * given number of IO operations of any type (general_threshold). A value
+ * of 0 disables that threshold; any other value is the ordinal of the
+ * operation at which the power failure starts to happen.
+ */
+void
+dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds)
+{
+ g_power_failure_thresholds = thresholds;
+}
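+
+/*
+ * Illustrative usage, mirroring the power-failure UTs in blob_ut.c:
+ * fail the device starting from the third I/O of any type.
+ *
+ *    struct spdk_power_failure_thresholds thresholds = {};
+ *
+ *    thresholds.general_threshold = 3;
+ *    dev_set_power_failure_thresholds(thresholds);
+ *    (run blobstore operations; the third device I/O and every
+ *     subsequent one completes with -EIO)
+ *    dev_reset_power_failure_event();
+ */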
+
+/* Define here for UT only. */
+struct spdk_io_channel g_io_channel;
+
+static struct spdk_io_channel *
+dev_create_channel(struct spdk_bs_dev *dev)
+{
+ return &g_io_channel;
+}
+
+static void
+dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
+{
+}
+
+static void
+dev_destroy(struct spdk_bs_dev *dev)
+{
+ free(dev);
+}
+
+
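+/* I/O completions are delivered asynchronously via a thread message, as a real bdev would. */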
+static void
+dev_complete_cb(void *arg)
+{
+ struct spdk_bs_dev_cb_args *cb_args = arg;
+
+ cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc);
+}
+
+static void
+dev_complete(void *arg)
+{
+ _bs_send_msg(dev_complete_cb, arg, NULL);
+}
+
+static void
+dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ if (g_power_failure_thresholds.read_threshold != 0) {
+ g_power_failure_counters.read_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.read_threshold == 0 ||
+ g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+
+ memcpy(payload, &g_dev_buffer[offset], length);
+ g_dev_read_bytes += length;
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ if (g_power_failure_thresholds.write_threshold != 0) {
+ g_power_failure_counters.write_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.write_threshold == 0 ||
+ g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+
+ memcpy(&g_dev_buffer[offset], payload, length);
+ g_dev_write_bytes += length;
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
+{
+ int i;
+
+ for (i = 0; i < iovcnt; i++) {
+ length -= iov[i].iov_len;
+ }
+
+ CU_ASSERT(length == 0);
+}
+
+static void
+dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+ int i;
+
+ if (g_power_failure_thresholds.read_threshold != 0) {
+ g_power_failure_counters.read_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.read_threshold == 0 ||
+ g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ __check_iov(iov, iovcnt, length);
+
+ for (i = 0; i < iovcnt; i++) {
+ memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
+ offset += iov[i].iov_len;
+ }
+
+ g_dev_read_bytes += length;
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+ int i;
+
+ if (g_power_failure_thresholds.write_threshold != 0) {
+ g_power_failure_counters.write_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.write_threshold == 0 ||
+ g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ __check_iov(iov, iovcnt, length);
+
+ for (i = 0; i < iovcnt; i++) {
+ memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
+ offset += iov[i].iov_len;
+ }
+
+ g_dev_write_bytes += length;
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ if (g_power_failure_thresholds.flush_threshold != 0) {
+ g_power_failure_counters.flush_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.flush_threshold != 0 &&
+ g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold) ||
+ (g_power_failure_thresholds.general_threshold != 0 &&
+ g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ if (g_power_failure_thresholds.unmap_threshold != 0) {
+ g_power_failure_counters.unmap_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.unmap_threshold == 0 ||
+ g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ memset(&g_dev_buffer[offset], 0, length);
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ if (g_power_failure_thresholds.write_zero_threshold != 0) {
+ g_power_failure_counters.write_zero_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.write_zero_threshold == 0 ||
+ g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ memset(&g_dev_buffer[offset], 0, length);
+ g_dev_write_bytes += length;
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static struct spdk_bs_dev *
+init_dev(void)
+{
+ struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));
+
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+
+ dev->create_channel = dev_create_channel;
+ dev->destroy_channel = dev_destroy_channel;
+ dev->destroy = dev_destroy;
+ dev->read = dev_read;
+ dev->write = dev_write;
+ dev->readv = dev_readv;
+ dev->writev = dev_writev;
+ dev->flush = dev_flush;
+ dev->unmap = dev_unmap;
+ dev->write_zeroes = dev_write_zeroes;
+ dev->blockcnt = DEV_BUFFER_BLOCKCNT;
+ dev->blocklen = DEV_BUFFER_BLOCKLEN;
+
+ return dev;
+}
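
For illustration only (not part of the upstream patch), the intended wiring of this in-memory device is sketched below; the same pattern appears in main() of the tests that include this file:

	/* Sketch (hypothetical usage): back the device with a heap buffer,
	 * run the test against it, then tear everything down. */
	struct spdk_bs_dev *dev;

	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE); /* backing store for all I/O */
	dev = init_dev();

	/* ... pass dev to spdk_fs_init()/spdk_fs_load() and poll threads ... */

	dev->destroy(dev); /* dev_destroy() frees the struct */
	free(g_dev_buffer);
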
diff --git a/src/spdk/test/unit/lib/blob/bs_scheduler.c b/src/spdk/test/unit/lib/blob/bs_scheduler.c
new file mode 100644
index 000000000..4b58fa007
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/bs_scheduler.c
@@ -0,0 +1,87 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+bool g_scheduler_delay = false;
+
+struct scheduled_ops {
+ spdk_msg_fn fn;
+ void *ctx;
+
+ TAILQ_ENTRY(scheduled_ops) ops_queue;
+};
+
+static TAILQ_HEAD(, scheduled_ops) g_scheduled_ops = TAILQ_HEAD_INITIALIZER(g_scheduled_ops);
+
+void _bs_flush_scheduler(uint32_t);
+
+static void
+_bs_send_msg(spdk_msg_fn fn, void *ctx, void *thread_ctx)
+{
+ if (g_scheduler_delay) {
+ struct scheduled_ops *ops = calloc(1, sizeof(*ops));
+
+ SPDK_CU_ASSERT_FATAL(ops != NULL);
+ ops->fn = fn;
+ ops->ctx = ctx;
+ TAILQ_INSERT_TAIL(&g_scheduled_ops, ops, ops_queue);
+
+ } else {
+ fn(ctx);
+ }
+}
+
+static void
+_bs_flush_scheduler_single(void)
+{
+ struct scheduled_ops *op;
+ TAILQ_HEAD(, scheduled_ops) ops;
+ TAILQ_INIT(&ops);
+
+ TAILQ_SWAP(&g_scheduled_ops, &ops, scheduled_ops, ops_queue);
+
+ while (!TAILQ_EMPTY(&ops)) {
+ op = TAILQ_FIRST(&ops);
+ TAILQ_REMOVE(&ops, op, ops_queue);
+
+ op->fn(op->ctx);
+ free(op);
+ }
+}
+
+void
+_bs_flush_scheduler(uint32_t n)
+{
+ while (n--) {
+ _bs_flush_scheduler_single();
+ }
+}
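
For illustration only (not part of the upstream patch), a power-failure test can use this scheduler to hold completions and release them in controlled steps; a minimal sketch:

	/* Sketch (hypothetical usage): defer completions, then run them
	 * one batch at a time. */
	g_scheduler_delay = true; /* _bs_send_msg() now queues instead of calling */

	/* ... issue I/O; completion callbacks accumulate in g_scheduled_ops ... */

	_bs_flush_scheduler(1); /* run one round of queued callbacks */
	g_scheduler_delay = false; /* restore immediate dispatch */
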
diff --git a/src/spdk/test/unit/lib/blobfs/Makefile b/src/spdk/test/unit/lib/blobfs/Makefile
new file mode 100644
index 000000000..5a2c5b3f3
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = tree.c blobfs_async_ut blobfs_sync_ut blobfs_bdev.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore
new file mode 100644
index 000000000..aea3b021d
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore
@@ -0,0 +1 @@
+blobfs_async_ut
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile
new file mode 100644
index 000000000..6de0fc248
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = blob
+TEST_FILE = blobfs_async_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c
new file mode 100644
index 000000000..134b8bfe9
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c
@@ -0,0 +1,704 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+
+#include "common/lib/ut_multithread.c"
+
+#include "spdk_cunit.h"
+#include "blobfs/blobfs.c"
+#include "blobfs/tree.c"
+#include "blob/blobstore.h"
+
+#include "spdk_internal/thread.h"
+
+#include "unit/lib/blob/bs_dev_common.c"
+
+struct spdk_filesystem *g_fs;
+struct spdk_file *g_file;
+int g_fserrno;
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_is_ptr, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+/* Return NULL to test hardcoded defaults. */
+struct spdk_conf_section *
+spdk_conf_find_section(struct spdk_conf *cp, const char *name)
+{
+ return NULL;
+}
+
+/* Return -1 to test hardcoded defaults. */
+int
+spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key)
+{
+ return -1;
+}
+
+static void
+fs_op_complete(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+fs_op_with_handle_complete(void *ctx, struct spdk_filesystem *fs, int fserrno)
+{
+ g_fs = fs;
+ g_fserrno = fserrno;
+}
+
+static void
+fs_poll_threads(void)
+{
+ poll_threads();
+ while (spdk_thread_poll(g_cache_pool_thread, 0, 0) > 0) {}
+}
+
+static void
+fs_init(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+create_cb(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+open_cb(void *ctx, struct spdk_file *f, int fserrno)
+{
+ g_fserrno = fserrno;
+ g_file = f;
+}
+
+static void
+delete_cb(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+fs_open(void)
+{
+ struct spdk_filesystem *fs;
+ spdk_fs_iter iter;
+ struct spdk_bs_dev *dev;
+ struct spdk_file *file;
+ char name[257] = {'\0'};
+
+ dev = init_dev();
+ memset(name, 'a', sizeof(name) - 1);
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_fserrno = 0;
+	/* Open should fail because the file name is too long. */
+ spdk_fs_open_file_async(fs, name, SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == -ENAMETOOLONG);
+
+ g_fserrno = 0;
+ spdk_fs_open_file_async(fs, "file1", 0, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == -ENOENT);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+ CU_ASSERT(!strcmp("file1", g_file->name));
+ CU_ASSERT(g_file->ref_count == 1);
+
+ iter = spdk_fs_iter_first(fs);
+ CU_ASSERT(iter != NULL);
+ file = spdk_fs_iter_get_file(iter);
+ SPDK_CU_ASSERT_FATAL(file != NULL);
+ CU_ASSERT(!strcmp("file1", file->name));
+ iter = spdk_fs_iter_next(iter);
+ CU_ASSERT(iter == NULL);
+
+ g_fserrno = 0;
+	/* Delete should succeed; the file will only be marked as deleted
+	 * because it is still open. */
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_file_close_async(g_file, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+fs_create(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ char name[257] = {'\0'};
+
+ dev = init_dev();
+ memset(name, 'a', sizeof(name) - 1);
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_fserrno = 0;
+	/* Create should fail because the file name is too long. */
+ spdk_fs_create_file_async(fs, name, create_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == -ENAMETOOLONG);
+
+ g_fserrno = 1;
+ spdk_fs_create_file_async(fs, "file1", create_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+
+ g_fserrno = 1;
+ spdk_fs_create_file_async(fs, "file1", create_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == -EEXIST);
+
+ g_fserrno = 1;
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+fs_truncate(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ g_fserrno = 1;
+ spdk_file_truncate_async(g_file, 18 * 1024 * 1024 + 1, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 18 * 1024 * 1024 + 1);
+
+ g_fserrno = 1;
+ spdk_file_truncate_async(g_file, 1, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 1);
+
+ g_fserrno = 1;
+ spdk_file_truncate_async(g_file, 18 * 1024 * 1024 + 1, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 18 * 1024 * 1024 + 1);
+
+ g_fserrno = 1;
+ spdk_file_close_async(g_file, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->ref_count == 0);
+
+ g_fserrno = 1;
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+fs_rename(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_file *file, *file2, *file_iter;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_fserrno = 1;
+ spdk_fs_create_file_async(fs, "file1", create_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", 0, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+ CU_ASSERT(g_file->ref_count == 1);
+
+ file = g_file;
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_file_close_async(file, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(file->ref_count == 0);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file2", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+ CU_ASSERT(g_file->ref_count == 1);
+
+ file2 = g_file;
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_file_close_async(file2, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(file2->ref_count == 0);
+
+ /*
+ * Do a 3-way rename. This should delete the old "file2", then rename
+ * "file1" to "file2".
+ */
+ g_fserrno = 1;
+ spdk_fs_rename_file_async(fs, "file1", "file2", fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(file->ref_count == 0);
+ CU_ASSERT(!strcmp(file->name, "file2"));
+ CU_ASSERT(TAILQ_FIRST(&fs->files) == file);
+ CU_ASSERT(TAILQ_NEXT(file, tailq) == NULL);
+
+ g_fserrno = 0;
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == -ENOENT);
+ CU_ASSERT(!TAILQ_EMPTY(&fs->files));
+ TAILQ_FOREACH(file_iter, &fs->files, tailq) {
+ if (file_iter == NULL) {
+ SPDK_CU_ASSERT_FATAL(false);
+ }
+ }
+
+ g_fserrno = 1;
+ spdk_fs_delete_file_async(fs, "file2", delete_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+fs_rw_async(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ uint8_t w_buf[4096];
+ uint8_t r_buf[4096];
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ /* Write file */
+ CU_ASSERT(g_file->length == 0);
+ g_fserrno = 1;
+ memset(w_buf, 0x5a, sizeof(w_buf));
+ spdk_file_write_async(g_file, fs->sync_target.sync_io_channel, w_buf, 0, 4096,
+ fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 4096);
+
+ /* Read file */
+ g_fserrno = 1;
+ memset(r_buf, 0x0, sizeof(r_buf));
+ spdk_file_read_async(g_file, fs->sync_target.sync_io_channel, r_buf, 0, 4096,
+ fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(memcmp(r_buf, w_buf, sizeof(r_buf)) == 0);
+
+ g_fserrno = 1;
+ spdk_file_close_async(g_file, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+fs_writev_readv_async(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ struct iovec w_iov[2];
+ struct iovec r_iov[2];
+ uint8_t w_buf[4096];
+ uint8_t r_buf[4096];
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ /* Write file */
+ CU_ASSERT(g_file->length == 0);
+ g_fserrno = 1;
+ memset(w_buf, 0x5a, sizeof(w_buf));
+ w_iov[0].iov_base = w_buf;
+ w_iov[0].iov_len = 2048;
+ w_iov[1].iov_base = w_buf + 2048;
+ w_iov[1].iov_len = 2048;
+ spdk_file_writev_async(g_file, fs->sync_target.sync_io_channel,
+ w_iov, 2, 0, 4096, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 4096);
+
+ /* Read file */
+ g_fserrno = 1;
+ memset(r_buf, 0x0, sizeof(r_buf));
+ r_iov[0].iov_base = r_buf;
+ r_iov[0].iov_len = 2048;
+ r_iov[1].iov_base = r_buf + 2048;
+ r_iov[1].iov_len = 2048;
+ spdk_file_readv_async(g_file, fs->sync_target.sync_io_channel,
+ r_iov, 2, 0, 4096, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(memcmp(r_buf, w_buf, sizeof(r_buf)) == 0);
+
+	/* Overwrite the file with block-aligned data */
+ g_fserrno = 1;
+ memset(w_buf, 0x6a, sizeof(w_buf));
+ w_iov[0].iov_base = w_buf;
+ w_iov[0].iov_len = 2048;
+ w_iov[1].iov_base = w_buf + 2048;
+ w_iov[1].iov_len = 2048;
+ spdk_file_writev_async(g_file, fs->sync_target.sync_io_channel,
+ w_iov, 2, 0, 4096, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 4096);
+
+ /* Read file to verify the overwritten data */
+ g_fserrno = 1;
+ memset(r_buf, 0x0, sizeof(r_buf));
+ r_iov[0].iov_base = r_buf;
+ r_iov[0].iov_len = 2048;
+ r_iov[1].iov_base = r_buf + 2048;
+ r_iov[1].iov_len = 2048;
+ spdk_file_readv_async(g_file, fs->sync_target.sync_io_channel,
+ r_iov, 2, 0, 4096, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(memcmp(r_buf, w_buf, sizeof(r_buf)) == 0);
+
+ g_fserrno = 1;
+ spdk_file_close_async(g_file, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+tree_find_buffer_ut(void)
+{
+ struct cache_tree *root;
+ struct cache_tree *level1_0;
+ struct cache_tree *level0_0_0;
+ struct cache_tree *level0_0_12;
+ struct cache_buffer *leaf_0_0_4;
+ struct cache_buffer *leaf_0_12_8;
+ struct cache_buffer *leaf_9_23_15;
+ struct cache_buffer *buffer;
+
+ level1_0 = calloc(1, sizeof(struct cache_tree));
+ SPDK_CU_ASSERT_FATAL(level1_0 != NULL);
+ level0_0_0 = calloc(1, sizeof(struct cache_tree));
+ SPDK_CU_ASSERT_FATAL(level0_0_0 != NULL);
+ level0_0_12 = calloc(1, sizeof(struct cache_tree));
+ SPDK_CU_ASSERT_FATAL(level0_0_12 != NULL);
+ leaf_0_0_4 = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(leaf_0_0_4 != NULL);
+ leaf_0_12_8 = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(leaf_0_12_8 != NULL);
+ leaf_9_23_15 = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(leaf_9_23_15 != NULL);
+
+ level1_0->level = 1;
+ level0_0_0->level = 0;
+ level0_0_12->level = 0;
+
+ leaf_0_0_4->offset = CACHE_BUFFER_SIZE * 4;
+ level0_0_0->u.buffer[4] = leaf_0_0_4;
+ level0_0_0->present_mask |= (1ULL << 4);
+
+ leaf_0_12_8->offset = CACHE_TREE_LEVEL_SIZE(1) * 12 + CACHE_BUFFER_SIZE * 8;
+ level0_0_12->u.buffer[8] = leaf_0_12_8;
+ level0_0_12->present_mask |= (1ULL << 8);
+
+ level1_0->u.tree[0] = level0_0_0;
+ level1_0->present_mask |= (1ULL << 0);
+ level1_0->u.tree[12] = level0_0_12;
+ level1_0->present_mask |= (1ULL << 12);
+
+ buffer = tree_find_buffer(NULL, 0);
+ CU_ASSERT(buffer == NULL);
+
+ buffer = tree_find_buffer(level0_0_0, 0);
+ CU_ASSERT(buffer == NULL);
+
+ buffer = tree_find_buffer(level0_0_0, CACHE_TREE_LEVEL_SIZE(0) + 1);
+ CU_ASSERT(buffer == NULL);
+
+ buffer = tree_find_buffer(level0_0_0, leaf_0_0_4->offset);
+ CU_ASSERT(buffer == leaf_0_0_4);
+
+ buffer = tree_find_buffer(level1_0, leaf_0_0_4->offset);
+ CU_ASSERT(buffer == leaf_0_0_4);
+
+ buffer = tree_find_buffer(level1_0, leaf_0_12_8->offset);
+ CU_ASSERT(buffer == leaf_0_12_8);
+
+ buffer = tree_find_buffer(level1_0, leaf_0_12_8->offset + CACHE_BUFFER_SIZE - 1);
+ CU_ASSERT(buffer == leaf_0_12_8);
+
+ buffer = tree_find_buffer(level1_0, leaf_0_12_8->offset - 1);
+ CU_ASSERT(buffer == NULL);
+
+ leaf_9_23_15->offset = CACHE_TREE_LEVEL_SIZE(2) * 9 +
+ CACHE_TREE_LEVEL_SIZE(1) * 23 +
+ CACHE_BUFFER_SIZE * 15;
+ root = tree_insert_buffer(level1_0, leaf_9_23_15);
+ CU_ASSERT(root != level1_0);
+ buffer = tree_find_buffer(root, leaf_9_23_15->offset);
+ CU_ASSERT(buffer == leaf_9_23_15);
+ tree_free_buffers(root);
+ free(root);
+}
+
+static void
+channel_ops(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ struct spdk_io_channel *channel;
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ channel = spdk_fs_alloc_io_channel(fs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_fs_free_io_channel(channel);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ g_fs = NULL;
+}
+
+static void
+channel_ops_sync(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ struct spdk_fs_thread_ctx *channel;
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ channel = spdk_fs_alloc_thread_ctx(fs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ g_fs = NULL;
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("blobfs_async_ut", NULL, NULL);
+
+ CU_ADD_TEST(suite, fs_init);
+ CU_ADD_TEST(suite, fs_open);
+ CU_ADD_TEST(suite, fs_create);
+ CU_ADD_TEST(suite, fs_truncate);
+ CU_ADD_TEST(suite, fs_rename);
+ CU_ADD_TEST(suite, fs_rw_async);
+ CU_ADD_TEST(suite, fs_writev_readv_async);
+ CU_ADD_TEST(suite, tree_find_buffer_ut);
+ CU_ADD_TEST(suite, channel_ops);
+ CU_ADD_TEST(suite, channel_ops_sync);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ free(g_dev_buffer);
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/.gitignore b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/.gitignore
new file mode 100644
index 000000000..0d29199be
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/.gitignore
@@ -0,0 +1 @@
+blobfs_bdev_ut
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/Makefile b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/Makefile
new file mode 100644
index 000000000..b2d666b1b
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = blobfs_bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut.c b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut.c
new file mode 100644
index 000000000..425b29882
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut.c
@@ -0,0 +1,348 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk/string.h"
+#include "spdk/stdinc.h"
+
+#include "blobfs/bdev/blobfs_bdev.c"
+
+int g_fserrno;
+
+bool g_bdev_open_ext_fail = false;
+bool g_bdev_create_bs_dev_from_desc_fail = false;
+bool g_fs_load_fail = false;
+bool g_fs_unload_fail = false;
+bool g_bs_bdev_claim_fail = false;
+bool g_blobfs_fuse_start_fail = false;
+struct blobfs_bdev_operation_ctx *g_fs_ctx;
+
+const char *g_bdev_name = "ut_bdev";
+
+int
+spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
+ void *event_ctx, struct spdk_bdev_desc **_desc)
+{
+ if (g_bdev_open_ext_fail) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+bs_dev_destroy(struct spdk_bs_dev *dev)
+{
+}
+
+struct spdk_bs_dev *
+spdk_bdev_create_bs_dev_from_desc(struct spdk_bdev_desc *desc)
+{
+ static struct spdk_bs_dev bs_dev;
+
+ if (g_bdev_create_bs_dev_from_desc_fail) {
+ return NULL;
+ }
+
+ bs_dev.destroy = bs_dev_destroy;
+ return &bs_dev;
+}
+
+void
+spdk_fs_load(struct spdk_bs_dev *dev, fs_send_request_fn send_request_fn,
+ spdk_fs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ int rc = 0;
+
+ if (g_fs_load_fail) {
+ rc = -1;
+ }
+
+ cb_fn(cb_arg, NULL, rc);
+
+ return;
+}
+
+void
+spdk_fs_unload(struct spdk_filesystem *fs, spdk_fs_op_complete cb_fn, void *cb_arg)
+{
+ int rc = 0;
+
+ if (g_fs_unload_fail) {
+ rc = -1;
+ }
+
+ cb_fn(cb_arg, rc);
+ return;
+}
+
+void
+spdk_fs_init(struct spdk_bs_dev *dev, struct spdk_blobfs_opts *opt,
+ fs_send_request_fn send_request_fn,
+ spdk_fs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ int rc = 0;
+
+ if (g_fs_load_fail) {
+ rc = -1;
+ }
+
+ cb_fn(cb_arg, NULL, rc);
+ return;
+}
+
+int
+spdk_bs_bdev_claim(struct spdk_bs_dev *bs_dev, struct spdk_bdev_module *module)
+{
+ if (g_bs_bdev_claim_fail) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+blobfs_fuse_start(const char *bdev_name, const char *mountpoint, struct spdk_filesystem *fs,
+ blobfs_fuse_unmount_cb cb_fn, void *cb_arg, struct spdk_blobfs_fuse **_bfuse)
+{
+ if (g_blobfs_fuse_start_fail) {
+ return -1;
+ }
+
+ /* store the ctx for unmount operation */
+ g_fs_ctx = cb_arg;
+
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+int
+spdk_thread_send_msg(const struct spdk_thread *thread, spdk_msg_fn fn, void *ctx)
+{
+ fn(ctx);
+ return 0;
+}
+
+struct spdk_thread *
+spdk_get_thread(void)
+{
+ struct spdk_thread *thd = (struct spdk_thread *)0x1;
+
+ return thd;
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return g_bdev_name;
+}
+
+void
+spdk_fs_opts_init(struct spdk_blobfs_opts *opts)
+{
+}
+
+void
+blobfs_fuse_send_request(fs_request_fn fn, void *arg)
+{
+}
+
+void
+blobfs_fuse_stop(struct spdk_blobfs_fuse *bfuse)
+{
+}
+
+static void
+blobfs_bdev_op_complete(void *cb_arg, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+spdk_blobfs_bdev_detect_test(void)
+{
+ /* spdk_bdev_open_ext() fails */
+ g_bdev_open_ext_fail = true;
+ spdk_blobfs_bdev_detect(g_bdev_name, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_open_ext_fail = false;
+
+ /* spdk_bdev_create_bs_dev_from_desc() fails */
+ g_bdev_create_bs_dev_from_desc_fail = true;
+ spdk_blobfs_bdev_detect(g_bdev_name, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_create_bs_dev_from_desc_fail = false;
+
+ /* spdk_fs_load() fails */
+ g_fs_load_fail = true;
+ spdk_blobfs_bdev_detect(g_bdev_name, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_fs_load_fail = false;
+
+ /* spdk_fs_unload() fails */
+ g_fs_unload_fail = true;
+ spdk_blobfs_bdev_detect(g_bdev_name, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_fs_unload_fail = false;
+
+ /* no fail */
+ spdk_blobfs_bdev_detect(g_bdev_name, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+spdk_blobfs_bdev_create_test(void)
+{
+ uint32_t cluster_sz = 1024 * 1024;
+
+ /* spdk_bdev_open_ext() fails */
+ g_bdev_open_ext_fail = true;
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_open_ext_fail = false;
+
+ /* spdk_bdev_create_bs_dev_from_desc() fails */
+ g_bdev_create_bs_dev_from_desc_fail = true;
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_create_bs_dev_from_desc_fail = false;
+
+ /* spdk_bs_bdev_claim() fails */
+ g_bs_bdev_claim_fail = true;
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bs_bdev_claim_fail = false;
+
+ /* spdk_fs_init() fails */
+ g_fs_load_fail = true;
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_fs_load_fail = false;
+
+ /* spdk_fs_unload() fails */
+ g_fs_unload_fail = true;
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_fs_unload_fail = false;
+
+ /* no fail */
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+spdk_blobfs_bdev_mount_test(void)
+{
+#ifdef SPDK_CONFIG_FUSE
+ const char *mountpoint = "/mnt";
+
+ /* spdk_bdev_open_ext() fails */
+ g_bdev_open_ext_fail = true;
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_open_ext_fail = false;
+
+ /* spdk_bdev_create_bs_dev_from_desc() fails */
+ g_bdev_create_bs_dev_from_desc_fail = true;
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_create_bs_dev_from_desc_fail = false;
+
+ /* spdk_bs_bdev_claim() fails */
+ g_bs_bdev_claim_fail = true;
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bs_bdev_claim_fail = false;
+
+ /* spdk_fs_load() fails */
+ g_fs_load_fail = true;
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_fs_load_fail = false;
+
+ /* blobfs_fuse_start() fails */
+ g_blobfs_fuse_start_fail = true;
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_blobfs_fuse_start_fail = false;
+
+ /* no fail */
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_fs_ctx != NULL);
+
+	/* After a successful mount, make sure the unmount operation also succeeds. */
+ blobfs_bdev_unmount(g_fs_ctx);
+ CU_ASSERT(g_fserrno == 0);
+#endif
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("blobfs_bdev_ut", NULL, NULL);
+
+ CU_ADD_TEST(suite, spdk_blobfs_bdev_detect_test);
+ CU_ADD_TEST(suite, spdk_blobfs_bdev_create_test);
+ CU_ADD_TEST(suite, spdk_blobfs_bdev_mount_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore
new file mode 100644
index 000000000..93ef643ff
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore
@@ -0,0 +1 @@
+blobfs_sync_ut
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile
new file mode 100644
index 000000000..31961be12
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = blob
+TEST_FILE = blobfs_sync_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c
new file mode 100644
index 000000000..f9d00226c
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c
@@ -0,0 +1,703 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/blobfs.h"
+#include "spdk/env.h"
+#include "spdk/log.h"
+#include "spdk/thread.h"
+#include "spdk/barrier.h"
+#include "spdk_internal/thread.h"
+
+#include "spdk_cunit.h"
+#include "unit/lib/blob/bs_dev_common.c"
+#include "common/lib/test_env.c"
+#include "blobfs/blobfs.c"
+#include "blobfs/tree.c"
+
+struct spdk_filesystem *g_fs;
+struct spdk_file *g_file;
+int g_fserrno;
+struct spdk_thread *g_dispatch_thread = NULL;
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_is_ptr, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+/* Return NULL to test hardcoded defaults. */
+struct spdk_conf_section *
+spdk_conf_find_section(struct spdk_conf *cp, const char *name)
+{
+ return NULL;
+}
+
+/* Return -1 to test hardcoded defaults. */
+int
+spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key)
+{
+ return -1;
+}
+
+struct ut_request {
+ fs_request_fn fn;
+ void *arg;
+ volatile int done;
+};
+
+static void
+send_request(fs_request_fn fn, void *arg)
+{
+ spdk_thread_send_msg(g_dispatch_thread, (spdk_msg_fn)fn, arg);
+}
+
+static void
+ut_call_fn(void *arg)
+{
+ struct ut_request *req = arg;
+
+ req->fn(req->arg);
+ req->done = 1;
+}
+
+static void
+ut_send_request(fs_request_fn fn, void *arg)
+{
+ struct ut_request req;
+
+ req.fn = fn;
+ req.arg = arg;
+ req.done = 0;
+
+ spdk_thread_send_msg(g_dispatch_thread, ut_call_fn, &req);
+
+ /* Wait for this to finish */
+ while (req.done == 0) { }
+}
+
+static void
+fs_op_complete(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+fs_op_with_handle_complete(void *ctx, struct spdk_filesystem *fs, int fserrno)
+{
+ g_fs = fs;
+ g_fserrno = fserrno;
+}
+
+static void
+fs_thread_poll(void)
+{
+ struct spdk_thread *thread;
+
+ thread = spdk_get_thread();
+ while (spdk_thread_poll(thread, 0, 0) > 0) {}
+ while (spdk_thread_poll(g_cache_pool_thread, 0, 0) > 0) {}
+}
+
+static void
+_fs_init(void *arg)
+{
+ struct spdk_bs_dev *dev;
+
+ g_fs = NULL;
+ g_fserrno = -1;
+ dev = init_dev();
+ spdk_fs_init(dev, NULL, send_request, fs_op_with_handle_complete, NULL);
+
+ fs_thread_poll();
+
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs->bdev == dev);
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+_fs_load(void *arg)
+{
+ struct spdk_bs_dev *dev;
+
+ g_fs = NULL;
+ g_fserrno = -1;
+ dev = init_dev();
+ spdk_fs_load(dev, send_request, fs_op_with_handle_complete, NULL);
+
+ fs_thread_poll();
+
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs->bdev == dev);
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+_fs_unload(void *arg)
+{
+ g_fserrno = -1;
+ spdk_fs_unload(g_fs, fs_op_complete, NULL);
+
+ fs_thread_poll();
+
+ CU_ASSERT(g_fserrno == 0);
+ g_fs = NULL;
+}
+
+static void
+_nop(void *arg)
+{
+}
+
+static void
+cache_read_after_write(void)
+{
+ uint64_t length;
+ int rc;
+ char w_buf[100], r_buf[100];
+ struct spdk_fs_thread_ctx *channel;
+ struct spdk_file_stat stat = {0};
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ length = (4 * 1024 * 1024);
+ rc = spdk_file_truncate(g_file, channel, length);
+ CU_ASSERT(rc == 0);
+
+ memset(w_buf, 0x5a, sizeof(w_buf));
+ spdk_file_write(g_file, channel, w_buf, 0, sizeof(w_buf));
+
+ CU_ASSERT(spdk_file_get_length(g_file) == length);
+
+ rc = spdk_file_truncate(g_file, channel, sizeof(w_buf));
+ CU_ASSERT(rc == 0);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(sizeof(w_buf) == stat.size);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ memset(r_buf, 0, sizeof(r_buf));
+ spdk_file_read(g_file, channel, r_buf, 0, sizeof(r_buf));
+ CU_ASSERT(memcmp(w_buf, r_buf, sizeof(r_buf)) == 0);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == -ENOENT);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+file_length(void)
+{
+ int rc;
+ char *buf;
+ uint64_t buf_length;
+ volatile uint64_t *length_flushed;
+ struct spdk_fs_thread_ctx *channel;
+ struct spdk_file_stat stat = {0};
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ g_file = NULL;
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ /* Write one CACHE_BUFFER. Filling at least one cache buffer triggers
+ * a flush to disk.
+ */
+ buf_length = CACHE_BUFFER_SIZE;
+ buf = calloc(1, buf_length);
+ spdk_file_write(g_file, channel, buf, 0, buf_length);
+ free(buf);
+
+ /* Spin until all of the data has been flushed to the SSD. There's been no
+ * sync operation yet, so the xattr on the file is still 0.
+ *
+ * length_flushed: This variable is modified by a different thread in this unit
+ * test. So we need to dereference it as a volatile to ensure the value is always
+ * re-read.
+ */
+ length_flushed = &g_file->length_flushed;
+ while (*length_flushed != buf_length) {}
+
+ /* Close the file. This causes an implicit sync which should write the
+ * length_flushed value as the "length" xattr on the file.
+ */
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(buf_length == stat.size);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ /* Unload and reload the filesystem. The file length will be
+ * read during load from the length xattr. We want to make sure
+ * it matches what was written when the file was originally
+ * written and closed.
+ */
+ ut_send_request(_fs_unload, NULL);
+
+ ut_send_request(_fs_load, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(buf_length == stat.size);
+
+ g_file = NULL;
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+append_write_to_extend_blob(void)
+{
+ uint64_t blob_size, buf_length;
+ char *buf, append_buf[64];
+ int rc;
+ struct spdk_fs_thread_ctx *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ /* Create a file and write blob_size - 1 bytes of data into it. */
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ blob_size = __file_get_blob_size(g_file);
+
+ buf_length = blob_size - 1;
+ buf = calloc(1, buf_length);
+ rc = spdk_file_write(g_file, channel, buf, 0, buf_length);
+ CU_ASSERT(rc == 0);
+ free(buf);
+
+ spdk_file_close(g_file, channel);
+ fs_thread_poll();
+ spdk_fs_free_thread_ctx(channel);
+ ut_send_request(_fs_unload, NULL);
+
+ /* Load the existing file and append 2 extra bytes to cross the blob boundary. */
+ ut_send_request(_fs_load, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+ g_file = NULL;
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ CU_ASSERT(g_file->length == buf_length);
+ CU_ASSERT(g_file->last == NULL);
+ CU_ASSERT(g_file->append_pos == buf_length);
+
+ rc = spdk_file_write(g_file, channel, append_buf, buf_length, 2);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(2 * blob_size == __file_get_blob_size(g_file));
+ spdk_file_close(g_file, channel);
+ fs_thread_poll();
+ CU_ASSERT(g_file->length == buf_length + 2);
+
+ spdk_fs_free_thread_ctx(channel);
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+partial_buffer(void)
+{
+ int rc;
+ char *buf;
+ uint64_t buf_length;
+ struct spdk_fs_thread_ctx *channel;
+ struct spdk_file_stat stat = {0};
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ g_file = NULL;
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ /* Write one CACHE_BUFFER plus one byte. Filling at least one cache buffer triggers
+ * a flush to disk. We want to make sure the extra byte is not implicitly flushed.
+ * It should only get flushed once we sync or close the file.
+ */
+ buf_length = CACHE_BUFFER_SIZE + 1;
+ buf = calloc(1, buf_length);
+ spdk_file_write(g_file, channel, buf, 0, buf_length);
+ free(buf);
+
+ /* Send some nop messages to the dispatch thread. This will ensure any of the
+ * pending write operations are completed. A well-functioning blobfs should only
+ * issue one write for the filled CACHE_BUFFER - a buggy one might try to write
+ * the extra byte. So do a bunch of _nops to make sure all of them (even the buggy
+ * ones) get a chance to run. Note that we can't just send a message to the
+ * dispatch thread to call spdk_thread_poll() because the messages are themselves
+ * run in the context of spdk_thread_poll().
+ */
+ ut_send_request(_nop, NULL);
+ ut_send_request(_nop, NULL);
+ ut_send_request(_nop, NULL);
+ ut_send_request(_nop, NULL);
+ ut_send_request(_nop, NULL);
+ ut_send_request(_nop, NULL);
+
+ CU_ASSERT(g_file->length_flushed == CACHE_BUFFER_SIZE);
+
+ /* Close the file. This causes an implicit sync which should write the
+ * length_flushed value as the "length" xattr on the file.
+ */
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(buf_length == stat.size);
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+cache_write_null_buffer(void)
+{
+ uint64_t length;
+ int rc;
+ struct spdk_fs_thread_ctx *channel;
+ struct spdk_thread *thread;
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ length = 0;
+ rc = spdk_file_truncate(g_file, channel, length);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_file_write(g_file, channel, NULL, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ thread = spdk_get_thread();
+ while (spdk_thread_poll(thread, 0, 0) > 0) {}
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+fs_create_sync(void)
+{
+ int rc;
+ struct spdk_fs_thread_ctx *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+ CU_ASSERT(channel != NULL);
+
+ rc = spdk_fs_create_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ /* Create should fail, because the file already exists. */
+ rc = spdk_fs_create_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ fs_thread_poll();
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+fs_rename_sync(void)
+{
+ int rc;
+ struct spdk_fs_thread_ctx *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+ CU_ASSERT(channel != NULL);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ CU_ASSERT(strcmp(spdk_file_get_name(g_file), "testfile") == 0);
+
+ rc = spdk_fs_rename_file(g_fs, channel, "testfile", "newtestfile");
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(strcmp(spdk_file_get_name(g_file), "newtestfile") == 0);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+cache_append_no_cache(void)
+{
+ int rc;
+ char buf[100];
+ struct spdk_fs_thread_ctx *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
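+ /* Each append grows the in-memory file length immediately; an explicit sync
+  * in the middle must not disturb subsequent appends.
+  */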
+ spdk_file_write(g_file, channel, buf, 0 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 1 * sizeof(buf));
+ spdk_file_write(g_file, channel, buf, 1 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 2 * sizeof(buf));
+ spdk_file_sync(g_file, channel);
+
+ fs_thread_poll();
+
+ spdk_file_write(g_file, channel, buf, 2 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 3 * sizeof(buf));
+ spdk_file_write(g_file, channel, buf, 3 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 4 * sizeof(buf));
+ spdk_file_write(g_file, channel, buf, 4 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 5 * sizeof(buf));
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+fs_delete_file_without_close(void)
+{
+ int rc;
+ struct spdk_fs_thread_ctx *channel;
+ struct spdk_file *file;
+
+ ut_send_request(_fs_init, NULL);
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+ CU_ASSERT(channel != NULL);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
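+ /* Delete the file while it is still open: it is only marked deleted and
+  * keeps its reference until the final close, but can no longer be opened.
+  */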
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_file->ref_count != 0);
+ CU_ASSERT(g_file->is_deleted == true);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &file);
+ CU_ASSERT(rc != 0);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &file);
+ CU_ASSERT(rc != 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static bool g_thread_exit = false;
+
+static void
+terminate_spdk_thread(void *arg)
+{
+ g_thread_exit = true;
+}
+
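+/* Body of the pthread that drives the dispatch thread: keep polling its
+ * message queue until terminate_spdk_thread() flips g_thread_exit.
+ */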
+static void *
+spdk_thread(void *arg)
+{
+ struct spdk_thread *thread = arg;
+
+ spdk_set_thread(thread);
+
+ while (!g_thread_exit) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+
+ return NULL;
+}
+
+int main(int argc, char **argv)
+{
+ struct spdk_thread *thread;
+ CU_pSuite suite = NULL;
+ pthread_t spdk_tid;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("blobfs_sync_ut", NULL, NULL);
+
+ CU_ADD_TEST(suite, cache_read_after_write);
+ CU_ADD_TEST(suite, file_length);
+ CU_ADD_TEST(suite, append_write_to_extend_blob);
+ CU_ADD_TEST(suite, partial_buffer);
+ CU_ADD_TEST(suite, cache_write_null_buffer);
+ CU_ADD_TEST(suite, fs_create_sync);
+ CU_ADD_TEST(suite, fs_rename_sync);
+ CU_ADD_TEST(suite, cache_append_no_cache);
+ CU_ADD_TEST(suite, fs_delete_file_without_close);
+
+ spdk_thread_lib_init(NULL, 0);
+
+ thread = spdk_thread_create("test_thread", NULL);
+ spdk_set_thread(thread);
+
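+ /* The dispatch thread is driven from a separate pthread (see spdk_thread()
+  * above) so the synchronous blobfs calls made by the tests can complete.
+  */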
+ g_dispatch_thread = spdk_thread_create("dispatch_thread", NULL);
+ pthread_create(&spdk_tid, NULL, spdk_thread, g_dispatch_thread);
+
+ g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free(g_dev_buffer);
+
+ ut_send_request(terminate_spdk_thread, NULL);
+ pthread_join(spdk_tid, NULL);
+
+ while (spdk_thread_poll(g_dispatch_thread, 0, 0) > 0) {}
+ while (spdk_thread_poll(thread, 0, 0) > 0) {}
+
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ spdk_set_thread(g_dispatch_thread);
+ spdk_thread_exit(g_dispatch_thread);
+ while (!spdk_thread_is_exited(g_dispatch_thread)) {
+ spdk_thread_poll(g_dispatch_thread, 0, 0);
+ }
+ spdk_thread_destroy(g_dispatch_thread);
+
+ spdk_thread_lib_fini();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blobfs/tree.c/.gitignore b/src/spdk/test/unit/lib/blobfs/tree.c/.gitignore
new file mode 100644
index 000000000..57e77bf71
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/tree.c/.gitignore
@@ -0,0 +1 @@
+tree_ut
diff --git a/src/spdk/test/unit/lib/blobfs/tree.c/Makefile b/src/spdk/test/unit/lib/blobfs/tree.c/Makefile
new file mode 100644
index 000000000..b3d57e873
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/tree.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = tree_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c b/src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c
new file mode 100644
index 000000000..70f1d692a
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "blobfs/tree.c"
+
+void
+cache_buffer_free(struct cache_buffer *cache_buffer)
+{
+ free(cache_buffer);
+}
+
+static void
+blobfs_tree_op_test(void)
+{
+ struct cache_tree *tree;
+ struct cache_buffer *buffer[5];
+ struct cache_buffer *tmp_buffer;
+ int i;
+
+ for (i = 0; i < 5; i ++) {
+ buffer[i] = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(buffer[i]);
+ }
+
+ tree = calloc(1, sizeof(*tree));
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+
+ /* insert buffer[0] */
+ buffer[0]->offset = 0;
+ tree = tree_insert_buffer(tree, buffer[0]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 0);
+ tmp_buffer = tree_find_buffer(tree, buffer[0]->offset);
+ CU_ASSERT(tmp_buffer == buffer[0]);
+
+ /* insert buffer[1] */
+ buffer[1]->offset = CACHE_BUFFER_SIZE;
+ /* set bytes_filled and bytes_flushed to the same non-zero value, e.g., 32 */
+ buffer[1]->bytes_filled = buffer[1]->bytes_flushed = 32;
+ tree = tree_insert_buffer(tree, buffer[1]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 0);
+ tmp_buffer = tree_find_filled_buffer(tree, buffer[1]->offset);
+ CU_ASSERT(tmp_buffer == buffer[1]);
+
+ /* insert buffer[2] */
+ buffer[2]->offset = (CACHE_TREE_WIDTH - 1) * CACHE_BUFFER_SIZE;
+ tree = tree_insert_buffer(tree, buffer[2]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 0);
+ tmp_buffer = tree_find_buffer(tree, buffer[2]->offset);
+ CU_ASSERT(tmp_buffer == buffer[2]);
+ tmp_buffer = tree_find_filled_buffer(tree, buffer[2]->offset);
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* insert buffer[3] at an offset that cannot fit in level 0 */
+ buffer[3]->offset = CACHE_TREE_LEVEL_SIZE(1);
+ tree = tree_insert_buffer(tree, buffer[3]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 1);
+ tmp_buffer = tree_find_buffer(tree, buffer[3]->offset);
+ CU_ASSERT(tmp_buffer == buffer[3]);
+
+ /* insert buffer[4] at an offset that cannot fit in level 1 */
+ buffer[4]->offset = CACHE_TREE_LEVEL_SIZE(2);
+ tree = tree_insert_buffer(tree, buffer[4]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 2);
+ tmp_buffer = tree_find_buffer(tree, buffer[4]->offset);
+ CU_ASSERT(tmp_buffer == buffer[4]);
+
+ /* delete buffer[0] */
+ tree_remove_buffer(tree, buffer[0]);
+ /* verify buffer[0] is no longer in the tree */
+ tmp_buffer = tree_find_buffer(tree, 0);
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* delete buffer[3] */
+ tree_remove_buffer(tree, buffer[3]);
+ /* verify buffer[3] is no longer in the tree */
+ tmp_buffer = tree_find_buffer(tree, CACHE_TREE_LEVEL_SIZE(1));
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* free all buffers in the tree */
+ tree_free_buffers(tree);
+
+ /* verify buffer[1] is no longer in the tree */
+ tmp_buffer = tree_find_buffer(tree, CACHE_BUFFER_SIZE);
+ CU_ASSERT(tmp_buffer == NULL);
+ /* verify buffer[2] is no longer in the tree */
+ tmp_buffer = tree_find_buffer(tree, (CACHE_TREE_WIDTH - 1) * CACHE_BUFFER_SIZE);
+ CU_ASSERT(tmp_buffer == NULL);
+ /* verify buffer[4] is no longer in the tree */
+ tmp_buffer = tree_find_buffer(tree, CACHE_TREE_LEVEL_SIZE(2));
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* tree_free_buffers() leaves the root node allocated, so free it explicitly */
+ free(tree);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("tree", NULL, NULL);
+ CU_ADD_TEST(suite, blobfs_tree_op_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/event/Makefile b/src/spdk/test/unit/lib/event/Makefile
new file mode 100644
index 000000000..ea411460c
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = subsystem.c app.c reactor.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/event/app.c/.gitignore b/src/spdk/test/unit/lib/event/app.c/.gitignore
new file mode 100644
index 000000000..123e16734
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/app.c/.gitignore
@@ -0,0 +1 @@
+app_ut
diff --git a/src/spdk/test/unit/lib/event/app.c/Makefile b/src/spdk/test/unit/lib/event/app.c/Makefile
new file mode 100644
index 000000000..9ec2b97db
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/app.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf trace jsonrpc json
+TEST_FILE = app_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/event/app.c/app_ut.c b/src/spdk/test/unit/lib/event/app.c/app_ut.c
new file mode 100644
index 000000000..6077d6600
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/app.c/app_ut.c
@@ -0,0 +1,193 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "event/app.c"
+
+#define test_argc 6
+
+DEFINE_STUB_V(spdk_event_call, (struct spdk_event *event));
+DEFINE_STUB(spdk_event_allocate, struct spdk_event *, (uint32_t core, spdk_event_fn fn, void *arg1,
+ void *arg2), NULL);
+DEFINE_STUB_V(spdk_subsystem_init, (spdk_subsystem_init_fn cb_fn, void *cb_arg));
+DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
+ uint32_t state_mask));
+DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias));
+DEFINE_STUB_V(spdk_rpc_set_state, (uint32_t state));
+DEFINE_STUB(spdk_rpc_get_state, uint32_t, (void), SPDK_RPC_RUNTIME);
+DEFINE_STUB_V(spdk_app_json_config_load, (const char *json_config_file, const char *rpc_addr,
+ spdk_subsystem_init_fn cb_fn, void *cb_arg, bool stop_on_error));
+
+static void
+unittest_usage(void)
+{
+}
+
+static int
+unittest_parse_args(int ch, char *arg)
+{
+ return 0;
+}
+
+static void
+clean_opts(struct spdk_app_opts *opts)
+{
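+ /* Free the PCI address lists a parse may have allocated before zeroing
+  * the opts for the next test case.
+  */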
+ free(opts->pci_whitelist);
+ opts->pci_whitelist = NULL;
+ free(opts->pci_blacklist);
+ opts->pci_blacklist = NULL;
+ memset(opts, 0, sizeof(struct spdk_app_opts));
+}
+
+static void
+test_spdk_app_parse_args(void)
+{
+ spdk_app_parse_args_rvals_t rc;
+ struct spdk_app_opts opts = {};
+ struct option my_options[2] = {};
+ char *valid_argv[test_argc] = {"app_ut",
+ "--wait-for-rpc",
+ "-d",
+ "-p0",
+ "-B",
+ "0000:81:00.0"
+ };
+ char *invalid_argv_BW[test_argc] = {"app_ut",
+ "-B",
+ "0000:81:00.0",
+ "-W",
+ "0000:82:00.0",
+ "-cspdk.conf"
+ };
+ /* use -z as a new short option that is not registered by default */
+ char *argv_added_short_opt[test_argc] = {"app_ut",
+ "-z",
+ "-d",
+ "--wait-for-rpc",
+ "-p0",
+ "-cspdk.conf"
+ };
+ char *argv_added_long_opt[test_argc] = {"app_ut",
+ "-cspdk.conf",
+ "-d",
+ "-r/var/tmp/spdk.sock",
+ "--test-long-opt",
+ "--wait-for-rpc"
+ };
+ char *invalid_argv_missing_option[test_argc] = {"app_ut",
+ "-d",
+ "-p",
+ "--wait-for-rpc",
+ "--silence-noticelog"
+ "-R"
+ };
+
+ /* Test valid arguments. Expected result: PASS */
+ rc = spdk_app_parse_args(test_argc, valid_argv, &opts, "", NULL, unittest_parse_args, NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_SUCCESS);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Test invalid short option. Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, argv_added_short_opt, &opts, "", NULL, unittest_parse_args,
+ NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Test valid global and local options. Expected result: PASS */
+ rc = spdk_app_parse_args(test_argc, argv_added_short_opt, &opts, "z", NULL, unittest_parse_args,
+ unittest_usage);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_SUCCESS);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Test invalid long option. Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, argv_added_long_opt, &opts, "", NULL, unittest_parse_args,
+ NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Test a registered local long option. Expected result: PASS */
+ my_options[0].name = "test-long-opt";
+ rc = spdk_app_parse_args(test_argc, argv_added_long_opt, &opts, "", my_options, unittest_parse_args,
+ unittest_usage);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_SUCCESS);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Test overlapping global and local options. Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, valid_argv, &opts, SPDK_APP_GETOPT_STRING, NULL,
+ unittest_parse_args, NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Specify -B and -W options at the same time. Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, invalid_argv_BW, &opts, "", NULL, unittest_parse_args, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Omit the required argument to an option. Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, invalid_argv_missing_option, &opts, "", NULL,
+ unittest_parse_args, NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+ clean_opts(&opts);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("app_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_spdk_app_parse_args);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/event/reactor.c/.gitignore b/src/spdk/test/unit/lib/event/reactor.c/.gitignore
new file mode 100644
index 000000000..c86b7dfcd
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/reactor.c/.gitignore
@@ -0,0 +1 @@
+reactor_ut
diff --git a/src/spdk/test/unit/lib/event/reactor.c/Makefile b/src/spdk/test/unit/lib/event/reactor.c/Makefile
new file mode 100644
index 000000000..f7b3b5887
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/reactor.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf trace jsonrpc json
+TEST_FILE = reactor_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/event/reactor.c/reactor_ut.c b/src/spdk/test/unit/lib/event/reactor.c/reactor_ut.c
new file mode 100644
index 000000000..db50ea2f6
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/reactor.c/reactor_ut.c
@@ -0,0 +1,455 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "event/reactor.c"
+
+static void
+test_create_reactor(void)
+{
+ struct spdk_reactor reactor = {};
+
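+ /* Construct a single reactor in place and verify it can be looked up
+  * by its core number.
+  */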
+ g_reactors = &reactor;
+
+ reactor_construct(&reactor, 0);
+
+ CU_ASSERT(spdk_reactor_get(0) == &reactor);
+
+ spdk_ring_free(reactor.events);
+ g_reactors = NULL;
+}
+
+static void
+test_init_reactors(void)
+{
+ uint32_t core;
+
+ allocate_cores(3);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ CU_ASSERT(g_reactor_state == SPDK_REACTOR_STATE_INITIALIZED);
+ for (core = 0; core < 3; core++) {
+ CU_ASSERT(spdk_reactor_get(core) != NULL);
+ }
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
+static void
+ut_event_fn(void *arg1, void *arg2)
+{
+ uint8_t *test1 = arg1;
+ uint8_t *test2 = arg2;
+
+ *test1 = 1;
+ *test2 = 0xFF;
+}
+
+static void
+test_event_call(void)
+{
+ uint8_t test1 = 0, test2 = 0;
+ struct spdk_event *evt;
+ struct spdk_reactor *reactor;
+
+ allocate_cores(1);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ evt = spdk_event_allocate(0, ut_event_fn, &test1, &test2);
+ CU_ASSERT(evt != NULL);
+
+ spdk_event_call(evt);
+
+ reactor = spdk_reactor_get(0);
+ CU_ASSERT(reactor != NULL);
+
+ CU_ASSERT(event_queue_run_batch(reactor) == 1);
+ CU_ASSERT(test1 == 1);
+ CU_ASSERT(test2 == 0xFF);
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
+static void
+test_schedule_thread(void)
+{
+ struct spdk_cpuset cpuset = {};
+ struct spdk_thread *thread;
+ struct spdk_reactor *reactor;
+ struct spdk_lw_thread *lw_thread;
+
+ allocate_cores(5);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
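+ /* Pin the new thread to core 3 via its cpumask; g_next_core is deliberately
+  * set past it to show that the cpumask, not the round-robin cursor, decides
+  * placement.
+  */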
+ spdk_cpuset_set_cpu(&cpuset, 3, true);
+ g_next_core = 4;
+
+ /* _reactor_schedule_thread() is invoked at the end of spdk_thread_create()
+  * because it was registered with the SPDK thread library via
+  * spdk_thread_lib_init().
+ */
+ thread = spdk_thread_create(NULL, &cpuset);
+ CU_ASSERT(thread != NULL);
+
+ reactor = spdk_reactor_get(3);
+ CU_ASSERT(reactor != NULL);
+
+ MOCK_SET(spdk_env_get_current_core, 3);
+
+ CU_ASSERT(event_queue_run_batch(reactor) == 1);
+
+ MOCK_CLEAR(spdk_env_get_current_core);
+
+ lw_thread = TAILQ_FIRST(&reactor->threads);
+ CU_ASSERT(lw_thread != NULL);
+ CU_ASSERT(spdk_thread_get_from_ctx(lw_thread) == thread);
+
+ TAILQ_REMOVE(&reactor->threads, lw_thread, link);
+ reactor->thread_count--;
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+ spdk_set_thread(NULL);
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
+static void
+test_reschedule_thread(void)
+{
+ struct spdk_cpuset cpuset = {};
+ struct spdk_thread *thread;
+ struct spdk_reactor *reactor;
+ struct spdk_lw_thread *lw_thread;
+
+ allocate_cores(3);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ spdk_cpuset_set_cpu(&g_reactor_core_mask, 0, true);
+ spdk_cpuset_set_cpu(&g_reactor_core_mask, 1, true);
+ spdk_cpuset_set_cpu(&g_reactor_core_mask, 2, true);
+ g_next_core = 0;
+
+ /* Create and schedule the thread to core 1. */
+ spdk_cpuset_set_cpu(&cpuset, 1, true);
+
+ thread = spdk_thread_create(NULL, &cpuset);
+ CU_ASSERT(thread != NULL);
+ lw_thread = spdk_thread_get_ctx(thread);
+
+ reactor = spdk_reactor_get(1);
+ CU_ASSERT(reactor != NULL);
+ MOCK_SET(spdk_env_get_current_core, 1);
+
+ CU_ASSERT(event_queue_run_batch(reactor) == 1);
+ CU_ASSERT(TAILQ_FIRST(&reactor->threads) == lw_thread);
+
+ spdk_set_thread(thread);
+
+ /* Call spdk_thread_set_cpumask() twice with different cpumask values.
+ * The cpumask from the second call is the one used by the reschedule operation.
+ */
+
+ spdk_cpuset_zero(&cpuset);
+ spdk_cpuset_set_cpu(&cpuset, 0, true);
+ CU_ASSERT(spdk_thread_set_cpumask(&cpuset) == 0);
+
+ spdk_cpuset_zero(&cpuset);
+ spdk_cpuset_set_cpu(&cpuset, 2, true);
+ CU_ASSERT(spdk_thread_set_cpumask(&cpuset) == 0);
+
+ CU_ASSERT(lw_thread->resched == true);
+
+ reactor_run(reactor);
+
+ CU_ASSERT(lw_thread->resched == false);
+ CU_ASSERT(TAILQ_EMPTY(&reactor->threads));
+
+ reactor = spdk_reactor_get(0);
+ CU_ASSERT(reactor != NULL);
+ MOCK_SET(spdk_env_get_current_core, 0);
+
+ CU_ASSERT(event_queue_run_batch(reactor) == 0);
+
+ reactor = spdk_reactor_get(2);
+ CU_ASSERT(reactor != NULL);
+ MOCK_SET(spdk_env_get_current_core, 2);
+
+ CU_ASSERT(event_queue_run_batch(reactor) == 1);
+
+ CU_ASSERT(TAILQ_FIRST(&reactor->threads) == lw_thread);
+
+ MOCK_CLEAR(spdk_env_get_current_core);
+
+ TAILQ_REMOVE(&reactor->threads, lw_thread, link);
+ reactor->thread_count--;
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+ spdk_set_thread(NULL);
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
+static void
+for_each_reactor_done(void *arg1, void *arg2)
+{
+ uint32_t *count = arg1;
+ bool *done = arg2;
+
+ (*count)++;
+ *done = true;
+}
+
+static void
+for_each_reactor_cb(void *arg1, void *arg2)
+{
+ uint32_t *count = arg1;
+
+ (*count)++;
+}
+
+static void
+test_for_each_reactor(void)
+{
+ uint32_t count = 0, i;
+ bool done = false;
+ struct spdk_reactor *reactor;
+
+ allocate_cores(5);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ MOCK_SET(spdk_env_get_current_core, 0);
+
+ spdk_for_each_reactor(for_each_reactor_cb, &count, &done, for_each_reactor_done);
+
+ MOCK_CLEAR(spdk_env_get_current_core);
+
+ /* We have not processed any event yet, so count and done should be 0 and false,
+ * respectively.
+ */
+ CU_ASSERT(count == 0);
+ CU_ASSERT(done == false);
+
+ /* Poll each reactor to verify the event is passed to each */
+ for (i = 0; i < 5; i++) {
+ reactor = spdk_reactor_get(i);
+ CU_ASSERT(reactor != NULL);
+
+ event_queue_run_batch(reactor);
+ CU_ASSERT(count == (i + 1));
+ CU_ASSERT(done == false);
+ }
+
+ /* After each reactor is called, the completion calls it one more time. */
+ reactor = spdk_reactor_get(0);
+ CU_ASSERT(reactor != NULL);
+
+ event_queue_run_batch(reactor);
+ CU_ASSERT(count == 6);
+ CU_ASSERT(done == true);
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
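+/* Pollers that burn a caller-provided number of ticks: returning 0 reports
+ * the time as idle, returning 1 reports it as busy.
+ */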
+static int
+poller_run_idle(void *ctx)
+{
+ uint64_t delay_us = (uint64_t)ctx;
+
+ spdk_delay_us(delay_us);
+
+ return 0;
+}
+
+static int
+poller_run_busy(void *ctx)
+{
+ uint64_t delay_us = (uint64_t)ctx;
+
+ spdk_delay_us(delay_us);
+
+ return 1;
+}
+
+static void
+test_reactor_stats(void)
+{
+ struct spdk_cpuset cpuset = {};
+ struct spdk_thread *thread1, *thread2;
+ struct spdk_reactor *reactor;
+ struct spdk_poller *busy1, *idle1, *busy2, *idle2;
+ int rc __attribute__((unused));
+
+ /* Test case is the following:
+ * Create a reactor on CPU core0.
+ * Create thread1 and thread2 simultaneously on reactor0 at TSC = 100.
+ * Reactor runs
+ * - thread1 for 100 with busy
+ * - thread2 for 200 with idle
+ * - thread1 for 300 with idle
+ * - thread2 for 400 with busy.
+ * Then,
+ * - both elapsed TSC of thread1 and thread2 should be 1000 (= 100 + 900).
+ * - busy TSC of reactor should be 500 (= 100 + 400).
+ * - idle TSC of reactor should be 500 (= 200 + 300).
+ */
+
+ allocate_cores(1);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ spdk_cpuset_set_cpu(&cpuset, 0, true);
+
+ MOCK_SET(spdk_env_get_current_core, 0);
+ MOCK_SET(spdk_get_ticks, 100);
+
+ thread1 = spdk_thread_create(NULL, &cpuset);
+ SPDK_CU_ASSERT_FATAL(thread1 != NULL);
+
+ thread2 = spdk_thread_create(NULL, &cpuset);
+ SPDK_CU_ASSERT_FATAL(thread2 != NULL);
+
+ reactor = spdk_reactor_get(0);
+ SPDK_CU_ASSERT_FATAL(reactor != NULL);
+
+ reactor->tsc_last = 100;
+
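+ /* The (void *) poller contexts below are the tick counts each poller
+  * consumes per run (see poller_run_busy/poller_run_idle).
+  */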
+ spdk_set_thread(thread1);
+ busy1 = spdk_poller_register(poller_run_busy, (void *)100, 0);
+ CU_ASSERT(busy1 != NULL);
+
+ spdk_set_thread(thread2);
+ idle2 = spdk_poller_register(poller_run_idle, (void *)300, 0);
+ CU_ASSERT(idle2 != NULL);
+
+ _reactor_run(reactor);
+
+ CU_ASSERT(thread1->tsc_last == 200);
+ CU_ASSERT(thread1->stats.busy_tsc == 100);
+ CU_ASSERT(thread1->stats.idle_tsc == 0);
+ CU_ASSERT(thread2->tsc_last == 500);
+ CU_ASSERT(thread2->stats.busy_tsc == 0);
+ CU_ASSERT(thread2->stats.idle_tsc == 300);
+
+ CU_ASSERT(reactor->busy_tsc == 100);
+ CU_ASSERT(reactor->idle_tsc == 300);
+
+ spdk_set_thread(thread1);
+ spdk_poller_unregister(&busy1);
+ idle1 = spdk_poller_register(poller_run_idle, (void *)200, 0);
+ CU_ASSERT(idle1 != NULL);
+
+ spdk_set_thread(thread2);
+ spdk_poller_unregister(&idle2);
+ busy2 = spdk_poller_register(poller_run_busy, (void *)400, 0);
+ CU_ASSERT(busy2 != NULL);
+
+ _reactor_run(reactor);
+
+ CU_ASSERT(thread1->tsc_last == 700);
+ CU_ASSERT(thread1->stats.busy_tsc == 100);
+ CU_ASSERT(thread1->stats.idle_tsc == 200);
+ CU_ASSERT(thread2->tsc_last == 1100);
+ CU_ASSERT(thread2->stats.busy_tsc == 400);
+ CU_ASSERT(thread2->stats.idle_tsc == 300);
+
+ CU_ASSERT(reactor->busy_tsc == 500);
+ CU_ASSERT(reactor->idle_tsc == 500);
+
+ spdk_set_thread(thread1);
+ spdk_poller_unregister(&idle1);
+ spdk_thread_exit(thread1);
+
+ spdk_set_thread(thread2);
+ spdk_poller_unregister(&busy2);
+ spdk_thread_exit(thread2);
+
+ _reactor_run(reactor);
+
+ CU_ASSERT(TAILQ_EMPTY(&reactor->threads));
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("app_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_create_reactor);
+ CU_ADD_TEST(suite, test_init_reactors);
+ CU_ADD_TEST(suite, test_event_call);
+ CU_ADD_TEST(suite, test_schedule_thread);
+ CU_ADD_TEST(suite, test_reschedule_thread);
+ CU_ADD_TEST(suite, test_for_each_reactor);
+ CU_ADD_TEST(suite, test_reactor_stats);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/event/subsystem.c/.gitignore b/src/spdk/test/unit/lib/event/subsystem.c/.gitignore
new file mode 100644
index 000000000..76ca0d330
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/subsystem.c/.gitignore
@@ -0,0 +1 @@
+subsystem_ut
diff --git a/src/spdk/test/unit/lib/event/subsystem.c/Makefile b/src/spdk/test/unit/lib/event/subsystem.c/Makefile
new file mode 100644
index 000000000..b62f1ee1a
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/subsystem.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = subsystem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c b/src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c
new file mode 100644
index 000000000..deeb2f3aa
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c
@@ -0,0 +1,255 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "unit/lib/json_mock.c"
+#include "event/subsystem.c"
+#include "common/lib/test_env.c"
+
+static struct spdk_subsystem g_ut_subsystems[8];
+static struct spdk_subsystem_depend g_ut_subsystem_deps[8];
+static int global_rc;
+
+static void
+ut_event_fn(int rc, void *arg1)
+{
+ global_rc = rc;
+}
+
+static void
+set_up_subsystem(struct spdk_subsystem *subsystem, const char *name)
+{
+ subsystem->init = NULL;
+ subsystem->fini = NULL;
+ subsystem->config = NULL;
+ subsystem->name = name;
+}
+
+static void
+set_up_depends(struct spdk_subsystem_depend *depend, const char *subsystem_name,
+ const char *depends_on_name)
+{
+ depend->name = subsystem_name;
+ depend->depends_on = depends_on_name;
+}
+
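+/* Drain the global subsystem and dependency lists so each test case starts
+ * from an empty registry.
+ */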
+static void
+subsystem_clear(void)
+{
+ struct spdk_subsystem *subsystem, *subsystem_tmp;
+ struct spdk_subsystem_depend *subsystem_dep, *subsystem_dep_tmp;
+
+ TAILQ_FOREACH_SAFE(subsystem, &g_subsystems, tailq, subsystem_tmp) {
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+ }
+
+ TAILQ_FOREACH_SAFE(subsystem_dep, &g_subsystems_deps, tailq, subsystem_dep_tmp) {
+ TAILQ_REMOVE(&g_subsystems_deps, subsystem_dep, tailq);
+ }
+}
+
+static void
+subsystem_sort_test_depends_on_single(void)
+{
+ struct spdk_subsystem *subsystem;
+ int i;
+ char subsystem_name[16];
+
+ global_rc = -1;
+ spdk_subsystem_init(ut_event_fn, NULL);
+ CU_ASSERT(global_rc == 0);
+
+ i = 4;
+ TAILQ_FOREACH(subsystem, &g_subsystems, tailq) {
+ snprintf(subsystem_name, sizeof(subsystem_name), "subsystem%d", i);
+ SPDK_CU_ASSERT_FATAL(i > 0);
+ i--;
+ CU_ASSERT(strcmp(subsystem_name, subsystem->name) == 0);
+ }
+}
+
+static void
+subsystem_sort_test_depends_on_multiple(void)
+{
+ int i;
+ struct spdk_subsystem *subsystem;
+
+ subsystem_clear();
+ set_up_subsystem(&g_ut_subsystems[0], "iscsi");
+ set_up_subsystem(&g_ut_subsystems[1], "nvmf");
+ set_up_subsystem(&g_ut_subsystems[2], "sock");
+ set_up_subsystem(&g_ut_subsystems[3], "bdev");
+ set_up_subsystem(&g_ut_subsystems[4], "rpc");
+ set_up_subsystem(&g_ut_subsystems[5], "scsi");
+ set_up_subsystem(&g_ut_subsystems[6], "interface");
+ set_up_subsystem(&g_ut_subsystems[7], "accel");
+
+ for (i = 0; i < 8; i++) {
+ spdk_add_subsystem(&g_ut_subsystems[i]);
+ }
+
+ set_up_depends(&g_ut_subsystem_deps[0], "bdev", "accel");
+ set_up_depends(&g_ut_subsystem_deps[1], "scsi", "bdev");
+ set_up_depends(&g_ut_subsystem_deps[2], "rpc", "interface");
+ set_up_depends(&g_ut_subsystem_deps[3], "sock", "interface");
+ set_up_depends(&g_ut_subsystem_deps[4], "nvmf", "interface");
+ set_up_depends(&g_ut_subsystem_deps[5], "iscsi", "scsi");
+ set_up_depends(&g_ut_subsystem_deps[6], "iscsi", "sock");
+ set_up_depends(&g_ut_subsystem_deps[7], "iscsi", "rpc");
+
+ for (i = 0; i < 8; i++) {
+ spdk_add_subsystem_depend(&g_ut_subsystem_deps[i]);
+ }
+
+ global_rc = -1;
+ spdk_subsystem_init(ut_event_fn, NULL);
+ CU_ASSERT(global_rc == 0);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "interface") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "accel") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "nvmf") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "sock") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "bdev") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "rpc") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "scsi") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "iscsi") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+}
+
+struct spdk_subsystem subsystem1 = {
+ .name = "subsystem1",
+};
+
+struct spdk_subsystem subsystem2 = {
+ .name = "subsystem2",
+};
+struct spdk_subsystem subsystem3 = {
+ .name = "subsystem3",
+};
+
+struct spdk_subsystem subsystem4 = {
+ .name = "subsystem4",
+};
+
+SPDK_SUBSYSTEM_REGISTER(subsystem1);
+SPDK_SUBSYSTEM_REGISTER(subsystem2);
+SPDK_SUBSYSTEM_REGISTER(subsystem3);
+SPDK_SUBSYSTEM_REGISTER(subsystem4);
+
+SPDK_SUBSYSTEM_DEPEND(subsystem1, subsystem2)
+SPDK_SUBSYSTEM_DEPEND(subsystem2, subsystem3)
+SPDK_SUBSYSTEM_DEPEND(subsystem3, subsystem4)
+
+static void
+subsystem_sort_test_missing_dependency(void)
+{
+ /*
+ * A depends on B, but B is missing
+ */
+
+ subsystem_clear();
+ set_up_subsystem(&g_ut_subsystems[0], "A");
+ spdk_add_subsystem(&g_ut_subsystems[0]);
+
+ set_up_depends(&g_ut_subsystem_deps[0], "A", "B");
+ spdk_add_subsystem_depend(&g_ut_subsystem_deps[0]);
+
+ global_rc = -1;
+ spdk_subsystem_init(ut_event_fn, NULL);
+ CU_ASSERT(global_rc != 0);
+
+ /*
+ * Dependency from C to A is defined, but C is missing
+ */
+
+ subsystem_clear();
+ set_up_subsystem(&g_ut_subsystems[0], "A");
+ spdk_add_subsystem(&g_ut_subsystems[0]);
+
+ set_up_depends(&g_ut_subsystem_deps[0], "C", "A");
+ spdk_add_subsystem_depend(&g_ut_subsystem_deps[0]);
+
+ global_rc = -1;
+ spdk_subsystem_init(ut_event_fn, NULL);
+ CU_ASSERT(global_rc != 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("subsystem_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, subsystem_sort_test_depends_on_single);
+ CU_ADD_TEST(suite, subsystem_sort_test_depends_on_multiple);
+ CU_ADD_TEST(suite, subsystem_sort_test_missing_dependency);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/Makefile b/src/spdk/test/unit/lib/ftl/Makefile
new file mode 100644
index 000000000..57745c450
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = ftl_ppa ftl_band.c ftl_reloc.c ftl_wptr ftl_md ftl_io.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/ftl/common/utils.c b/src/spdk/test/unit/lib/ftl/common/utils.c
new file mode 100644
index 000000000..dda828df8
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/common/utils.c
@@ -0,0 +1,173 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_internal/thread.h"
+
+#include "spdk/ftl.h"
+#include "ftl/ftl_core.h"
+
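+/*
+ * Geometry of the emulated zoned base bdev; each test defines g_geo with the
+ * values it wants the stubbed bdev accessors below to report.
+ */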
+struct base_bdev_geometry {
+ size_t write_unit_size;
+ size_t zone_size;
+ size_t optimal_open_zones;
+ size_t blockcnt;
+};
+
+extern struct base_bdev_geometry g_geo;
+
+struct spdk_ftl_dev *test_init_ftl_dev(const struct base_bdev_geometry *geo);
+struct ftl_band *test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id, size_t zone_size);
+void test_free_ftl_dev(struct spdk_ftl_dev *dev);
+void test_free_ftl_band(struct ftl_band *band);
+uint64_t test_offset_from_addr(struct ftl_addr addr, struct ftl_band *band);
+
+DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
+
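+/* The zoned bdev accessors are stubbed out to report the test geometry. */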
+uint64_t
+spdk_bdev_get_zone_size(const struct spdk_bdev *bdev)
+{
+ return g_geo.zone_size;
+}
+
+uint32_t
+spdk_bdev_get_optimal_open_zones(const struct spdk_bdev *bdev)
+{
+ return g_geo.optimal_open_zones;
+}
+
+struct spdk_ftl_dev *
+test_init_ftl_dev(const struct base_bdev_geometry *geo)
+{
+ struct spdk_ftl_dev *dev;
+
+ dev = calloc(1, sizeof(*dev));
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+
+ dev->xfer_size = geo->write_unit_size;
+ dev->core_thread = spdk_thread_create("unit_test_thread", NULL);
+ spdk_set_thread(dev->core_thread);
+	dev->ioch = calloc(1, sizeof(*dev->ioch)
+		   + sizeof(struct ftl_io_channel *));
+	SPDK_CU_ASSERT_FATAL(dev->ioch != NULL);
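+	/* A band spans one zone from each parallel unit, hence this block count. */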
+ dev->num_bands = geo->blockcnt / (geo->zone_size * geo->optimal_open_zones);
+ dev->bands = calloc(dev->num_bands, sizeof(*dev->bands));
+ SPDK_CU_ASSERT_FATAL(dev->bands != NULL);
+
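+	/* Pool of two 96KiB (0x18000-byte) buffers backing band LBA maps. */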
+ dev->lba_pool = spdk_mempool_create("ftl_ut", 2, 0x18000,
+ SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+ SPDK_ENV_SOCKET_ID_ANY);
+ SPDK_CU_ASSERT_FATAL(dev->lba_pool != NULL);
+
+ LIST_INIT(&dev->free_bands);
+ LIST_INIT(&dev->shut_bands);
+
+ return dev;
+}
+
+struct ftl_band *
+test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id, size_t zone_size)
+{
+ struct ftl_band *band;
+ struct ftl_zone *zone;
+
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+ SPDK_CU_ASSERT_FATAL(id < dev->num_bands);
+
+ band = &dev->bands[id];
+ band->dev = dev;
+ band->id = id;
+
+ band->state = FTL_BAND_STATE_CLOSED;
+ LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
+ CIRCLEQ_INIT(&band->zones);
+
+ band->lba_map.vld = spdk_bit_array_create(ftl_get_num_blocks_in_band(dev));
+ SPDK_CU_ASSERT_FATAL(band->lba_map.vld != NULL);
+
+ band->zone_buf = calloc(ftl_get_num_punits(dev), sizeof(*band->zone_buf));
+ SPDK_CU_ASSERT_FATAL(band->zone_buf != NULL);
+
+ band->reloc_bitmap = spdk_bit_array_create(ftl_get_num_bands(dev));
+ SPDK_CU_ASSERT_FATAL(band->reloc_bitmap != NULL);
+
+ for (size_t i = 0; i < ftl_get_num_punits(dev); ++i) {
+ zone = &band->zone_buf[i];
+ zone->info.state = SPDK_BDEV_ZONE_STATE_FULL;
+ zone->info.zone_id = zone_size * (id * ftl_get_num_punits(dev) + i);
+ CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq);
+ band->num_zones++;
+ }
+
+ pthread_spin_init(&band->lba_map.lock, PTHREAD_PROCESS_PRIVATE);
+ return band;
+}
+
+void
+test_free_ftl_dev(struct spdk_ftl_dev *dev)
+{
+ struct spdk_thread *thread;
+
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+ free(dev->ioch);
+
+ thread = dev->core_thread;
+
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+ spdk_mempool_free(dev->lba_pool);
+ free(dev->bands);
+ free(dev);
+}
+
+void
+test_free_ftl_band(struct ftl_band *band)
+{
+ SPDK_CU_ASSERT_FATAL(band != NULL);
+ spdk_bit_array_free(&band->lba_map.vld);
+ spdk_bit_array_free(&band->reloc_bitmap);
+ free(band->zone_buf);
+ spdk_dma_free(band->lba_map.dma_buf);
+}
+
+uint64_t
+test_offset_from_addr(struct ftl_addr addr, struct ftl_band *band)
+{
+ struct spdk_ftl_dev *dev = band->dev;
+
+ CU_ASSERT_EQUAL(ftl_addr_get_band(dev, addr), band->id);
+
+ return addr.offset - band->id * ftl_get_num_blocks_in_band(dev);
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_band.c/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_band.c/.gitignore
new file mode 100644
index 000000000..aa8820632
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_band.c/.gitignore
@@ -0,0 +1 @@
+ftl_band_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_band.c/Makefile b/src/spdk/test/unit/lib/ftl/ftl_band.c/Makefile
new file mode 100644
index 000000000..4d4195105
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_band.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_band_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_band.c/ftl_band_ut.c b/src/spdk/test/unit/lib/ftl/ftl_band.c/ftl_band_ut.c
new file mode 100644
index 000000000..d4f299e5b
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_band.c/ftl_band_ut.c
@@ -0,0 +1,307 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+
+#include "ftl/ftl_core.c"
+#include "ftl/ftl_band.c"
+#include "../common/utils.c"
+
+#define TEST_BAND_IDX 68
+#define TEST_LBA 0x68676564
+
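+/*
+ * Test geometry: 9 parallel units of 100-block zones, i.e. 900-block bands;
+ * TEST_BAND_IDX selects an arbitrary band well inside the resulting range.
+ */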
+struct base_bdev_geometry g_geo = {
+ .write_unit_size = 16,
+ .optimal_open_zones = 9,
+ .zone_size = 100,
+ .blockcnt = 1500 * 100 * 8,
+};
+
+static struct spdk_ftl_dev *g_dev;
+static struct ftl_band *g_band;
+
+static void
+setup_band(void)
+{
+ int rc;
+
+ g_dev = test_init_ftl_dev(&g_geo);
+ g_band = test_init_ftl_band(g_dev, TEST_BAND_IDX, g_geo.zone_size);
+ rc = ftl_band_alloc_lba_map(g_band);
+ CU_ASSERT_EQUAL_FATAL(rc, 0);
+}
+
+static void
+cleanup_band(void)
+{
+ test_free_ftl_band(g_band);
+ test_free_ftl_dev(g_dev);
+}
+
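+/* Band addresses are punit-major: parallel unit i starts at offset i * zone_size. */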
+static struct ftl_addr
+addr_from_punit(uint64_t punit)
+{
+ struct ftl_addr addr = {};
+
+ addr.offset = punit * g_geo.zone_size;
+ return addr;
+}
+
+static void
+test_band_block_offset_from_addr_base(void)
+{
+ struct ftl_addr addr;
+ uint64_t offset, i, flat_lun = 0;
+
+ setup_band();
+ for (i = 0; i < ftl_get_num_punits(g_dev); ++i) {
+ addr = addr_from_punit(i);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+
+ offset = ftl_band_block_offset_from_addr(g_band, addr);
+ CU_ASSERT_EQUAL(offset, flat_lun * ftl_get_num_blocks_in_zone(g_dev));
+ flat_lun++;
+ }
+ cleanup_band();
+}
+
+static void
+test_band_block_offset_from_addr_offset(void)
+{
+ struct ftl_addr addr;
+ uint64_t offset, expect, i, j;
+
+ setup_band();
+ for (i = 0; i < ftl_get_num_punits(g_dev); ++i) {
+ for (j = 0; j < g_geo.zone_size; ++j) {
+ addr = addr_from_punit(i);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + j;
+
+ offset = ftl_band_block_offset_from_addr(g_band, addr);
+
+ expect = test_offset_from_addr(addr, g_band);
+ CU_ASSERT_EQUAL(offset, expect);
+ }
+ }
+ cleanup_band();
+}
+
+static void
+test_band_addr_from_block_offset(void)
+{
+ struct ftl_addr addr, expect;
+ uint64_t offset, i, j;
+
+ setup_band();
+ for (i = 0; i < ftl_get_num_punits(g_dev); ++i) {
+ for (j = 0; j < g_geo.zone_size; ++j) {
+ expect = addr_from_punit(i);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + j;
+
+ offset = ftl_band_block_offset_from_addr(g_band, expect);
+ addr = ftl_band_addr_from_block_offset(g_band, offset);
+
+ CU_ASSERT_EQUAL(addr.offset, expect.offset);
+ }
+ }
+ cleanup_band();
+}
+
+static void
+test_band_set_addr(void)
+{
+ struct ftl_lba_map *lba_map;
+ struct ftl_addr addr;
+ uint64_t offset = 0;
+
+ setup_band();
+ lba_map = &g_band->lba_map;
+ addr = addr_from_punit(0);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+
+ CU_ASSERT_EQUAL(lba_map->num_vld, 0);
+
+ offset = test_offset_from_addr(addr, g_band);
+
+ ftl_band_set_addr(g_band, TEST_LBA, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 1);
+ CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
+
+ addr.offset += g_geo.zone_size;
+ offset = test_offset_from_addr(addr, g_band);
+ ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 2);
+ CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA + 1);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
+ addr.offset -= g_geo.zone_size;
+ offset = test_offset_from_addr(addr, g_band);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
+ cleanup_band();
+}
+
+static void
+test_invalidate_addr(void)
+{
+ struct ftl_lba_map *lba_map;
+ struct ftl_addr addr;
+ uint64_t offset[2];
+
+ setup_band();
+ lba_map = &g_band->lba_map;
+ addr = addr_from_punit(0);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ offset[0] = test_offset_from_addr(addr, g_band);
+
+ ftl_band_set_addr(g_band, TEST_LBA, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 1);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
+ ftl_invalidate_addr(g_band->dev, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 0);
+ CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[0]));
+
+ offset[0] = test_offset_from_addr(addr, g_band);
+ ftl_band_set_addr(g_band, TEST_LBA, addr);
+ addr.offset += g_geo.zone_size;
+ offset[1] = test_offset_from_addr(addr, g_band);
+ ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 2);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[1]));
+ ftl_invalidate_addr(g_band->dev, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 1);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
+ CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[1]));
+ cleanup_band();
+}
+
+static void
+test_next_xfer_addr(void)
+{
+ struct ftl_addr addr, result, expect;
+
+ setup_band();
+	/* Verify a simple one-block increment */
+ addr = addr_from_punit(0);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ expect = addr;
+ expect.offset += 1;
+
+ result = ftl_band_next_xfer_addr(g_band, addr, 1);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Verify jumping between zones */
+ expect = addr_from_punit(1);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Verify jumping works with unaligned offsets */
+ expect = addr_from_punit(1);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + 3;
+ result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size + 3);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Verify jumping from last zone to the first one */
+ expect = addr_from_punit(0);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + g_dev->xfer_size;
+ addr = addr_from_punit(ftl_get_num_punits(g_dev) - 1);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Verify jumping from last zone to the first one with unaligned offset */
+ expect = addr_from_punit(0);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ expect.offset += g_dev->xfer_size + 2;
+ addr = addr_from_punit(ftl_get_num_punits(g_dev) - 1);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size + 2);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Verify large offset spanning across the whole band multiple times */
+ expect = addr_from_punit(0);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ expect.offset += g_dev->xfer_size * 5 + 4;
+ addr = addr_from_punit(0);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ addr.offset += g_dev->xfer_size * 2 + 1;
+ result = ftl_band_next_xfer_addr(g_band, addr, 3 * g_dev->xfer_size *
+ ftl_get_num_punits(g_dev) + 3);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Remove one zone and verify it's skipped properly */
+ g_band->zone_buf[1].info.state = SPDK_BDEV_ZONE_STATE_OFFLINE;
+ CIRCLEQ_REMOVE(&g_band->zones, &g_band->zone_buf[1], circleq);
+ g_band->num_zones--;
+ expect = addr_from_punit(2);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ expect.offset += g_dev->xfer_size * 5 + 4;
+ addr = addr_from_punit(0);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ addr.offset += g_dev->xfer_size * 2 + 1;
+ result = ftl_band_next_xfer_addr(g_band, addr, 3 * g_dev->xfer_size *
+ (ftl_get_num_punits(g_dev) - 1) + g_dev->xfer_size + 3);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+ cleanup_band();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ftl_band_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_band_block_offset_from_addr_base);
+ CU_ADD_TEST(suite, test_band_block_offset_from_addr_offset);
+ CU_ADD_TEST(suite, test_band_addr_from_block_offset);
+ CU_ADD_TEST(suite, test_band_set_addr);
+ CU_ADD_TEST(suite, test_invalidate_addr);
+ CU_ADD_TEST(suite, test_next_xfer_addr);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_io.c/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_io.c/.gitignore
new file mode 100644
index 000000000..c5e09253e
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_io.c/.gitignore
@@ -0,0 +1 @@
+ftl_io_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_io.c/Makefile b/src/spdk/test/unit/lib/ftl/ftl_io.c/Makefile
new file mode 100644
index 000000000..e06a186b1
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_io.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_io_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c b/src/spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c
new file mode 100644
index 000000000..81288de60
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c
@@ -0,0 +1,1068 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/ut_multithread.c"
+
+#include "ftl/ftl_io.c"
+#include "ftl/ftl_init.c"
+#include "ftl/ftl_core.c"
+#include "ftl/ftl_band.c"
+
+DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
+DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
+DEFINE_STUB(spdk_bdev_get_optimal_open_zones, uint32_t, (const struct spdk_bdev *b), 1);
+DEFINE_STUB(spdk_bdev_zone_appendv, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t zone_id, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *b), 1024);
+DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
+ struct spdk_io_channel *ch, uint64_t zone_id, enum spdk_bdev_zone_action action,
+ spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
+DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ void *buf, uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_write_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ void *buf, uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_write_blocks_with_md, int, (struct spdk_bdev_desc *desc,
+ struct spdk_io_channel *ch, void *buf, void *md, uint64_t offset_blocks,
+ uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_writev_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);
+DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 4096);
+#if defined(FTL_META_DEBUG)
+DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
+#endif
+#if defined(DEBUG)
+DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
+ struct ftl_addr addr, size_t addr_cnt));
+DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
+DEFINE_STUB(ftl_trace_alloc_id, uint64_t, (struct spdk_ftl_dev *dev), 0);
+DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
+ enum ftl_trace_completion type));
+DEFINE_STUB_V(ftl_trace_wbuf_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
+#endif
+
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
+{
+ return spdk_get_io_channel(bdev_desc);
+}
+
+static int
+channel_create_cb(void *io_device, void *ctx)
+{
+ return 0;
+}
+
+static void
+channel_destroy_cb(void *io_device, void *ctx)
+{}
+
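+/*
+ * Builds a minimal spdk_ftl_dev for the IO tests: a fake base bdev descriptor
+ * (an arbitrary marker value) is registered as an io_device so that channel
+ * creation and the stubbed spdk_bdev_get_io_channel() above work.
+ */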
+static struct spdk_ftl_dev *
+setup_device(uint32_t num_threads, uint32_t xfer_size)
+{
+ struct spdk_ftl_dev *dev;
+ struct _ftl_io_channel *_ioch;
+ struct ftl_io_channel *ioch;
+ int rc;
+
+ allocate_threads(num_threads);
+ set_thread(0);
+
+ dev = calloc(1, sizeof(*dev));
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+
+ dev->core_thread = spdk_get_thread();
+ dev->ioch = calloc(1, sizeof(*_ioch) + sizeof(struct spdk_io_channel));
+ SPDK_CU_ASSERT_FATAL(dev->ioch != NULL);
+
+ _ioch = (struct _ftl_io_channel *)(dev->ioch + 1);
+ ioch = _ioch->ioch = calloc(1, sizeof(*ioch));
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+
+ ioch->elem_size = sizeof(struct ftl_md_io);
+ ioch->io_pool = spdk_mempool_create("io-pool", 4096, ioch->elem_size, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(ioch->io_pool != NULL);
+
+ dev->conf = g_default_conf;
+ dev->xfer_size = xfer_size;
+ dev->base_bdev_desc = (struct spdk_bdev_desc *)0xdeadbeef;
+ spdk_io_device_register(dev->base_bdev_desc, channel_create_cb, channel_destroy_cb, 0, NULL);
+
+ rc = ftl_dev_init_io_channel(dev);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ return dev;
+}
+
+static void
+free_device(struct spdk_ftl_dev *dev)
+{
+ struct ftl_io_channel *ioch;
+
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ spdk_mempool_free(ioch->io_pool);
+ free(ioch);
+
+ spdk_io_device_unregister(dev, NULL);
+ spdk_io_device_unregister(dev->base_bdev_desc, NULL);
+ free_threads();
+
+ free(dev->ioch_array);
+ free(dev->iov_buf);
+ free(dev->ioch);
+ free(dev);
+}
+
+static void
+setup_io(struct ftl_io *io, struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
+{
+ io->dev = dev;
+ io->cb_fn = cb;
+ io->cb_ctx = ctx;
+}
+
+static struct ftl_io *
+alloc_io(struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
+{
+ struct ftl_io *io;
+
+ io = ftl_io_alloc(dev->ioch);
+ SPDK_CU_ASSERT_FATAL(io != NULL);
+ setup_io(io, dev, cb, ctx);
+
+ return io;
+}
+
+static void
+io_complete_cb(struct ftl_io *io, void *ctx, int status)
+{
+ *(int *)ctx = status;
+}
+
+static void
+test_completion(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_io_channel *ioch;
+ struct ftl_io *io;
+ int req, status = 0;
+ size_t pool_size;
+
+ dev = setup_device(1, 16);
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ pool_size = spdk_mempool_count(ioch->io_pool);
+
+ io = alloc_io(dev, io_complete_cb, &status);
+ io->status = -EIO;
+
+#define NUM_REQUESTS 16
+ for (req = 0; req < NUM_REQUESTS; ++req) {
+ ftl_io_inc_req(io);
+ CU_ASSERT_FALSE(ftl_io_done(io));
+ }
+
+ CU_ASSERT_EQUAL(io->req_cnt, NUM_REQUESTS);
+
+ for (req = 0; req < (NUM_REQUESTS - 1); ++req) {
+ ftl_io_dec_req(io);
+ CU_ASSERT_FALSE(ftl_io_done(io));
+ }
+
+ CU_ASSERT_EQUAL(io->req_cnt, 1);
+
+ ftl_io_dec_req(io);
+ CU_ASSERT_TRUE(ftl_io_done(io));
+
+ ftl_io_complete(io);
+ CU_ASSERT_EQUAL(status, -EIO);
+
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ free_device(dev);
+}
+
+static void
+test_alloc_free(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_io_channel *ioch;
+ struct ftl_io *parent, *child;
+ int parent_status = -1;
+ size_t pool_size;
+
+ dev = setup_device(1, 16);
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ pool_size = spdk_mempool_count(ioch->io_pool);
+
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ SPDK_CU_ASSERT_FATAL(parent != NULL);
+ child = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child != NULL);
+
+ ftl_io_free(child);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);
+
+ child = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child != NULL);
+ ftl_io_complete(child);
+ CU_ASSERT_EQUAL(parent_status, -1);
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(parent_status, 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ parent_status = -1;
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ SPDK_CU_ASSERT_FATAL(parent != NULL);
+ child = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child != NULL);
+
+ ftl_io_free(child);
+ CU_ASSERT_EQUAL(parent_status, -1);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(parent_status, 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ free_device(dev);
+}
+
+static void
+test_child_requests(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_io_channel *ioch;
+#define MAX_CHILDREN 16
+ struct ftl_io *parent, *child[MAX_CHILDREN];
+ int status[MAX_CHILDREN + 1], i;
+ size_t pool_size;
+
+ dev = setup_device(1, 16);
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ pool_size = spdk_mempool_count(ioch->io_pool);
+
+ /* Verify correct behaviour when children finish first */
+ parent = alloc_io(dev, io_complete_cb, &status[0]);
+ parent->status = 0;
+
+ ftl_io_inc_req(parent);
+ status[0] = -1;
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ status[i + 1] = -1;
+
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
+ child[i]->status = 0;
+
+ ftl_io_inc_req(child[i]);
+ }
+
+ CU_ASSERT_FALSE(ftl_io_done(parent));
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ CU_ASSERT_FALSE(ftl_io_done(child[i]));
+ ftl_io_dec_req(child[i]);
+ CU_ASSERT_TRUE(ftl_io_done(child[i]));
+ CU_ASSERT_FALSE(ftl_io_done(parent));
+
+ ftl_io_complete(child[i]);
+ CU_ASSERT_FALSE(ftl_io_done(parent));
+ CU_ASSERT_EQUAL(status[i + 1], 0);
+ }
+
+ CU_ASSERT_EQUAL(status[0], -1);
+
+ ftl_io_dec_req(parent);
+ CU_ASSERT_EQUAL(parent->req_cnt, 0);
+ CU_ASSERT_TRUE(ftl_io_done(parent));
+
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(status[0], 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ /* Verify correct behaviour when parent finishes first */
+ parent = alloc_io(dev, io_complete_cb, &status[0]);
+ parent->status = 0;
+
+ ftl_io_inc_req(parent);
+ status[0] = -1;
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ status[i + 1] = -1;
+
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
+ child[i]->status = 0;
+
+ ftl_io_inc_req(child[i]);
+ }
+
+ CU_ASSERT_FALSE(ftl_io_done(parent));
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);
+
+ ftl_io_dec_req(parent);
+ CU_ASSERT_TRUE(ftl_io_done(parent));
+ CU_ASSERT_EQUAL(parent->req_cnt, 0);
+
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(status[0], -1);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ CU_ASSERT_FALSE(ftl_io_done(child[i]));
+ ftl_io_dec_req(child[i]);
+ CU_ASSERT_TRUE(ftl_io_done(child[i]));
+
+ ftl_io_complete(child[i]);
+ CU_ASSERT_EQUAL(status[i + 1], 0);
+ }
+
+ CU_ASSERT_EQUAL(status[0], 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ free_device(dev);
+}
+
+static void
+test_child_status(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_io_channel *ioch;
+ struct ftl_io *parent, *child[2];
+ int parent_status, child_status[2];
+ size_t pool_size, i;
+
+ dev = setup_device(1, 16);
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ pool_size = spdk_mempool_count(ioch->io_pool);
+
+ /* Verify the first error is returned by the parent */
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ parent->status = 0;
+
+ for (i = 0; i < 2; ++i) {
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &child_status[i]);
+ }
+
+ child[0]->status = -3;
+ child[1]->status = -4;
+
+ ftl_io_complete(child[1]);
+ ftl_io_complete(child[0]);
+ ftl_io_complete(parent);
+
+ CU_ASSERT_EQUAL(child_status[0], -3);
+ CU_ASSERT_EQUAL(child_status[1], -4);
+ CU_ASSERT_EQUAL(parent_status, -4);
+
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ /* Verify parent's status is kept if children finish successfully */
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ parent->status = -1;
+
+ for (i = 0; i < 2; ++i) {
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &child_status[i]);
+ }
+
+ child[0]->status = 0;
+ child[1]->status = 0;
+
+ ftl_io_complete(parent);
+ ftl_io_complete(child[1]);
+ ftl_io_complete(child[0]);
+
+ CU_ASSERT_EQUAL(child_status[0], 0);
+ CU_ASSERT_EQUAL(child_status[1], 0);
+ CU_ASSERT_EQUAL(parent_status, -1);
+
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ /* Verify parent's status is kept if children fail too */
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ parent->status = -1;
+
+ for (i = 0; i < 2; ++i) {
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &child_status[i]);
+ }
+
+ child[0]->status = -3;
+ child[1]->status = -4;
+
+ ftl_io_complete(parent);
+ ftl_io_complete(child[1]);
+ ftl_io_complete(child[0]);
+
+ CU_ASSERT_EQUAL(child_status[0], -3);
+ CU_ASSERT_EQUAL(child_status[1], -4);
+ CU_ASSERT_EQUAL(parent_status, -1);
+
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ free_device(dev);
+}
+
+static void
+test_multi_generation(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_io_channel *ioch;
+#define MAX_GRAND_CHILDREN 32
+ struct ftl_io *parent, *child[MAX_CHILDREN], *gchild[MAX_CHILDREN * MAX_GRAND_CHILDREN];
+ int parent_status, child_status[MAX_CHILDREN], gchild_status[MAX_CHILDREN * MAX_GRAND_CHILDREN];
+ size_t pool_size;
+ int i, j;
+
+ dev = setup_device(1, 16);
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ pool_size = spdk_mempool_count(ioch->io_pool);
+
+ /* Verify correct behaviour when children finish first */
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ parent->status = 0;
+
+ ftl_io_inc_req(parent);
+ parent_status = -1;
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ child_status[i] = -1;
+
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &child_status[i]);
+ child[i]->status = 0;
+
+ for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
+ struct ftl_io *io = ftl_io_alloc_child(child[i]);
+ SPDK_CU_ASSERT_FATAL(io != NULL);
+
+ gchild[i * MAX_GRAND_CHILDREN + j] = io;
+ gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
+ setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
+ io->status = 0;
+
+ ftl_io_inc_req(io);
+ }
+
+ ftl_io_inc_req(child[i]);
+ }
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ CU_ASSERT_FALSE(ftl_io_done(child[i]));
+ ftl_io_dec_req(child[i]);
+ CU_ASSERT_TRUE(ftl_io_done(child[i]));
+
+ ftl_io_complete(child[i]);
+ CU_ASSERT_FALSE(ftl_io_done(parent));
+ CU_ASSERT_EQUAL(child_status[i], -1);
+
+ for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
+ struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];
+
+ CU_ASSERT_FALSE(ftl_io_done(io));
+ ftl_io_dec_req(io);
+ CU_ASSERT_TRUE(ftl_io_done(io));
+ ftl_io_complete(io);
+ CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
+ }
+
+ CU_ASSERT_EQUAL(child_status[i], 0);
+ }
+
+ ftl_io_dec_req(parent);
+ CU_ASSERT_TRUE(ftl_io_done(parent));
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(parent_status, 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+	/* Verify correct behaviour when the parent finishes first */
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ parent->status = 0;
+ parent_status = -1;
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ child_status[i] = -1;
+
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &child_status[i]);
+ child[i]->status = 0;
+
+ for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
+ struct ftl_io *io = ftl_io_alloc_child(child[i]);
+ SPDK_CU_ASSERT_FATAL(io != NULL);
+
+ gchild[i * MAX_GRAND_CHILDREN + j] = io;
+ gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
+ setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
+ io->status = 0;
+
+ ftl_io_inc_req(io);
+ }
+
+ CU_ASSERT_TRUE(ftl_io_done(child[i]));
+ ftl_io_complete(child[i]);
+ CU_ASSERT_EQUAL(child_status[i], -1);
+ }
+
+ CU_ASSERT_TRUE(ftl_io_done(parent));
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(parent_status, -1);
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
+ struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];
+
+ CU_ASSERT_FALSE(ftl_io_done(io));
+ ftl_io_dec_req(io);
+ CU_ASSERT_TRUE(ftl_io_done(io));
+ ftl_io_complete(io);
+ CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
+ }
+
+ CU_ASSERT_EQUAL(child_status[i], 0);
+ }
+
+ CU_ASSERT_EQUAL(parent_status, 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ free_device(dev);
+}
+
+static void
+test_io_channel_create(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct spdk_io_channel *ioch, **ioch_array;
+ struct ftl_io_channel *ftl_ioch;
+ uint32_t ioch_idx;
+
+ dev = setup_device(g_default_conf.max_io_channels + 1, 16);
+
+ ioch = spdk_get_io_channel(dev);
+ CU_ASSERT(ioch != NULL);
+ CU_ASSERT_EQUAL(dev->num_io_channels, 1);
+ spdk_put_io_channel(ioch);
+ poll_threads();
+ CU_ASSERT_EQUAL(dev->num_io_channels, 0);
+
+ ioch_array = calloc(dev->conf.max_io_channels, sizeof(*ioch_array));
+	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
+
+ for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+ poll_threads();
+
+ ftl_ioch = ftl_io_channel_get_ctx(ioch);
+ CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
+ }
+
+ CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
+ set_thread(dev->conf.max_io_channels);
+ ioch = spdk_get_io_channel(dev);
+ CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
+ CU_ASSERT_EQUAL(ioch, NULL);
+
+ for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx += 2) {
+ set_thread(ioch_idx);
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ ioch_array[ioch_idx] = NULL;
+ poll_threads();
+ }
+
+ poll_threads();
+ CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels / 2);
+
+ for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
+ set_thread(ioch_idx);
+
+ if (ioch_array[ioch_idx] == NULL) {
+ ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+ poll_threads();
+
+ ftl_ioch = ftl_io_channel_get_ctx(ioch);
+ CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
+ }
+ }
+
+ for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
+ set_thread(ioch_idx);
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ }
+
+ poll_threads();
+ CU_ASSERT_EQUAL(dev->num_io_channels, 0);
+
+ free(ioch_array);
+ free_device(dev);
+}
+
+static void
+test_acquire_entry(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct spdk_io_channel *ioch, **ioch_array;
+ struct ftl_io_channel *ftl_ioch;
+ struct ftl_wbuf_entry *entry, **entries;
+ uint32_t num_entries, num_io_channels = 2;
+ uint32_t ioch_idx, entry_idx, tmp_idx;
+
+ dev = setup_device(num_io_channels, 16);
+
+ num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
+ entries = calloc(num_entries * num_io_channels, sizeof(*entries));
+ SPDK_CU_ASSERT_FATAL(entries != NULL);
+ ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
+ SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
+
+ /* Acquire whole buffer of internal entries */
+ entry_idx = 0;
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
+ poll_threads();
+
+ for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
+ entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
+ CU_ASSERT(entries[entry_idx - 1] != NULL);
+ }
+
+ entry = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
+ CU_ASSERT(entry == NULL);
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+
+ for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
+ ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
+ entries[ioch_idx * num_entries + tmp_idx] = NULL;
+ }
+
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ }
+ poll_threads();
+
+ /* Do the same for user entries */
+ entry_idx = 0;
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
+ poll_threads();
+
+ for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
+ entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entries[entry_idx - 1] != NULL);
+ }
+
+ entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entry == NULL);
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+
+ for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
+ ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
+ entries[ioch_idx * num_entries + tmp_idx] = NULL;
+ }
+
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ }
+ poll_threads();
+
+ /* Verify limits */
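+	/* qdepth_limit applies to user entries only; internal ones bypass it */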
+ entry_idx = 0;
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
+ poll_threads();
+
+ ftl_ioch->qdepth_limit = num_entries / 2;
+ for (tmp_idx = 0; tmp_idx < num_entries / 2; ++tmp_idx) {
+ entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entries[entry_idx - 1] != NULL);
+ }
+
+ entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entry == NULL);
+
+ for (; tmp_idx < num_entries; ++tmp_idx) {
+ entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
+ CU_ASSERT(entries[entry_idx - 1] != NULL);
+ }
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+
+ for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
+ ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
+ entries[ioch_idx * num_entries + tmp_idx] = NULL;
+ }
+
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ }
+ poll_threads();
+
+ /* Verify acquire/release */
+ set_thread(0);
+ ioch = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch);
+ poll_threads();
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ entries[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entries[entry_idx] != NULL);
+ }
+
+ entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entry == NULL);
+
+ for (entry_idx = 0; entry_idx < num_entries / 2; ++entry_idx) {
+ ftl_release_wbuf_entry(entries[entry_idx]);
+ entries[entry_idx] = NULL;
+ }
+
+ for (; entry_idx < num_entries; ++entry_idx) {
+ entries[entry_idx - num_entries / 2] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entries[entry_idx - num_entries / 2] != NULL);
+ }
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ ftl_release_wbuf_entry(entries[entry_idx]);
+ entries[entry_idx] = NULL;
+ }
+
+ spdk_put_io_channel(ioch);
+ poll_threads();
+
+ free(ioch_array);
+ free(entries);
+ free_device(dev);
+}
+
+static void
+test_submit_batch(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct spdk_io_channel **_ioch_array;
+ struct ftl_io_channel **ioch_array;
+ struct ftl_wbuf_entry *entry;
+ struct ftl_batch *batch, *batch2;
+ uint32_t num_io_channels = 16;
+ uint32_t ioch_idx, tmp_idx, entry_idx;
+ uint64_t ioch_bitmap;
+ size_t num_entries;
+
+ dev = setup_device(num_io_channels, num_io_channels);
+
+ _ioch_array = calloc(num_io_channels, sizeof(*_ioch_array));
+ SPDK_CU_ASSERT_FATAL(_ioch_array != NULL);
+ ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
+ SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ _ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(_ioch_array[ioch_idx] != NULL);
+ ioch_array[ioch_idx] = ftl_io_channel_get_ctx(_ioch_array[ioch_idx]);
+ poll_threads();
+ }
+
+ /* Make sure the IO channels are not starved and entries are popped in RR fashion */
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+
+ for (entry_idx = 0; entry_idx < dev->xfer_size; ++entry_idx) {
+ entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
+ SPDK_CU_ASSERT_FATAL(entry != NULL);
+
+ num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
+ (void **)&entry, 1, NULL);
+ CU_ASSERT(num_entries == 1);
+ }
+ }
+
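+	/*
+	 * Refill the channels drained so far; even though they are full again,
+	 * round-robin order means the next batch should still come from the
+	 * current channel.
+	 */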
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ for (tmp_idx = 0; tmp_idx < ioch_idx; ++tmp_idx) {
+ set_thread(tmp_idx);
+
+ while (spdk_ring_count(ioch_array[tmp_idx]->submit_queue) < dev->xfer_size) {
+ entry = ftl_acquire_wbuf_entry(ioch_array[tmp_idx], 0);
+ SPDK_CU_ASSERT_FATAL(entry != NULL);
+
+ num_entries = spdk_ring_enqueue(ioch_array[tmp_idx]->submit_queue,
+ (void **)&entry, 1, NULL);
+ CU_ASSERT(num_entries == 1);
+ }
+ }
+
+ set_thread(ioch_idx);
+
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+
+ TAILQ_FOREACH(entry, &batch->entries, tailq) {
+ CU_ASSERT(entry->ioch == ioch_array[ioch_idx]);
+ }
+
+ ftl_release_batch(dev, batch);
+
+ CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
+ ioch_array[ioch_idx]->num_entries);
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels - 1; ++ioch_idx) {
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+ ftl_release_batch(dev, batch);
+ }
+
+ /* Make sure the batch can be built from entries from any IO channel */
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
+ SPDK_CU_ASSERT_FATAL(entry != NULL);
+
+ num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
+ (void **)&entry, 1, NULL);
+ CU_ASSERT(num_entries == 1);
+ }
+
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+
+ ioch_bitmap = 0;
+ TAILQ_FOREACH(entry, &batch->entries, tailq) {
+ ioch_bitmap |= 1 << entry->ioch->index;
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ CU_ASSERT((ioch_bitmap & (1 << ioch_array[ioch_idx]->index)) != 0);
+ }
+ ftl_release_batch(dev, batch);
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
+ ioch_array[ioch_idx]->num_entries);
+ }
+
+ /* Make sure pending batches are prioritized */
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+
+ while (spdk_ring_count(ioch_array[ioch_idx]->submit_queue) < dev->xfer_size) {
+ entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
+ SPDK_CU_ASSERT_FATAL(entry != NULL);
+ num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
+ (void **)&entry, 1, NULL);
+ CU_ASSERT(num_entries == 1);
+ }
+ }
+
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+
+ TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
+ batch2 = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch2 != NULL);
+
+ CU_ASSERT(TAILQ_EMPTY(&dev->pending_batches));
+ CU_ASSERT(batch == batch2);
+
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+
+ ftl_release_batch(dev, batch);
+ ftl_release_batch(dev, batch2);
+
+ for (ioch_idx = 2; ioch_idx < num_io_channels; ++ioch_idx) {
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+ ftl_release_batch(dev, batch);
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ spdk_put_io_channel(_ioch_array[ioch_idx]);
+ }
+ poll_threads();
+
+ free(_ioch_array);
+ free(ioch_array);
+ free_device(dev);
+}
+
+static void
+test_entry_address(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct spdk_io_channel **ioch_array;
+ struct ftl_io_channel *ftl_ioch;
+ struct ftl_wbuf_entry **entry_array;
+ struct ftl_addr addr;
+ uint32_t num_entries, num_io_channels = 7;
+ uint32_t ioch_idx, entry_idx;
+
+ dev = setup_device(num_io_channels, num_io_channels);
+ ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
+ SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
+
+ num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
+ entry_array = calloc(num_entries, sizeof(*entry_array));
+ SPDK_CU_ASSERT_FATAL(entry_array != NULL);
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
+ poll_threads();
+ }
+
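+	/*
+	 * ftl_get_addr_from_entry() is expected to pack the entry index above
+	 * the low ioch_shift bits, which hold the IO channel index; the lookup
+	 * via ftl_get_entry_from_addr() must round-trip exactly.
+	 */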
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);
+
+ addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
+ CU_ASSERT(addr.cached == 1);
+ CU_ASSERT((addr.cache_offset >> dev->ioch_shift) == entry_idx);
+ CU_ASSERT((addr.cache_offset & ((1 << dev->ioch_shift) - 1)) == ioch_idx);
+ CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
+ }
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ ftl_release_wbuf_entry(entry_array[entry_idx]);
+ }
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ioch_idx += 2) {
+ set_thread(ioch_idx);
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ ioch_array[ioch_idx] = NULL;
+ }
+ poll_threads();
+
+ for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
+ set_thread(ioch_idx);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);
+
+ addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
+ CU_ASSERT(addr.cached == 1);
+ CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
+ }
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ ftl_release_wbuf_entry(entry_array[entry_idx]);
+ }
+ }
+
+ for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
+ set_thread(ioch_idx);
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ }
+ poll_threads();
+
+ free(entry_array);
+ free(ioch_array);
+ free_device(dev);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ftl_io_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_completion);
+ CU_ADD_TEST(suite, test_alloc_free);
+ CU_ADD_TEST(suite, test_child_requests);
+ CU_ADD_TEST(suite, test_child_status);
+ CU_ADD_TEST(suite, test_multi_generation);
+ CU_ADD_TEST(suite, test_io_channel_create);
+ CU_ADD_TEST(suite, test_acquire_entry);
+ CU_ADD_TEST(suite, test_submit_batch);
+ CU_ADD_TEST(suite, test_entry_address);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_md/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_md/.gitignore
new file mode 100644
index 000000000..8f0f690f0
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_md/.gitignore
@@ -0,0 +1 @@
+ftl_md_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_md/Makefile b/src/spdk/test/unit/lib/ftl/ftl_md/Makefile
new file mode 100644
index 000000000..1ad632aff
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_md/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_md_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_md/ftl_md_ut.c b/src/spdk/test/unit/lib/ftl/ftl_md/ftl_md_ut.c
new file mode 100644
index 000000000..20f3a28c9
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_md/ftl_md_ut.c
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+
+#include "ftl/ftl_band.c"
+#include "../common/utils.c"
+
+struct base_bdev_geometry g_geo = {
+ .write_unit_size = 16,
+ .optimal_open_zones = 12,
+ .zone_size = 100,
+ .blockcnt = 1500 * 100 * 12,
+};
+
+static void
+setup_band(struct ftl_band **band, const struct base_bdev_geometry *geo)
+{
+ int rc;
+ struct spdk_ftl_dev *dev;
+
+ dev = test_init_ftl_dev(&g_geo);
+ *band = test_init_ftl_band(dev, 0, geo->zone_size);
+ rc = ftl_band_alloc_lba_map(*band);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ (*band)->state = FTL_BAND_STATE_PREP;
+ ftl_band_clear_lba_map(*band);
+}
+
+static void
+cleanup_band(struct ftl_band *band)
+{
+ struct spdk_ftl_dev *dev = band->dev;
+
+ test_free_ftl_band(band);
+ test_free_ftl_dev(dev);
+}
+
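+/* Round-trip check: head/tail metadata packed into the band must unpack cleanly. */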
+static void
+test_md_unpack(void)
+{
+ struct ftl_band *band;
+ struct ftl_lba_map *lba_map;
+
+ setup_band(&band, &g_geo);
+
+ lba_map = &band->lba_map;
+ SPDK_CU_ASSERT_FATAL(lba_map->dma_buf);
+
+ ftl_pack_head_md(band);
+ CU_ASSERT_EQUAL(ftl_unpack_head_md(band), FTL_MD_SUCCESS);
+
+ ftl_pack_tail_md(band);
+ CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_SUCCESS);
+
+ cleanup_band(band);
+}
+
+static void
+test_md_unpack_fail(void)
+{
+ struct ftl_band *band;
+ struct ftl_lba_map *lba_map;
+ struct ftl_md_hdr *hdr;
+
+ setup_band(&band, &g_geo);
+
+ lba_map = &band->lba_map;
+ SPDK_CU_ASSERT_FATAL(lba_map->dma_buf);
+
+ /* check crc */
+ ftl_pack_tail_md(band);
+ /* flip last bit of lba_map */
+ *((char *)lba_map->dma_buf + ftl_tail_md_num_blocks(band->dev) * FTL_BLOCK_SIZE - 1) ^= 0x1;
+ CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_CRC);
+
+ /* check invalid version */
+ hdr = lba_map->dma_buf;
+ ftl_pack_tail_md(band);
+ hdr->ver++;
+ CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_VER);
+
+ /* check wrong UUID */
+ ftl_pack_head_md(band);
+ hdr->uuid.u.raw[0] ^= 0x1;
+ CU_ASSERT_EQUAL(ftl_unpack_head_md(band), FTL_MD_NO_MD);
+
+ /* check invalid size */
+ ftl_pack_tail_md(band);
+ g_geo.zone_size--;
+ CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_SIZE);
+
+ cleanup_band(band);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ftl_meta_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_md_unpack);
+ CU_ADD_TEST(suite, test_md_unpack_fail);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_ppa/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_ppa/.gitignore
new file mode 100644
index 000000000..7f07c7f98
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_ppa/.gitignore
@@ -0,0 +1 @@
+ftl_ppa_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_ppa/Makefile b/src/spdk/test/unit/lib/ftl/ftl_ppa/Makefile
new file mode 100644
index 000000000..f8df5209e
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_ppa/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_ppa_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_ppa/ftl_ppa_ut.c b/src/spdk/test/unit/lib/ftl/ftl_ppa/ftl_ppa_ut.c
new file mode 100644
index 000000000..dae57abcd
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_ppa/ftl_ppa_ut.c
@@ -0,0 +1,226 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+
+#include "ftl/ftl_core.h"
+
+#define L2P_TABLE_SIZE 1024
+
+static struct spdk_ftl_dev *g_dev;
+
+DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
+
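+/* Stub: report a zone size wide enough to force unpacked addressing when addr_len exceeds 32 bits. */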
+uint64_t
+spdk_bdev_get_zone_size(const struct spdk_bdev *bdev)
+{
+ if (g_dev->addr_len > 32) {
+ return 1ULL << 32;
+ }
+
+ return 1024;
+}
+
+uint32_t
+spdk_bdev_get_optimal_open_zones(const struct spdk_bdev *bdev)
+{
+ return 100;
+}
+
+static struct spdk_ftl_dev *
+test_alloc_dev(size_t size)
+{
+ struct spdk_ftl_dev *dev;
+
+ dev = calloc(1, sizeof(*dev));
+
+ dev->num_lbas = L2P_TABLE_SIZE;
+ dev->l2p = calloc(L2P_TABLE_SIZE, size);
+
+ return dev;
+}
+
+static int
+setup_l2p_32bit(void)
+{
+ g_dev = test_alloc_dev(sizeof(uint32_t));
+ g_dev->addr_len = 24;
+ return 0;
+}
+
+static int
+setup_l2p_64bit(void)
+{
+ g_dev = test_alloc_dev(sizeof(uint64_t));
+ g_dev->addr_len = 63;
+ return 0;
+}
+
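+/* Zero the L2P table, sizing entries for packed (32-bit) or full (64-bit) addresses. */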
+static void
+clean_l2p(void)
+{
+ size_t l2p_elem_size;
+
+ if (ftl_addr_packed(g_dev)) {
+ l2p_elem_size = sizeof(uint32_t);
+ } else {
+ l2p_elem_size = sizeof(uint64_t);
+ }
+ memset(g_dev->l2p, 0, g_dev->num_lbas * l2p_elem_size);
+}
+
+static int
+cleanup(void)
+{
+ free(g_dev->l2p);
+ free(g_dev);
+ g_dev = NULL;
+ return 0;
+}
+
+static void
+test_addr_pack32(void)
+{
+ struct ftl_addr orig = {}, addr;
+
+ /* Check valid address transformation */
+ orig.offset = 4;
+ addr = ftl_addr_to_packed(g_dev, orig);
+ CU_ASSERT_TRUE(addr.offset <= UINT32_MAX);
+ CU_ASSERT_FALSE(addr.pack.cached);
+ addr = ftl_addr_from_packed(g_dev, addr);
+ CU_ASSERT_FALSE(ftl_addr_invalid(addr));
+ CU_ASSERT_EQUAL(addr.offset, orig.offset);
+
+ /* Check invalid address transformation */
+ orig = ftl_to_addr(FTL_ADDR_INVALID);
+ addr = ftl_addr_to_packed(g_dev, orig);
+ CU_ASSERT_TRUE(addr.offset <= UINT32_MAX);
+ addr = ftl_addr_from_packed(g_dev, addr);
+ CU_ASSERT_TRUE(ftl_addr_invalid(addr));
+
+ /* Check cached entry offset transformation */
+ orig.cached = 1;
+ orig.cache_offset = 1024;
+ addr = ftl_addr_to_packed(g_dev, orig);
+ CU_ASSERT_TRUE(addr.offset <= UINT32_MAX);
+ CU_ASSERT_TRUE(addr.pack.cached);
+ addr = ftl_addr_from_packed(g_dev, addr);
+ CU_ASSERT_FALSE(ftl_addr_invalid(addr));
+ CU_ASSERT_TRUE(ftl_addr_cached(addr));
+ CU_ASSERT_EQUAL(addr.offset, orig.offset);
+ clean_l2p();
+}
+
+static void
+test_addr_invalid(void)
+{
+ struct ftl_addr addr;
+ size_t i;
+
+ /* Set every other LBA as invalid */
+ for (i = 0; i < L2P_TABLE_SIZE; i += 2) {
+ ftl_l2p_set(g_dev, i, ftl_to_addr(FTL_ADDR_INVALID));
+ }
+
+	/* Check that every even LBA is invalid while the others are fine */
+ for (i = 0; i < L2P_TABLE_SIZE; ++i) {
+ addr = ftl_l2p_get(g_dev, i);
+
+ if (i % 2 == 0) {
+ CU_ASSERT_TRUE(ftl_addr_invalid(addr));
+ } else {
+ CU_ASSERT_FALSE(ftl_addr_invalid(addr));
+ }
+ }
+ clean_l2p();
+}
+
+static void
+test_addr_cached(void)
+{
+ struct ftl_addr addr;
+ size_t i;
+
+	/* Set every other LBA as cached */
+ for (i = 0; i < L2P_TABLE_SIZE; i += 2) {
+ addr.cached = 1;
+ addr.cache_offset = i;
+ ftl_l2p_set(g_dev, i, addr);
+ }
+
+	/* Check that every even LBA is cached while the others are not */
+ for (i = 0; i < L2P_TABLE_SIZE; ++i) {
+ addr = ftl_l2p_get(g_dev, i);
+
+ if (i % 2 == 0) {
+ CU_ASSERT_TRUE(ftl_addr_cached(addr));
+ CU_ASSERT_EQUAL(addr.cache_offset, i);
+ } else {
+ CU_ASSERT_FALSE(ftl_addr_cached(addr));
+ }
+ }
+ clean_l2p();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite32 = NULL, suite64 = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite32 = CU_add_suite("ftl_addr32_suite", setup_l2p_32bit, cleanup);
+
+ suite64 = CU_add_suite("ftl_addr64_suite", setup_l2p_64bit, cleanup);
+
+ CU_ADD_TEST(suite32, test_addr_pack32);
+ CU_ADD_TEST(suite32, test_addr_invalid);
+ CU_ADD_TEST(suite32, test_addr_cached);
+ CU_ADD_TEST(suite64, test_addr_invalid);
+ CU_ADD_TEST(suite64, test_addr_cached);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_reloc.c/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/.gitignore
new file mode 100644
index 000000000..439602062
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/.gitignore
@@ -0,0 +1 @@
+ftl_reloc_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_reloc.c/Makefile b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/Makefile
new file mode 100644
index 000000000..ed4188107
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_reloc_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_reloc.c/ftl_reloc_ut.c b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/ftl_reloc_ut.c
new file mode 100644
index 000000000..26a423882
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/ftl_reloc_ut.c
@@ -0,0 +1,508 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+
+#include "ftl/ftl_reloc.c"
+#include "../common/utils.c"
+
+#define MAX_ACTIVE_RELOCS 5
+#define MAX_RELOC_QDEPTH 31
+
+struct base_bdev_geometry g_geo = {
+ .write_unit_size = 16,
+ .optimal_open_zones = 12,
+ .zone_size = 100,
+ .blockcnt = 1500 * 100 * 12,
+};
+
+DEFINE_STUB(ftl_dev_tail_md_disk_size, size_t, (const struct spdk_ftl_dev *dev), 1);
+DEFINE_STUB(ftl_addr_is_written, bool, (struct ftl_band *band, struct ftl_addr addr), true);
+DEFINE_STUB_V(ftl_band_set_state, (struct ftl_band *band, enum ftl_band_state state));
+DEFINE_STUB_V(ftl_free_io, (struct ftl_io *io));
+#if defined(DEBUG)
+DEFINE_STUB_V(ftl_trace_lba_io_init, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
+#endif
+
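+/* Minimal LBA map bookkeeping so relocation can run without the real band allocator. */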
+int
+ftl_band_alloc_lba_map(struct ftl_band *band)
+{
+ struct spdk_ftl_dev *dev = band->dev;
+
+ ftl_band_acquire_lba_map(band);
+ band->lba_map.map = spdk_mempool_get(dev->lba_pool);
+
+ return 0;
+}
+
+void
+ftl_band_release_lba_map(struct ftl_band *band)
+{
+ struct spdk_ftl_dev *dev = band->dev;
+
+ band->lba_map.ref_cnt--;
+ spdk_mempool_put(dev->lba_pool, band->lba_map.map);
+ band->lba_map.map = NULL;
+}
+
+void
+ftl_band_acquire_lba_map(struct ftl_band *band)
+{
+ band->lba_map.ref_cnt++;
+}
+
+size_t
+ftl_lba_map_num_blocks(const struct spdk_ftl_dev *dev)
+{
+ return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) * sizeof(uint64_t), FTL_BLOCK_SIZE);
+}
+
+int
+ftl_band_read_lba_map(struct ftl_band *band, size_t offset,
+ size_t num_blocks, ftl_io_fn fn, void *ctx)
+{
+ fn(ctx, ctx, 0);
+ return 0;
+}
+
+uint64_t
+ftl_band_block_offset_from_addr(struct ftl_band *band, struct ftl_addr addr)
+{
+ return test_offset_from_addr(addr, band);
+}
+
+struct ftl_addr
+ftl_band_addr_from_block_offset(struct ftl_band *band, uint64_t block_off)
+{
+ struct ftl_addr addr = {};
+
+ addr.offset = block_off + band->id * ftl_get_num_blocks_in_band(band->dev);
+ return addr;
+}
+
+void
+ftl_io_read(struct ftl_io *io)
+{
+ io->cb_fn(io, io->cb_ctx, 0);
+ free(io);
+}
+
+void
+ftl_io_write(struct ftl_io *io)
+{
+ io->cb_fn(io, io->cb_ctx, 0);
+ free(io->lba.vector);
+ free(io);
+}
+
+struct ftl_io *
+ftl_io_init_internal(const struct ftl_io_init_opts *opts)
+{
+ struct ftl_io *io = opts->io;
+
+ if (!io) {
+ io = calloc(1, opts->size);
+ }
+
+ SPDK_CU_ASSERT_FATAL(io != NULL);
+
+ io->dev = opts->dev;
+ io->band = opts->band;
+ io->flags = opts->flags;
+ io->cb_fn = opts->cb_fn;
+ io->cb_ctx = io;
+ io->num_blocks = opts->num_blocks;
+ memcpy(&io->iov, &opts->iovs, sizeof(io->iov));
+ io->iov_cnt = opts->iovcnt;
+
+ if (opts->flags & FTL_IO_VECTOR_LBA) {
+ io->lba.vector = calloc(io->num_blocks, sizeof(uint64_t));
+ SPDK_CU_ASSERT_FATAL(io->lba.vector != NULL);
+ }
+
+ return io;
+}
+
+struct ftl_io *
+ftl_io_alloc(struct spdk_io_channel *ch)
+{
+ size_t io_size = sizeof(struct ftl_md_io);
+
+ return malloc(io_size);
+}
+
+void
+ftl_io_reinit(struct ftl_io *io, ftl_io_fn fn, void *ctx, int flags, int type)
+{
+ io->cb_fn = fn;
+ io->cb_ctx = ctx;
+ io->type = type;
+}
+
+static void
+single_reloc_move(struct ftl_band_reloc *breloc)
+{
+ /* Process read */
+ ftl_process_reloc(breloc);
+ /* Process lba map read */
+ ftl_process_reloc(breloc);
+ /* Process write */
+ ftl_process_reloc(breloc);
+}
+
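+/* Move a band reloc from the pending queue to the head of the active queue. */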
+static void
+add_to_active_queue(struct ftl_reloc *reloc, struct ftl_band_reloc *breloc)
+{
+ TAILQ_REMOVE(&reloc->pending_queue, breloc, entry);
+ breloc->state = FTL_BAND_RELOC_STATE_ACTIVE;
+ TAILQ_INSERT_HEAD(&reloc->active_queue, breloc, entry);
+}
+
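+/* Create a device with initialized bands and a resumed relocator for each test. */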
+static void
+setup_reloc(struct spdk_ftl_dev **_dev, struct ftl_reloc **_reloc,
+ const struct base_bdev_geometry *geo)
+{
+ size_t i;
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+
+ dev = test_init_ftl_dev(geo);
+
+ dev->conf.max_active_relocs = MAX_ACTIVE_RELOCS;
+ dev->conf.max_reloc_qdepth = MAX_RELOC_QDEPTH;
+
+ SPDK_CU_ASSERT_FATAL(ftl_get_num_bands(dev) > 0);
+
+ for (i = 0; i < ftl_get_num_bands(dev); ++i) {
+ test_init_ftl_band(dev, i, geo->zone_size);
+ }
+
+ reloc = ftl_reloc_init(dev);
+ dev->reloc = reloc;
+ CU_ASSERT_PTR_NOT_NULL_FATAL(reloc);
+ ftl_reloc_resume(reloc);
+
+ *_dev = dev;
+ *_reloc = reloc;
+}
+
+static void
+cleanup_reloc(struct spdk_ftl_dev *dev, struct ftl_reloc *reloc)
+{
+ size_t i;
+
+ for (i = 0; i < ftl_get_num_bands(reloc->dev); ++i) {
+ SPDK_CU_ASSERT_FATAL(reloc->brelocs[i].state == FTL_BAND_RELOC_STATE_INACTIVE);
+ }
+
+ ftl_reloc_free(reloc);
+
+ for (i = 0; i < ftl_get_num_bands(dev); ++i) {
+ test_free_ftl_band(&dev->bands[i]);
+ }
+ test_free_ftl_dev(dev);
+}
+
+static void
+set_band_valid_map(struct ftl_band *band, size_t offset, size_t num_blocks)
+{
+ struct ftl_lba_map *lba_map = &band->lba_map;
+ size_t i;
+
+ SPDK_CU_ASSERT_FATAL(lba_map != NULL);
+ for (i = offset; i < offset + num_blocks; ++i) {
+ spdk_bit_array_set(lba_map->vld, i);
+ lba_map->num_vld++;
+ }
+}
+
+static void
+test_reloc_iter_full(void)
+{
+	size_t num_blocks, num_iters, remainder, i;
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+ struct ftl_addr addr;
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ g_geo.zone_size = 100;
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+
+ set_band_valid_map(band, 0, ftl_get_num_blocks_in_band(dev));
+
+ ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
+
+ num_iters = ftl_get_num_punits(dev) *
+ (ftl_get_num_blocks_in_zone(dev) / reloc->xfer_size);
+
+ for (i = 0; i < num_iters; i++) {
+ num_blocks = ftl_reloc_next_blocks(breloc, &addr);
+ CU_ASSERT_EQUAL(num_blocks, reloc->xfer_size);
+ }
+
+ num_iters = ftl_get_num_punits(dev);
+
+	/* ftl_reloc_next_blocks searches for at most xfer_size contiguous */
+	/* valid logical blocks in a zone, so we can end up with a remainder */
+	/* if the number of logical blocks in a zone is not divisible */
+	/* by xfer_size */
+	remainder = ftl_get_num_blocks_in_zone(dev) % reloc->xfer_size;
+	for (i = 0; i < num_iters; i++) {
+		num_blocks = ftl_reloc_next_blocks(breloc, &addr);
+		CU_ASSERT_EQUAL(remainder, num_blocks);
+ }
+
+ /* num_blocks should remain intact since all the blocks are valid */
+ CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
+ breloc->state = FTL_BAND_RELOC_STATE_INACTIVE;
+
+ cleanup_reloc(dev, reloc);
+}
+
+static void
+test_reloc_empty_band(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+
+ ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, 0);
+
+ cleanup_reloc(dev, reloc);
+}
+
+static void
+test_reloc_full_band(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+ size_t num_moves, num_iters, num_block, i;
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+ num_moves = MAX_RELOC_QDEPTH * reloc->xfer_size;
+ num_iters = ftl_get_num_blocks_in_band(dev) / num_moves;
+
+ set_band_valid_map(band, 0, ftl_get_num_blocks_in_band(dev));
+
+ ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
+
+ ftl_reloc_prep(breloc);
+ add_to_active_queue(reloc, breloc);
+
+ for (i = 1; i <= num_iters; ++i) {
+ single_reloc_move(breloc);
+ num_block = ftl_get_num_blocks_in_band(dev) - (i * num_moves);
+		CU_ASSERT_EQUAL(breloc->num_blocks, num_block);
+	}
+
+	/* Process remainder blocks */
+ single_reloc_move(breloc);
+ /* Drain move queue */
+ ftl_reloc_process_moves(breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, 0);
+ CU_ASSERT_TRUE(ftl_reloc_done(breloc));
+ ftl_reloc_release(breloc);
+
+ cleanup_reloc(dev, reloc);
+}
+
+static void
+test_reloc_scatter_band(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+ size_t num_iters, i;
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+ num_iters = spdk_divide_round_up(ftl_get_num_blocks_in_band(dev), MAX_RELOC_QDEPTH * 2);
+
+ for (i = 0; i < ftl_get_num_blocks_in_band(dev); ++i) {
+ if (i % 2) {
+ set_band_valid_map(band, i, 1);
+ }
+ }
+
+ ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
+ ftl_reloc_prep(breloc);
+ add_to_active_queue(reloc, breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
+
+	for (i = 0; i < num_iters; ++i) {
+ single_reloc_move(breloc);
+ }
+
+ ftl_process_reloc(breloc);
+ CU_ASSERT_EQUAL(breloc->num_blocks, 0);
+ CU_ASSERT_TRUE(ftl_reloc_done(breloc));
+
+ cleanup_reloc(dev, reloc);
+}
+
+static void
+test_reloc_zone(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+ size_t num_io, num_iters, num_block, i;
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+	/* A high priority band has an allocated lba map */
+ band->high_prio = 1;
+ ftl_band_alloc_lba_map(band);
+ num_io = MAX_RELOC_QDEPTH * reloc->xfer_size;
+ num_iters = ftl_get_num_blocks_in_zone(dev) / num_io;
+
+ set_band_valid_map(band, 0, ftl_get_num_blocks_in_band(dev));
+
+ ftl_reloc_add(reloc, band, ftl_get_num_blocks_in_zone(dev) * 3,
+ ftl_get_num_blocks_in_zone(dev), 1, false);
+ add_to_active_queue(reloc, breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_zone(dev));
+
+	for (i = 1; i <= num_iters; ++i) {
+ single_reloc_move(breloc);
+ num_block = ftl_get_num_blocks_in_zone(dev) - (i * num_io);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, num_block);
+ }
+
+	/* In case num_blocks_in_zone % num_io != 0, one extra iteration is needed */
+ single_reloc_move(breloc);
+ /* Drain move queue */
+ ftl_reloc_process_moves(breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, 0);
+ CU_ASSERT_TRUE(ftl_reloc_done(breloc));
+ ftl_reloc_release(breloc);
+
+ cleanup_reloc(dev, reloc);
+}
+
+static void
+test_reloc_single_block(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+#define TEST_RELOC_OFFSET 6
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+
+ set_band_valid_map(band, TEST_RELOC_OFFSET, 1);
+
+ ftl_reloc_add(reloc, band, TEST_RELOC_OFFSET, 1, 0, false);
+ SPDK_CU_ASSERT_FATAL(breloc == TAILQ_FIRST(&reloc->pending_queue));
+ ftl_reloc_prep(breloc);
+ add_to_active_queue(reloc, breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, 1);
+
+ single_reloc_move(breloc);
+ /* Drain move queue */
+ ftl_reloc_process_moves(breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, 0);
+ CU_ASSERT_TRUE(ftl_reloc_done(breloc));
+ ftl_reloc_release(breloc);
+
+ cleanup_reloc(dev, reloc);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ftl_band_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_reloc_iter_full);
+ CU_ADD_TEST(suite, test_reloc_empty_band);
+ CU_ADD_TEST(suite, test_reloc_full_band);
+ CU_ADD_TEST(suite, test_reloc_scatter_band);
+ CU_ADD_TEST(suite, test_reloc_zone);
+ CU_ADD_TEST(suite, test_reloc_single_block);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_wptr/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_wptr/.gitignore
new file mode 100644
index 000000000..8f1f46756
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_wptr/.gitignore
@@ -0,0 +1 @@
+ftl_wptr_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_wptr/Makefile b/src/spdk/test/unit/lib/ftl/ftl_wptr/Makefile
new file mode 100644
index 000000000..42bf7c602
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_wptr/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_wptr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_wptr/ftl_wptr_ut.c b/src/spdk/test/unit/lib/ftl/ftl_wptr/ftl_wptr_ut.c
new file mode 100644
index 000000000..ccee312a2
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_wptr/ftl_wptr_ut.c
@@ -0,0 +1,223 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+
+#include "ftl/ftl_core.c"
+#include "ftl/ftl_band.c"
+#include "ftl/ftl_init.c"
+#include "../common/utils.c"
+
+struct base_bdev_geometry g_geo = {
+ .write_unit_size = 16,
+ .optimal_open_zones = 12,
+ .zone_size = 128,
+ .blockcnt = 20 * 128 * 12,
+};
+
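+/* Tracing, I/O accounting and zoned-bdev stubs with fixed return values. */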
+#if defined(DEBUG)
+DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
+DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
+
+DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
+ enum ftl_trace_completion completion));
+DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
+DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
+ struct ftl_addr addr, size_t addr_cnt));
+#endif
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
+DEFINE_STUB_V(ftl_io_dec_req, (struct ftl_io *io));
+DEFINE_STUB_V(ftl_io_inc_req, (struct ftl_io *io));
+DEFINE_STUB_V(ftl_io_fail, (struct ftl_io *io, int status));
+DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
+ size_t num_blocks, int prio, bool defrag));
+DEFINE_STUB_V(ftl_io_process_error, (struct ftl_io *io, const struct spdk_nvme_cpl *status));
+DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
+ struct spdk_io_channel *ch,
+ uint64_t zone_id, enum spdk_bdev_zone_action action,
+ spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
+
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
+{
+ return spdk_get_io_channel(bdev_desc);
+}
+
+struct ftl_io *
+ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
+{
+ struct ftl_io *io;
+
+ io = calloc(1, sizeof(struct ftl_io));
+ SPDK_CU_ASSERT_FATAL(io != NULL);
+
+ io->dev = band->dev;
+ io->band = band;
+ io->cb_fn = cb;
+ io->num_blocks = 1;
+
+ return io;
+}
+
+void
+ftl_io_advance(struct ftl_io *io, size_t num_blocks)
+{
+ io->pos += num_blocks;
+}
+
+void
+ftl_io_complete(struct ftl_io *io)
+{
+ io->cb_fn(io, NULL, 0);
+ free(io);
+}
+
+static void
+setup_wptr_test(struct spdk_ftl_dev **dev, const struct base_bdev_geometry *geo)
+{
+ struct spdk_ftl_dev *t_dev;
+ struct _ftl_io_channel *_ioch;
+ size_t i;
+
+ t_dev = test_init_ftl_dev(geo);
+ for (i = 0; i < ftl_get_num_bands(t_dev); ++i) {
+ test_init_ftl_band(t_dev, i, geo->zone_size);
+ t_dev->bands[i].state = FTL_BAND_STATE_CLOSED;
+ ftl_band_set_state(&t_dev->bands[i], FTL_BAND_STATE_FREE);
+ }
+
+ _ioch = (struct _ftl_io_channel *)(t_dev->ioch + 1);
+ _ioch->ioch = calloc(1, sizeof(*_ioch->ioch));
+ SPDK_CU_ASSERT_FATAL(_ioch->ioch != NULL);
+
+ *dev = t_dev;
+}
+
+static void
+cleanup_wptr_test(struct spdk_ftl_dev *dev)
+{
+ struct _ftl_io_channel *_ioch;
+ size_t i;
+
+ for (i = 0; i < ftl_get_num_bands(dev); ++i) {
+ dev->bands[i].lba_map.segments = NULL;
+ test_free_ftl_band(&dev->bands[i]);
+ }
+
+ _ioch = (struct _ftl_io_channel *)(dev->ioch + 1);
+ free(_ioch->ioch);
+
+ test_free_ftl_dev(dev);
+}
+
+static void
+test_wptr(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_wptr *wptr;
+ struct ftl_band *band;
+ struct ftl_io io = { 0 };
+ size_t xfer_size;
+ size_t zone, block, offset, i;
+ int rc;
+
+ setup_wptr_test(&dev, &g_geo);
+
+ xfer_size = dev->xfer_size;
+ ftl_add_wptr(dev);
+ for (i = 0; i < ftl_get_num_bands(dev); ++i) {
+ wptr = LIST_FIRST(&dev->wptr_list);
+ band = wptr->band;
+ ftl_band_set_state(band, FTL_BAND_STATE_OPENING);
+ ftl_band_set_state(band, FTL_BAND_STATE_OPEN);
+ io.band = band;
+ io.dev = dev;
+
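+		/* Advance the write pointer in xfer_size chunks across every zone until the band is full. */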
+ for (block = 0, offset = 0; block < ftl_get_num_blocks_in_zone(dev) / xfer_size; ++block) {
+ for (zone = 0; zone < band->num_zones; ++zone) {
+ CU_ASSERT_EQUAL(wptr->offset, offset);
+ ftl_wptr_advance(wptr, xfer_size);
+ offset += xfer_size;
+ }
+ }
+
+ CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_FULL);
+
+ ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
+
+ /* Call the metadata completion cb to force band state change */
+ /* and removal of the actual wptr */
+ ftl_md_write_cb(&io, NULL, 0);
+ CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_CLOSED);
+ CU_ASSERT_TRUE(LIST_EMPTY(&dev->wptr_list));
+
+ rc = ftl_add_wptr(dev);
+
+ /* There are no free bands during the last iteration, so */
+ /* there'll be no new wptr allocation */
+ if (i == (ftl_get_num_bands(dev) - 1)) {
+ CU_ASSERT_EQUAL(rc, -1);
+ } else {
+ CU_ASSERT_EQUAL(rc, 0);
+ }
+ }
+
+ cleanup_wptr_test(dev);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ftl_wptr_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_wptr);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/idxd/Makefile b/src/spdk/test/unit/lib/idxd/Makefile
new file mode 100644
index 000000000..e37cb22d9
--- /dev/null
+++ b/src/spdk/test/unit/lib/idxd/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = idxd.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/idxd/idxd.c/.gitignore b/src/spdk/test/unit/lib/idxd/idxd.c/.gitignore
new file mode 100644
index 000000000..b9fee58fe
--- /dev/null
+++ b/src/spdk/test/unit/lib/idxd/idxd.c/.gitignore
@@ -0,0 +1 @@
+idxd_ut
diff --git a/src/spdk/test/unit/lib/idxd/idxd.c/Makefile b/src/spdk/test/unit/lib/idxd/idxd.c/Makefile
new file mode 100644
index 000000000..73fdbe3e4
--- /dev/null
+++ b/src/spdk/test/unit/lib/idxd/idxd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = idxd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/idxd/idxd.c/idxd_ut.c b/src/spdk/test/unit/lib/idxd/idxd.c/idxd_ut.c
new file mode 100644
index 000000000..0eed4273a
--- /dev/null
+++ b/src/spdk/test/unit/lib/idxd/idxd.c/idxd_ut.c
@@ -0,0 +1,300 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+#include "spdk_internal/idxd.h"
+#include "common/lib/test_env.c"
+#include "idxd/idxd.c"
+
+#define FAKE_REG_SIZE 0x800
+#define NUM_GROUPS 4
+#define NUM_WQ_PER_GROUP 1
+#define NUM_ENGINES_PER_GROUP 1
+#define TOTAL_WQS (NUM_GROUPS * NUM_WQ_PER_GROUP)
+#define TOTAL_ENGINES (NUM_GROUPS * NUM_ENGINES_PER_GROUP)
+
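+/* PCI stubs: enumeration fails and BAR mapping yields an empty region, so no real device is touched. */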
+int
+spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ return -1;
+}
+
+int
+spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
+ void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
+{
+ *mapped_addr = NULL;
+ *phys_addr = 0;
+ *size = 0;
+ return 0;
+}
+
+int
+spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
+{
+ return 0;
+}
+
+int
+spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value,
+ uint32_t offset)
+{
+ *value = 0xFFFFFFFFu;
+ return 0;
+}
+
+int
+spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value,
+ uint32_t offset)
+{
+ return 0;
+}
+
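+/* Replace movdir64b with a no-op so descriptor submission does not touch real hardware. */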
+#define movdir64b mock_movdir64b
+static inline void
+mock_movdir64b(void *dst, const void *src)
+{
+ return;
+}
+
+#define WQ_CFG_OFFSET 0x500
+#define TOTAL_WQE_SIZE 0x40
+static int
+test_idxd_wq_config(void)
+{
+ struct spdk_idxd_device idxd = {};
+ union idxd_wqcfg wqcfg = {};
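+	/* Raw WQCFG dwords expected in MMIO space after idxd_wq_config() programs each queue. */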
+ uint32_t expected[8] = {0x10, 0, 0x11, 0x11e, 0, 0, 0x40000000, 0};
+ uint32_t wq_size;
+ int rc, i, j;
+
+ idxd.reg_base = calloc(1, FAKE_REG_SIZE);
+ SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);
+
+ g_dev_cfg = &g_dev_cfg0;
+ idxd.registers.wqcap.total_wq_size = TOTAL_WQE_SIZE;
+ idxd.registers.wqcap.num_wqs = TOTAL_WQS;
+ idxd.registers.gencap.max_batch_shift = LOG2_WQ_MAX_BATCH;
+ idxd.registers.gencap.max_xfer_shift = LOG2_WQ_MAX_XFER;
+ idxd.wqcfg_offset = WQ_CFG_OFFSET;
+ wq_size = idxd.registers.wqcap.total_wq_size / g_dev_cfg->total_wqs;
+
+ rc = idxd_wq_config(&idxd);
+ CU_ASSERT(rc == 0);
+ for (i = 0; i < g_dev_cfg->total_wqs; i++) {
+ CU_ASSERT(idxd.queues[i].wqcfg.wq_size == wq_size);
+ CU_ASSERT(idxd.queues[i].wqcfg.mode == WQ_MODE_DEDICATED);
+ CU_ASSERT(idxd.queues[i].wqcfg.max_batch_shift == LOG2_WQ_MAX_BATCH);
+ CU_ASSERT(idxd.queues[i].wqcfg.max_xfer_shift == LOG2_WQ_MAX_XFER);
+ CU_ASSERT(idxd.queues[i].wqcfg.wq_state == WQ_ENABLED);
+ CU_ASSERT(idxd.queues[i].wqcfg.priority == WQ_PRIORITY_1);
+ CU_ASSERT(idxd.queues[i].idxd == &idxd);
+ CU_ASSERT(idxd.queues[i].group == &idxd.groups[i % g_dev_cfg->num_groups]);
+ }
+
+ for (i = 0 ; i < idxd.registers.wqcap.num_wqs; i++) {
+ for (j = 0 ; j < WQCFG_NUM_DWORDS; j++) {
+ wqcfg.raw[j] = spdk_mmio_read_4((uint32_t *)(idxd.reg_base + idxd.wqcfg_offset + i * 32 + j *
+ 4));
+ CU_ASSERT(wqcfg.raw[j] == expected[j]);
+ }
+ }
+
+ free(idxd.queues);
+ free(idxd.reg_base);
+
+ return 0;
+}
+
+#define GRP_CFG_OFFSET 0x400
+#define MAX_TOKENS 0x40
+static int
+test_idxd_group_config(void)
+{
+ struct spdk_idxd_device idxd = {};
+ uint64_t wqs[NUM_GROUPS] = {};
+ uint64_t engines[NUM_GROUPS] = {};
+ union idxd_group_flags flags[NUM_GROUPS] = {};
+ int rc, i;
+ uint64_t base_offset;
+
+ idxd.reg_base = calloc(1, FAKE_REG_SIZE);
+ SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);
+
+ g_dev_cfg = &g_dev_cfg0;
+ idxd.registers.groupcap.num_groups = NUM_GROUPS;
+ idxd.registers.enginecap.num_engines = TOTAL_ENGINES;
+ idxd.registers.wqcap.num_wqs = TOTAL_WQS;
+ idxd.registers.groupcap.total_tokens = MAX_TOKENS;
+ idxd.grpcfg_offset = GRP_CFG_OFFSET;
+
+ rc = idxd_group_config(&idxd);
+ CU_ASSERT(rc == 0);
+ for (i = 0 ; i < idxd.registers.groupcap.num_groups; i++) {
+ base_offset = idxd.grpcfg_offset + i * 64;
+
+ wqs[i] = spdk_mmio_read_8((uint64_t *)(idxd.reg_base + base_offset));
+ engines[i] = spdk_mmio_read_8((uint64_t *)(idxd.reg_base + base_offset + CFG_ENGINE_OFFSET));
+ flags[i].raw = spdk_mmio_read_8((uint64_t *)(idxd.reg_base + base_offset + CFG_FLAG_OFFSET));
+ }
+	/* wq and engine arrays are indexed by group id and are bitmaps of assigned elements. */
+ CU_ASSERT(wqs[0] == 0x1);
+ CU_ASSERT(engines[0] == 0x1);
+ CU_ASSERT(wqs[1] == 0x2);
+ CU_ASSERT(engines[1] == 0x2);
+ CU_ASSERT(flags[0].tokens_allowed == MAX_TOKENS / NUM_GROUPS);
+ CU_ASSERT(flags[1].tokens_allowed == MAX_TOKENS / NUM_GROUPS);
+
+	/* Groups are allocated by the code under test. */
+ free(idxd.groups);
+ free(idxd.reg_base);
+
+ return 0;
+}
+
+static int
+test_idxd_reset_dev(void)
+{
+ struct spdk_idxd_device idxd = {};
+ union idxd_cmdsts_reg *fake_cmd_status_reg;
+ int rc;
+
+ idxd.reg_base = calloc(1, FAKE_REG_SIZE);
+ SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);
+ fake_cmd_status_reg = idxd.reg_base + IDXD_CMDSTS_OFFSET;
+
+ /* Test happy path */
+ rc = idxd_reset_dev(&idxd);
+ CU_ASSERT(rc == 0);
+
+ /* Test error reported path */
+ fake_cmd_status_reg->err = 1;
+ rc = idxd_reset_dev(&idxd);
+ CU_ASSERT(rc == -EINVAL);
+
+ free(idxd.reg_base);
+
+ return 0;
+}
+
+static int
+test_idxd_wait_cmd(void)
+{
+ struct spdk_idxd_device idxd = {};
+ int timeout = 1;
+ union idxd_cmdsts_reg *fake_cmd_status_reg;
+ int rc;
+
+ idxd.reg_base = calloc(1, FAKE_REG_SIZE);
+ SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);
+ fake_cmd_status_reg = idxd.reg_base + IDXD_CMDSTS_OFFSET;
+
+ /* Test happy path. */
+ rc = idxd_wait_cmd(&idxd, timeout);
+ CU_ASSERT(rc == 0);
+
+	/* Set up our fake register to set the error bit. */
+ fake_cmd_status_reg->err = 1;
+ rc = idxd_wait_cmd(&idxd, timeout);
+ CU_ASSERT(rc == -EINVAL);
+ fake_cmd_status_reg->err = 0;
+
+	/* Set up our fake register to set the active bit. */
+ fake_cmd_status_reg->active = 1;
+ rc = idxd_wait_cmd(&idxd, timeout);
+ CU_ASSERT(rc == -EBUSY);
+
+ free(idxd.reg_base);
+
+ return 0;
+}
+
+static int
+test_spdk_idxd_set_config(void)
+{
+ g_dev_cfg = NULL;
+ spdk_idxd_set_config(0);
+ SPDK_CU_ASSERT_FATAL(g_dev_cfg != NULL);
+ CU_ASSERT(memcmp(&g_dev_cfg0, g_dev_cfg, sizeof(struct device_config)) == 0);
+
+ return 0;
+}
+
+static int
+test_spdk_idxd_reconfigure_chan(void)
+{
+ struct spdk_idxd_io_channel chan = {};
+ int rc;
+ uint32_t test_ring_size = 8;
+ uint32_t num_channels = 2;
+
+ chan.ring_ctrl.ring_slots = spdk_bit_array_create(test_ring_size);
+ chan.ring_ctrl.ring_size = test_ring_size;
+ chan.ring_ctrl.completions = spdk_zmalloc(test_ring_size * sizeof(struct idxd_hw_desc), 0, NULL,
+ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ SPDK_CU_ASSERT_FATAL(chan.ring_ctrl.completions != NULL);
+
+ rc = spdk_idxd_reconfigure_chan(&chan, num_channels);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(chan.ring_ctrl.max_ring_slots == test_ring_size / num_channels);
+
+ spdk_bit_array_free(&chan.ring_ctrl.ring_slots);
+ spdk_free(chan.ring_ctrl.completions);
+ return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("idxd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_spdk_idxd_reconfigure_chan);
+ CU_ADD_TEST(suite, test_spdk_idxd_set_config);
+ CU_ADD_TEST(suite, test_idxd_wait_cmd);
+ CU_ADD_TEST(suite, test_idxd_reset_dev);
+ CU_ADD_TEST(suite, test_idxd_group_config);
+ CU_ADD_TEST(suite, test_idxd_wq_config);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ioat/Makefile b/src/spdk/test/unit/lib/ioat/Makefile
new file mode 100644
index 000000000..8d982710e
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = ioat.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/ioat/ioat.c/.gitignore b/src/spdk/test/unit/lib/ioat/ioat.c/.gitignore
new file mode 100644
index 000000000..deefbf0c1
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/ioat.c/.gitignore
@@ -0,0 +1 @@
+ioat_ut
diff --git a/src/spdk/test/unit/lib/ioat/ioat.c/Makefile b/src/spdk/test/unit/lib/ioat/ioat.c/Makefile
new file mode 100644
index 000000000..8b685ce0b
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/ioat.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ioat_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c b/src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c
new file mode 100644
index 000000000..abe13c2b9
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c
@@ -0,0 +1,144 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "ioat/ioat.c"
+
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+
+int
+spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ return -1;
+}
+
+int
+spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
+ void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
+{
+ *mapped_addr = NULL;
+ *phys_addr = 0;
+ *size = 0;
+ return 0;
+}
+
+int
+spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
+{
+ return 0;
+}
+
+int
+spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value,
+ uint32_t offset)
+{
+ *value = 0xFFFFFFFFu;
+ return 0;
+}
+
+int
+spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value,
+ uint32_t offset)
+{
+ return 0;
+}
+
+static void
+ioat_state_check(void)
+{
+ /*
+ * CHANSTS's STATUS field is 3 bits (8 possible values), but only has 5 valid states:
+ * ACTIVE 0x0
+ * IDLE 0x1
+ * SUSPENDED 0x2
+ * HALTED 0x3
+ * ARMED 0x4
+ */
+
+ CU_ASSERT(is_ioat_active(0) == 1); /* ACTIVE */
+ CU_ASSERT(is_ioat_active(1) == 0); /* IDLE */
+ CU_ASSERT(is_ioat_active(2) == 0); /* SUSPENDED */
+ CU_ASSERT(is_ioat_active(3) == 0); /* HALTED */
+ CU_ASSERT(is_ioat_active(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_active(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_active(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_active(7) == 0); /* reserved */
+
+ CU_ASSERT(is_ioat_idle(0) == 0); /* ACTIVE */
+ CU_ASSERT(is_ioat_idle(1) == 1); /* IDLE */
+ CU_ASSERT(is_ioat_idle(2) == 0); /* SUSPENDED */
+ CU_ASSERT(is_ioat_idle(3) == 0); /* HALTED */
+ CU_ASSERT(is_ioat_idle(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_idle(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_idle(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_idle(7) == 0); /* reserved */
+
+ CU_ASSERT(is_ioat_suspended(0) == 0); /* ACTIVE */
+ CU_ASSERT(is_ioat_suspended(1) == 0); /* IDLE */
+ CU_ASSERT(is_ioat_suspended(2) == 1); /* SUSPENDED */
+ CU_ASSERT(is_ioat_suspended(3) == 0); /* HALTED */
+ CU_ASSERT(is_ioat_suspended(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_suspended(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_suspended(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_suspended(7) == 0); /* reserved */
+
+ CU_ASSERT(is_ioat_halted(0) == 0); /* ACTIVE */
+ CU_ASSERT(is_ioat_halted(1) == 0); /* IDLE */
+ CU_ASSERT(is_ioat_halted(2) == 0); /* SUSPENDED */
+ CU_ASSERT(is_ioat_halted(3) == 1); /* HALTED */
+ CU_ASSERT(is_ioat_halted(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_halted(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_halted(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_halted(7) == 0); /* reserved */
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ioat", NULL, NULL);
+
+ CU_ADD_TEST(suite, ioat_state_check);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/Makefile b/src/spdk/test/unit/lib/iscsi/Makefile
new file mode 100644
index 000000000..396c5a055
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = conn.c init_grp.c iscsi.c param.c portal_grp.c tgt_node.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/iscsi/common.c b/src/spdk/test/unit/lib/iscsi/common.c
new file mode 100644
index 000000000..e6631848a
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/common.c
@@ -0,0 +1,209 @@
+#include "iscsi/task.h"
+#include "iscsi/iscsi.h"
+#include "iscsi/conn.h"
+
+#include "spdk/env.h"
+#include "spdk/sock.h"
+#include "spdk_cunit.h"
+
+#include "spdk_internal/log.h"
+#include "spdk_internal/mock.h"
+
+#include "scsi/scsi_internal.h"
+
+SPDK_LOG_REGISTER_COMPONENT("iscsi", SPDK_LOG_ISCSI)
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+TAILQ_HEAD(, spdk_iscsi_pdu) g_write_pdu_list = TAILQ_HEAD_INITIALIZER(g_write_pdu_list);
+
+static bool g_task_pool_is_empty = false;
+static bool g_pdu_pool_is_empty = false;
+
+struct spdk_iscsi_task *
+iscsi_task_get(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_task *parent,
+ spdk_scsi_task_cpl cpl_fn)
+{
+ struct spdk_iscsi_task *task;
+
+ if (g_task_pool_is_empty) {
+ return NULL;
+ }
+
+ task = calloc(1, sizeof(*task));
+ if (!task) {
+ return NULL;
+ }
+
+ task->conn = conn;
+ task->scsi.cpl_fn = cpl_fn;
+ if (parent) {
+ parent->scsi.ref++;
+ task->parent = parent;
+ task->tag = parent->tag;
+ task->lun_id = parent->lun_id;
+ task->scsi.dxfer_dir = parent->scsi.dxfer_dir;
+ task->scsi.transfer_len = parent->scsi.transfer_len;
+ task->scsi.lun = parent->scsi.lun;
+ task->scsi.cdb = parent->scsi.cdb;
+ task->scsi.target_port = parent->scsi.target_port;
+ task->scsi.initiator_port = parent->scsi.initiator_port;
+ if (conn && (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV)) {
+ conn->data_in_cnt++;
+ }
+ }
+
+ task->scsi.iovs = &task->scsi.iov;
+ return task;
+}
+
+void
+spdk_scsi_task_put(struct spdk_scsi_task *task)
+{
+ free(task);
+}
+
+void
+iscsi_put_pdu(struct spdk_iscsi_pdu *pdu)
+{
+ if (!pdu) {
+ return;
+ }
+
+ pdu->ref--;
+ if (pdu->ref < 0) {
+ CU_FAIL("negative ref count");
+ pdu->ref = 0;
+ }
+
+ if (pdu->ref == 0) {
+ if (pdu->data && !pdu->data_from_mempool) {
+ free(pdu->data);
+ }
+ free(pdu);
+ }
+}
+
+struct spdk_iscsi_pdu *
+iscsi_get_pdu(struct spdk_iscsi_conn *conn)
+{
+ struct spdk_iscsi_pdu *pdu;
+
+ assert(conn != NULL);
+ if (g_pdu_pool_is_empty) {
+ return NULL;
+ }
+
+ pdu = malloc(sizeof(*pdu));
+ if (!pdu) {
+ return NULL;
+ }
+
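+	/* Zero only the fields that precede the large ahs buffer; bytes from
+	 * ahs onward are deliberately left uninitialized. */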
+ memset(pdu, 0, offsetof(struct spdk_iscsi_pdu, ahs));
+ pdu->ref = 1;
+ pdu->conn = conn;
+
+ return pdu;
+}
+
+DEFINE_STUB_V(spdk_scsi_task_process_null_lun, (struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(spdk_scsi_task_process_abort, (struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(spdk_scsi_dev_queue_task,
+ (struct spdk_scsi_dev *dev, struct spdk_scsi_task *task));
+
+DEFINE_STUB(spdk_scsi_dev_find_port_by_id, struct spdk_scsi_port *,
+ (struct spdk_scsi_dev *dev, uint64_t id), NULL);
+
+DEFINE_STUB_V(spdk_scsi_dev_queue_mgmt_task,
+ (struct spdk_scsi_dev *dev, struct spdk_scsi_task *task));
+
+const char *
+spdk_scsi_dev_get_name(const struct spdk_scsi_dev *dev)
+{
+ if (dev != NULL) {
+ return dev->name;
+ }
+
+ return NULL;
+}
+
+DEFINE_STUB(spdk_scsi_dev_construct, struct spdk_scsi_dev *,
+ (const char *name, const char **bdev_name_list,
+ int *lun_id_list, int num_luns, uint8_t protocol_id,
+ void (*hotremove_cb)(const struct spdk_scsi_lun *, void *),
+ void *hotremove_ctx),
+ NULL);
+
+DEFINE_STUB_V(spdk_scsi_dev_destruct,
+ (struct spdk_scsi_dev *dev, spdk_scsi_dev_destruct_cb_t cb_fn, void *cb_arg));
+
+DEFINE_STUB(spdk_scsi_dev_add_port, int,
+ (struct spdk_scsi_dev *dev, uint64_t id, const char *name), 0);
+
+DEFINE_STUB(iscsi_drop_conns, int,
+ (struct spdk_iscsi_conn *conn, const char *conn_match, int drop_all),
+ 0);
+
+DEFINE_STUB(spdk_scsi_dev_delete_port, int,
+ (struct spdk_scsi_dev *dev, uint64_t id), 0);
+
+DEFINE_STUB_V(shutdown_iscsi_conns, (void));
+
+DEFINE_STUB_V(iscsi_conns_request_logout, (struct spdk_iscsi_tgt_node *target));
+
+DEFINE_STUB(iscsi_get_active_conns, int, (struct spdk_iscsi_tgt_node *target), 0);
+
+void
+iscsi_task_cpl(struct spdk_scsi_task *scsi_task)
+{
+ struct spdk_iscsi_task *iscsi_task;
+
+ if (scsi_task != NULL) {
+ iscsi_task = iscsi_task_from_scsi_task(scsi_task);
+ if (iscsi_task->parent && (iscsi_task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV)) {
+ assert(iscsi_task->conn->data_in_cnt > 0);
+ iscsi_task->conn->data_in_cnt--;
+ }
+
+ free(iscsi_task);
+ }
+}
+
+DEFINE_STUB_V(iscsi_task_mgmt_cpl, (struct spdk_scsi_task *scsi_task));
+
+DEFINE_STUB(iscsi_conn_read_data, int,
+ (struct spdk_iscsi_conn *conn, int bytes, void *buf), 0);
+
+DEFINE_STUB(iscsi_conn_readv_data, int,
+ (struct spdk_iscsi_conn *conn, struct iovec *iov, int iovcnt), 0);
+
+void
+iscsi_conn_write_pdu(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu,
+ iscsi_conn_xfer_complete_cb cb_fn, void *cb_arg)
+{
+ TAILQ_INSERT_TAIL(&g_write_pdu_list, pdu, tailq);
+}
+
+DEFINE_STUB_V(iscsi_conn_logout, (struct spdk_iscsi_conn *conn));
+
+DEFINE_STUB_V(spdk_scsi_task_set_status,
+ (struct spdk_scsi_task *task, int sc, int sk, int asc, int ascq));
+
+void
+spdk_scsi_task_set_data(struct spdk_scsi_task *task, void *data, uint32_t len)
+{
+ SPDK_CU_ASSERT_FATAL(task->iovs != NULL);
+ task->iovs[0].iov_base = data;
+ task->iovs[0].iov_len = len;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/conn.c/.gitignore b/src/spdk/test/unit/lib/iscsi/conn.c/.gitignore
new file mode 100644
index 000000000..3bb0afd8a
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/conn.c/.gitignore
@@ -0,0 +1 @@
+conn_ut
diff --git a/src/spdk/test/unit/lib/iscsi/conn.c/Makefile b/src/spdk/test/unit/lib/iscsi/conn.c/Makefile
new file mode 100644
index 000000000..0c208d888
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/conn.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = conn_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c b/src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c
new file mode 100644
index 000000000..967e16ec1
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c
@@ -0,0 +1,927 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "common/lib/test_env.c"
+#include "spdk_cunit.h"
+
+#include "iscsi/conn.c"
+
+#include "spdk_internal/mock.h"
+
+SPDK_LOG_REGISTER_COMPONENT("iscsi", SPDK_LOG_ISCSI)
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+struct spdk_scsi_lun {
+ uint8_t reserved;
+};
+
+struct spdk_iscsi_globals g_iscsi;
+static TAILQ_HEAD(read_tasks_head, spdk_iscsi_task) g_ut_read_tasks =
+ TAILQ_HEAD_INITIALIZER(g_ut_read_tasks);
+static struct spdk_iscsi_task *g_new_task = NULL;
+static ssize_t g_sock_writev_bytes = 0;
+
+DEFINE_STUB(spdk_app_get_shm_id, int, (void), 0);
+
+DEFINE_STUB(spdk_sock_getaddr, int,
+ (struct spdk_sock *sock, char *saddr, int slen, uint16_t *sport,
+ char *caddr, int clen, uint16_t *cport),
+ 0);
+
+int
+spdk_sock_close(struct spdk_sock **sock)
+{
+ *sock = NULL;
+ return 0;
+}
+
+DEFINE_STUB(spdk_sock_recv, ssize_t,
+ (struct spdk_sock *sock, void *buf, size_t len), 0);
+
+DEFINE_STUB(spdk_sock_readv, ssize_t,
+ (struct spdk_sock *sock, struct iovec *iov, int iovcnt), 0);
+
+ssize_t
+spdk_sock_writev(struct spdk_sock *sock, struct iovec *iov, int iovcnt)
+{
+ return g_sock_writev_bytes;
+}
+
+DEFINE_STUB(spdk_sock_set_recvlowat, int, (struct spdk_sock *s, int nbytes), 0);
+
+DEFINE_STUB(spdk_sock_set_recvbuf, int, (struct spdk_sock *sock, int sz), 0);
+
+DEFINE_STUB(spdk_sock_set_sendbuf, int, (struct spdk_sock *sock, int sz), 0);
+
+DEFINE_STUB(spdk_sock_group_add_sock, int,
+ (struct spdk_sock_group *group, struct spdk_sock *sock,
+ spdk_sock_cb cb_fn, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_sock_group_remove_sock, int,
+ (struct spdk_sock_group *group, struct spdk_sock *sock), 0);
+
+struct spdk_iscsi_task *
+iscsi_task_get(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_task *parent,
+ spdk_scsi_task_cpl cpl_fn)
+{
+ struct spdk_iscsi_task *task;
+
+ task = g_new_task;
+ if (task == NULL) {
+ return NULL;
+ }
+ memset(task, 0, sizeof(*task));
+
+ task->scsi.ref = 1;
+ task->conn = conn;
+ task->scsi.cpl_fn = cpl_fn;
+ if (parent) {
+ parent->scsi.ref++;
+ task->parent = parent;
+ task->scsi.dxfer_dir = parent->scsi.dxfer_dir;
+ task->scsi.transfer_len = parent->scsi.transfer_len;
+ task->scsi.lun = parent->scsi.lun;
+ if (conn && (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV)) {
+ conn->data_in_cnt++;
+ }
+ }
+
+ return task;
+}
+
+void
+spdk_scsi_task_put(struct spdk_scsi_task *scsi_task)
+{
+ struct spdk_iscsi_task *task;
+
+ CU_ASSERT(scsi_task->ref > 0);
+ scsi_task->ref--;
+
+ task = iscsi_task_from_scsi_task(scsi_task);
+ if (task->parent) {
+ spdk_scsi_task_put(&task->parent->scsi);
+ }
+}
+
+DEFINE_STUB(spdk_scsi_dev_get_lun, struct spdk_scsi_lun *,
+ (struct spdk_scsi_dev *dev, int lun_id), NULL);
+
+DEFINE_STUB(spdk_scsi_dev_has_pending_tasks, bool,
+ (const struct spdk_scsi_dev *dev, const struct spdk_scsi_port *initiator_port),
+ true);
+
+DEFINE_STUB(spdk_scsi_lun_open, int,
+ (struct spdk_scsi_lun *lun, spdk_scsi_lun_remove_cb_t hotremove_cb,
+ void *hotremove_ctx, struct spdk_scsi_lun_desc **desc),
+ 0);
+
+DEFINE_STUB_V(spdk_scsi_lun_close, (struct spdk_scsi_lun_desc *desc));
+
+DEFINE_STUB(spdk_scsi_lun_allocate_io_channel, int,
+ (struct spdk_scsi_lun_desc *desc), 0);
+
+DEFINE_STUB_V(spdk_scsi_lun_free_io_channel, (struct spdk_scsi_lun_desc *desc));
+
+DEFINE_STUB(spdk_scsi_lun_get_id, int, (const struct spdk_scsi_lun *lun), 0);
+
+DEFINE_STUB(spdk_scsi_port_get_name, const char *,
+ (const struct spdk_scsi_port *port), NULL);
+
+void
+spdk_scsi_task_copy_status(struct spdk_scsi_task *dst,
+ struct spdk_scsi_task *src)
+{
+ dst->status = src->status;
+}
+
+DEFINE_STUB_V(spdk_scsi_task_set_data, (struct spdk_scsi_task *task, void *data, uint32_t len));
+
+DEFINE_STUB_V(spdk_scsi_task_process_null_lun, (struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(spdk_scsi_task_process_abort, (struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(iscsi_put_pdu, (struct spdk_iscsi_pdu *pdu));
+
+DEFINE_STUB_V(iscsi_param_free, (struct iscsi_param *params));
+
+DEFINE_STUB(iscsi_conn_params_init, int, (struct iscsi_param **params), 0);
+
+DEFINE_STUB_V(iscsi_clear_all_transfer_task,
+ (struct spdk_iscsi_conn *conn, struct spdk_scsi_lun *lun,
+ struct spdk_iscsi_pdu *pdu));
+
+DEFINE_STUB(iscsi_build_iovs, int,
+ (struct spdk_iscsi_conn *conn, struct iovec *iov, int num_iovs,
+ struct spdk_iscsi_pdu *pdu, uint32_t *mapped_length),
+ 0);
+
+DEFINE_STUB_V(iscsi_queue_task,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_task *task));
+
+DEFINE_STUB_V(iscsi_task_response,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_task *task));
+
+DEFINE_STUB_V(iscsi_task_mgmt_response,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_task *task));
+
+DEFINE_STUB_V(iscsi_send_nopin, (struct spdk_iscsi_conn *conn));
+
+bool
+iscsi_del_transfer_task(struct spdk_iscsi_conn *conn, uint32_t task_tag)
+{
+ struct spdk_iscsi_task *task;
+
+ task = TAILQ_FIRST(&conn->active_r2t_tasks);
+ if (task == NULL || task->tag != task_tag) {
+ return false;
+ }
+
+ TAILQ_REMOVE(&conn->active_r2t_tasks, task, link);
+ task->is_r2t_active = false;
+ iscsi_task_put(task);
+
+ return true;
+}
+
+DEFINE_STUB(iscsi_handle_incoming_pdus, int, (struct spdk_iscsi_conn *conn), 0);
+
+DEFINE_STUB_V(iscsi_free_sess, (struct spdk_iscsi_sess *sess));
+
+DEFINE_STUB(iscsi_tgt_node_cleanup_luns, int,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_tgt_node *target),
+ 0);
+
+DEFINE_STUB(iscsi_pdu_calc_header_digest, uint32_t,
+ (struct spdk_iscsi_pdu *pdu), 0);
+
+DEFINE_STUB(spdk_iscsi_pdu_calc_data_digest, uint32_t,
+ (struct spdk_iscsi_pdu *pdu), 0);
+
+DEFINE_STUB_V(shutdown_iscsi_conns_done, (void));
+
+static struct spdk_iscsi_task *
+ut_conn_task_get(struct spdk_iscsi_task *parent)
+{
+ struct spdk_iscsi_task *task;
+
+ task = calloc(1, sizeof(*task));
+ SPDK_CU_ASSERT_FATAL(task != NULL);
+
+ task->scsi.ref = 1;
+
+ if (parent) {
+ task->parent = parent;
+ parent->scsi.ref++;
+ }
+ return task;
+}
+
+static void
+ut_conn_create_read_tasks(struct spdk_iscsi_task *primary)
+{
+ struct spdk_iscsi_task *subtask;
+ uint32_t remaining_size = 0;
+
+ while (1) {
+ if (primary->current_datain_offset < primary->scsi.transfer_len) {
+ remaining_size = primary->scsi.transfer_len - primary->current_datain_offset;
+
+ subtask = ut_conn_task_get(primary);
+
+ subtask->scsi.offset = primary->current_datain_offset;
+ subtask->scsi.length = spdk_min(SPDK_BDEV_LARGE_BUF_MAX_SIZE, remaining_size);
+ subtask->scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ primary->current_datain_offset += subtask->scsi.length;
+
+ TAILQ_INSERT_TAIL(&g_ut_read_tasks, subtask, link);
+ }
+
+ if (primary->current_datain_offset == primary->scsi.transfer_len) {
+ break;
+ }
+ }
+}
+
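+/*
+ * With the transfer length used below (8 * SPDK_BDEV_LARGE_BUF_MAX_SIZE),
+ * the helper above produces exactly eight subtasks, each covering one
+ * SPDK_BDEV_LARGE_BUF_MAX_SIZE chunk at consecutive offsets:
+ *
+ *   subtask 0: offset = 0
+ *   subtask 1: offset = SPDK_BDEV_LARGE_BUF_MAX_SIZE
+ *   ...
+ *   subtask 7: offset = 7 * SPDK_BDEV_LARGE_BUF_MAX_SIZE
+ */
+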
+static void
+read_task_split_in_order_case(void)
+{
+ struct spdk_iscsi_task primary = {};
+ struct spdk_iscsi_task *task, *tmp;
+
+ primary.scsi.transfer_len = SPDK_BDEV_LARGE_BUF_MAX_SIZE * 8;
+ TAILQ_INIT(&primary.subtask_list);
+ primary.current_datain_offset = 0;
+ primary.bytes_completed = 0;
+ primary.scsi.ref = 1;
+
+ ut_conn_create_read_tasks(&primary);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_ut_read_tasks));
+
+ TAILQ_FOREACH(task, &g_ut_read_tasks, link) {
+ CU_ASSERT(&primary == iscsi_task_get_primary(task));
+ process_read_task_completion(NULL, task, &primary);
+ }
+
+ CU_ASSERT(primary.bytes_completed == primary.scsi.transfer_len);
+ CU_ASSERT(primary.scsi.ref == 0);
+
+ TAILQ_FOREACH_SAFE(task, &g_ut_read_tasks, link, tmp) {
+ CU_ASSERT(task->scsi.ref == 0);
+ TAILQ_REMOVE(&g_ut_read_tasks, task, link);
+ free(task);
+ }
+}
+
+static void
+read_task_split_reverse_order_case(void)
+{
+ struct spdk_iscsi_task primary = {};
+ struct spdk_iscsi_task *task, *tmp;
+
+ primary.scsi.transfer_len = SPDK_BDEV_LARGE_BUF_MAX_SIZE * 8;
+ TAILQ_INIT(&primary.subtask_list);
+ primary.current_datain_offset = 0;
+ primary.bytes_completed = 0;
+ primary.scsi.ref = 1;
+
+ ut_conn_create_read_tasks(&primary);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_ut_read_tasks));
+
+ TAILQ_FOREACH_REVERSE(task, &g_ut_read_tasks, read_tasks_head, link) {
+ CU_ASSERT(&primary == iscsi_task_get_primary(task));
+ process_read_task_completion(NULL, task, &primary);
+ }
+
+ CU_ASSERT(primary.bytes_completed == primary.scsi.transfer_len);
+ CU_ASSERT(primary.scsi.ref == 0);
+
+ TAILQ_FOREACH_SAFE(task, &g_ut_read_tasks, link, tmp) {
+ CU_ASSERT(task->scsi.ref == 0);
+ TAILQ_REMOVE(&g_ut_read_tasks, task, link);
+ free(task);
+ }
+}
+
+static void
+propagate_scsi_error_status_for_split_read_tasks(void)
+{
+ struct spdk_iscsi_task primary = {};
+ struct spdk_iscsi_task task1 = {}, task2 = {}, task3 = {}, task4 = {}, task5 = {}, task6 = {};
+
+ primary.scsi.transfer_len = 512 * 6;
+ primary.rsp_scsi_status = SPDK_SCSI_STATUS_GOOD;
+ TAILQ_INIT(&primary.subtask_list);
+ primary.scsi.ref = 7;
+
+ task1.scsi.offset = 0;
+ task1.scsi.length = 512;
+ task1.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task1.scsi.ref = 1;
+ task1.parent = &primary;
+
+ task2.scsi.offset = 512;
+ task2.scsi.length = 512;
+ task2.scsi.status = SPDK_SCSI_STATUS_CHECK_CONDITION;
+ task2.scsi.ref = 1;
+ task2.parent = &primary;
+
+ task3.scsi.offset = 512 * 2;
+ task3.scsi.length = 512;
+ task3.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task3.scsi.ref = 1;
+ task3.parent = &primary;
+
+ task4.scsi.offset = 512 * 3;
+ task4.scsi.length = 512;
+ task4.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task4.scsi.ref = 1;
+ task4.parent = &primary;
+
+ task5.scsi.offset = 512 * 4;
+ task5.scsi.length = 512;
+ task5.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task5.scsi.ref = 1;
+ task5.parent = &primary;
+
+ task6.scsi.offset = 512 * 5;
+ task6.scsi.length = 512;
+ task6.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task6.scsi.ref = 1;
+ task6.parent = &primary;
+
+	/* task2 completes with CHECK CONDITION status. Verify that the check
+	 * condition status is propagated to the remaining tasks correctly when
+	 * the tasks complete in the following order: task4, task3, task2,
+	 * task1, task5, and task6.
+	 */
+ process_read_task_completion(NULL, &task4, &primary);
+ process_read_task_completion(NULL, &task3, &primary);
+ process_read_task_completion(NULL, &task2, &primary);
+ process_read_task_completion(NULL, &task1, &primary);
+ process_read_task_completion(NULL, &task5, &primary);
+ process_read_task_completion(NULL, &task6, &primary);
+
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task1.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task2.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task3.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task4.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task5.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task6.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(primary.bytes_completed == primary.scsi.transfer_len);
+ CU_ASSERT(TAILQ_EMPTY(&primary.subtask_list));
+ CU_ASSERT(primary.scsi.ref == 0);
+ CU_ASSERT(task1.scsi.ref == 0);
+ CU_ASSERT(task2.scsi.ref == 0);
+ CU_ASSERT(task3.scsi.ref == 0);
+ CU_ASSERT(task4.scsi.ref == 0);
+ CU_ASSERT(task5.scsi.ref == 0);
+ CU_ASSERT(task6.scsi.ref == 0);
+}
+
+static void
+process_non_read_task_completion_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task primary = {};
+ struct spdk_iscsi_task task = {};
+
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ primary.bytes_completed = 0;
+ primary.scsi.transfer_len = 4096 * 3;
+ primary.rsp_scsi_status = SPDK_SCSI_STATUS_GOOD;
+ primary.scsi.ref = 1;
+ TAILQ_INSERT_TAIL(&conn.active_r2t_tasks, &primary, link);
+ primary.is_r2t_active = true;
+ primary.tag = 1;
+
+ /* First subtask which failed. */
+ task.scsi.length = 4096;
+ task.scsi.data_transferred = 4096;
+ task.scsi.status = SPDK_SCSI_STATUS_CHECK_CONDITION;
+ task.scsi.ref = 1;
+ task.parent = &primary;
+ primary.scsi.ref++;
+
+ process_non_read_task_completion(&conn, &task, &primary);
+ CU_ASSERT(!TAILQ_EMPTY(&conn.active_r2t_tasks));
+ CU_ASSERT(primary.bytes_completed == 4096);
+ CU_ASSERT(primary.scsi.data_transferred == 0);
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.scsi.ref == 0);
+ CU_ASSERT(primary.scsi.ref == 1);
+
+ /* Second subtask which succeeded. */
+ task.scsi.length = 4096;
+ task.scsi.data_transferred = 4096;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task.scsi.ref = 1;
+ task.parent = &primary;
+ primary.scsi.ref++;
+
+ process_non_read_task_completion(&conn, &task, &primary);
+ CU_ASSERT(!TAILQ_EMPTY(&conn.active_r2t_tasks));
+ CU_ASSERT(primary.bytes_completed == 4096 * 2);
+ CU_ASSERT(primary.scsi.data_transferred == 4096);
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.scsi.ref == 0);
+ CU_ASSERT(primary.scsi.ref == 1);
+
+ /* Third and final subtask which succeeded. */
+ task.scsi.length = 4096;
+ task.scsi.data_transferred = 4096;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task.scsi.ref = 1;
+ task.parent = &primary;
+ primary.scsi.ref++;
+
+ process_non_read_task_completion(&conn, &task, &primary);
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+ CU_ASSERT(primary.bytes_completed == 4096 * 3);
+ CU_ASSERT(primary.scsi.data_transferred == 4096 * 2);
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.scsi.ref == 0);
+ CU_ASSERT(primary.scsi.ref == 0);
+
+ /* Tricky case when the last task completed was the initial task. */
+ primary.scsi.length = 4096;
+ primary.bytes_completed = 4096 * 2;
+ primary.scsi.data_transferred = 4096 * 2;
+ primary.scsi.transfer_len = 4096 * 3;
+ primary.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ primary.rsp_scsi_status = SPDK_SCSI_STATUS_GOOD;
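+	/* Hold two references: the completion path is expected to drop one,
+	 * and the iscsi_del_transfer_task() stub above drops the other when
+	 * the active R2T is terminated. */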
+ primary.scsi.ref = 2;
+ TAILQ_INSERT_TAIL(&conn.active_r2t_tasks, &primary, link);
+ primary.is_r2t_active = true;
+
+ process_non_read_task_completion(&conn, &primary, &primary);
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+ CU_ASSERT(primary.bytes_completed == 4096 * 3);
+ CU_ASSERT(primary.scsi.data_transferred == 4096 * 2);
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(primary.scsi.ref == 0);
+
+	/* Further tricky case when the last task completed was the initial task,
+ * and the R2T was already terminated.
+ */
+ primary.scsi.ref = 1;
+ primary.scsi.length = 4096;
+ primary.bytes_completed = 4096 * 2;
+ primary.scsi.data_transferred = 4096 * 2;
+ primary.scsi.transfer_len = 4096 * 3;
+ primary.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ primary.rsp_scsi_status = SPDK_SCSI_STATUS_GOOD;
+ primary.is_r2t_active = false;
+
+ process_non_read_task_completion(&conn, &primary, &primary);
+ CU_ASSERT(primary.bytes_completed == 4096 * 3);
+ CU_ASSERT(primary.scsi.data_transferred == 4096 * 2);
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(primary.scsi.ref == 0);
+}
+
+static bool
+dequeue_pdu(void *_head, struct spdk_iscsi_pdu *pdu)
+{
+ TAILQ_HEAD(queued_pdus, spdk_iscsi_pdu) *head = _head;
+ struct spdk_iscsi_pdu *tmp;
+
+ TAILQ_FOREACH(tmp, head, tailq) {
+ if (tmp == pdu) {
+ TAILQ_REMOVE(head, tmp, tailq);
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool
+dequeue_task(void *_head, struct spdk_iscsi_task *task)
+{
+ TAILQ_HEAD(queued_tasks, spdk_iscsi_task) *head = _head;
+ struct spdk_iscsi_task *tmp;
+
+ TAILQ_FOREACH(tmp, head, link) {
+ if (tmp == task) {
+ TAILQ_REMOVE(head, tmp, link);
+ return true;
+ }
+ }
+ return false;
+}
+
+static void
+iscsi_conn_pdu_dummy_complete(void *arg)
+{
+}
+
+static void
+free_tasks_on_connection(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu1 = {}, pdu2 = {}, pdu3 = {}, pdu4 = {};
+ struct spdk_iscsi_task task1 = {}, task2 = {}, task3 = {};
+ struct spdk_scsi_lun lun1 = {}, lun2 = {};
+
+ TAILQ_INIT(&conn.write_pdu_list);
+ TAILQ_INIT(&conn.snack_pdu_list);
+ TAILQ_INIT(&conn.queued_datain_tasks);
+ conn.data_in_cnt = MAX_LARGE_DATAIN_PER_CONNECTION;
+
+ pdu1.task = &task1;
+ pdu2.task = &task2;
+ pdu3.task = &task3;
+
+ pdu1.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu2.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu3.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu4.cb_fn = iscsi_conn_pdu_dummy_complete;
+
+ task1.scsi.lun = &lun1;
+ task2.scsi.lun = &lun2;
+
+ task1.is_queued = false;
+ task2.is_queued = false;
+ task3.is_queued = true;
+
+ /* Test conn->write_pdu_list. */
+
+ task1.scsi.ref = 1;
+ task2.scsi.ref = 1;
+ task3.scsi.ref = 1;
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu1, tailq);
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu2, tailq);
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu3, tailq);
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu4, tailq);
+
+ /* Free all PDUs when exiting connection. */
+ iscsi_conn_free_tasks(&conn);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.write_pdu_list));
+ CU_ASSERT(task1.scsi.ref == 0);
+ CU_ASSERT(task2.scsi.ref == 0);
+ CU_ASSERT(task3.scsi.ref == 0);
+
+ /* Test conn->snack_pdu_list */
+
+ task1.scsi.ref = 1;
+ task2.scsi.ref = 1;
+ task3.scsi.ref = 1;
+ pdu1.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu2.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu3.cb_fn = iscsi_conn_pdu_dummy_complete;
+ TAILQ_INSERT_TAIL(&conn.snack_pdu_list, &pdu1, tailq);
+ TAILQ_INSERT_TAIL(&conn.snack_pdu_list, &pdu2, tailq);
+ TAILQ_INSERT_TAIL(&conn.snack_pdu_list, &pdu3, tailq);
+
+ /* Free all PDUs and associated tasks when exiting connection. */
+ iscsi_conn_free_tasks(&conn);
+
+ CU_ASSERT(!dequeue_pdu(&conn.snack_pdu_list, &pdu1));
+ CU_ASSERT(!dequeue_pdu(&conn.snack_pdu_list, &pdu2));
+ CU_ASSERT(!dequeue_pdu(&conn.snack_pdu_list, &pdu3));
+ CU_ASSERT(task1.scsi.ref == 0);
+ CU_ASSERT(task2.scsi.ref == 0);
+ CU_ASSERT(task3.scsi.ref == 0);
+
+ /* Test conn->queued_datain_tasks */
+
+ task1.scsi.ref = 1;
+ task2.scsi.ref = 1;
+ task3.scsi.ref = 1;
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task1, link);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task2, link);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task3, link);
+
+	/* Free all tasks that are not queued when exiting connection. */
+ iscsi_conn_free_tasks(&conn);
+
+ CU_ASSERT(!dequeue_task(&conn.queued_datain_tasks, &task1));
+ CU_ASSERT(!dequeue_task(&conn.queued_datain_tasks, &task2));
+ CU_ASSERT(dequeue_task(&conn.queued_datain_tasks, &task3));
+ CU_ASSERT(task1.scsi.ref == 0);
+ CU_ASSERT(task2.scsi.ref == 0);
+ CU_ASSERT(task3.scsi.ref == 1);
+}
+
+static void
+free_tasks_with_queued_datain(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu1 = {}, pdu2 = {}, pdu3 = {}, pdu4 = {}, pdu5 = {}, pdu6 = {};
+ struct spdk_iscsi_task task1 = {}, task2 = {}, task3 = {}, task4 = {}, task5 = {}, task6 = {};
+
+ TAILQ_INIT(&conn.write_pdu_list);
+ TAILQ_INIT(&conn.snack_pdu_list);
+ TAILQ_INIT(&conn.queued_datain_tasks);
+
+ pdu1.task = &task1;
+ pdu2.task = &task2;
+ pdu3.task = &task3;
+ pdu1.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu2.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu3.cb_fn = iscsi_conn_pdu_dummy_complete;
+
+ task1.scsi.ref = 1;
+ task2.scsi.ref = 1;
+ task3.scsi.ref = 1;
+
+ pdu3.bhs.opcode = ISCSI_OP_SCSI_DATAIN;
+ task3.scsi.offset = 1;
+ conn.data_in_cnt = 1;
+
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu1, tailq);
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu2, tailq);
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu3, tailq);
+
+ task4.scsi.ref = 1;
+ task5.scsi.ref = 1;
+ task6.scsi.ref = 1;
+
+ task4.pdu = &pdu4;
+ task5.pdu = &pdu5;
+ task6.pdu = &pdu6;
+ pdu4.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu5.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu6.cb_fn = iscsi_conn_pdu_dummy_complete;
+
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task4, link);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task5, link);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task6, link);
+
+ iscsi_conn_free_tasks(&conn);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.write_pdu_list));
+ CU_ASSERT(TAILQ_EMPTY(&conn.queued_datain_tasks));
+}
+
+static void
+abort_queued_datain_task_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {}, subtask = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iscsi_bhs_scsi_req *scsi_req;
+ int rc;
+
+ TAILQ_INIT(&conn.queued_datain_tasks);
+ task.scsi.ref = 1;
+ task.scsi.dxfer_dir = SPDK_SCSI_DIR_FROM_DEV;
+ task.pdu = &pdu;
+ TAILQ_INIT(&task.subtask_list);
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu.bhs;
+ scsi_req->read_bit = 1;
+
+ g_new_task = &subtask;
+
+	/* Case 1: Queue one task, and this task is not executed */
+ task.scsi.transfer_len = SPDK_BDEV_LARGE_BUF_MAX_SIZE * 3;
+ task.scsi.offset = 0;
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task, link);
+
+ /* No slots for sub read tasks */
+ conn.data_in_cnt = MAX_LARGE_DATAIN_PER_CONNECTION;
+ rc = _iscsi_conn_abort_queued_datain_task(&conn, &task);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(!TAILQ_EMPTY(&conn.queued_datain_tasks));
+
+ /* Have slots for sub read tasks */
+ conn.data_in_cnt = 0;
+ rc = _iscsi_conn_abort_queued_datain_task(&conn, &task);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(TAILQ_EMPTY(&conn.queued_datain_tasks));
+ CU_ASSERT(task.current_datain_offset == SPDK_BDEV_LARGE_BUF_MAX_SIZE * 3);
+ CU_ASSERT(task.scsi.ref == 0);
+ CU_ASSERT(subtask.scsi.offset == 0);
+ CU_ASSERT(subtask.scsi.length == SPDK_BDEV_LARGE_BUF_MAX_SIZE * 3);
+ CU_ASSERT(subtask.scsi.ref == 0);
+
+	/* Case 2: Queue one task, and this task is partially executed */
+ task.scsi.ref = 1;
+ task.scsi.transfer_len = SPDK_BDEV_LARGE_BUF_MAX_SIZE * 3;
+ task.current_datain_offset = SPDK_BDEV_LARGE_BUF_MAX_SIZE;
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task, link);
+
+ /* No slots for sub read tasks */
+ conn.data_in_cnt = MAX_LARGE_DATAIN_PER_CONNECTION;
+ rc = _iscsi_conn_abort_queued_datain_task(&conn, &task);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(!TAILQ_EMPTY(&conn.queued_datain_tasks));
+
+	/* Have slots for sub read tasks */
+ conn.data_in_cnt = 0;
+ rc = _iscsi_conn_abort_queued_datain_task(&conn, &task);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(task.current_datain_offset == SPDK_BDEV_LARGE_BUF_MAX_SIZE * 3);
+ CU_ASSERT(task.scsi.ref == 2);
+ CU_ASSERT(TAILQ_FIRST(&task.subtask_list) == &subtask);
+ CU_ASSERT(subtask.scsi.offset == SPDK_BDEV_LARGE_BUF_MAX_SIZE);
+ CU_ASSERT(subtask.scsi.length == SPDK_BDEV_LARGE_BUF_MAX_SIZE * 2);
+ CU_ASSERT(subtask.scsi.ref == 1);
+
+ g_new_task = NULL;
+}
+
+static bool
+datain_task_is_queued(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_task *task)
+{
+ struct spdk_iscsi_task *tmp;
+
+ TAILQ_FOREACH(tmp, &conn->queued_datain_tasks, link) {
+ if (tmp == task) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static void
+abort_queued_datain_tasks_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task1 = {}, task2 = {}, task3 = {}, task4 = {}, task5 = {}, task6 = {};
+ struct spdk_iscsi_task subtask = {};
+ struct spdk_iscsi_pdu pdu1 = {}, pdu2 = {}, pdu3 = {}, pdu4 = {}, pdu5 = {}, pdu6 = {};
+ struct spdk_iscsi_pdu mgmt_pdu1 = {}, mgmt_pdu2 = {};
+ struct spdk_scsi_lun lun1 = {}, lun2 = {};
+ uint32_t alloc_cmd_sn;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ int rc;
+
+ TAILQ_INIT(&conn.queued_datain_tasks);
+ conn.data_in_cnt = 0;
+
+ g_new_task = &subtask;
+
+ alloc_cmd_sn = 88;
+
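+	/* Interleave the cmd_sn values so that each management PDU should
+	 * abort only the queued tasks on the matching LUN whose cmd_sn is
+	 * older than its own. */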
+ pdu1.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu1.bhs;
+ scsi_req->read_bit = 1;
+ task1.scsi.ref = 1;
+ task1.current_datain_offset = 0;
+ task1.scsi.transfer_len = 512;
+ task1.scsi.lun = &lun1;
+ iscsi_task_set_pdu(&task1, &pdu1);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task1, link);
+
+ pdu2.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu2.bhs;
+ scsi_req->read_bit = 1;
+ task2.scsi.ref = 1;
+ task2.current_datain_offset = 0;
+ task2.scsi.transfer_len = 512;
+ task2.scsi.lun = &lun2;
+ iscsi_task_set_pdu(&task2, &pdu2);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task2, link);
+
+ mgmt_pdu1.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+
+ pdu3.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu3.bhs;
+ scsi_req->read_bit = 1;
+ task3.scsi.ref = 1;
+ task3.current_datain_offset = 0;
+ task3.scsi.transfer_len = 512;
+ task3.scsi.lun = &lun1;
+ iscsi_task_set_pdu(&task3, &pdu3);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task3, link);
+
+ pdu4.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu4.bhs;
+ scsi_req->read_bit = 1;
+ task4.scsi.ref = 1;
+ task4.current_datain_offset = 0;
+ task4.scsi.transfer_len = 512;
+ task4.scsi.lun = &lun2;
+ iscsi_task_set_pdu(&task4, &pdu4);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task4, link);
+
+ pdu5.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu5.bhs;
+ scsi_req->read_bit = 1;
+ task5.scsi.ref = 1;
+ task5.current_datain_offset = 0;
+ task5.scsi.transfer_len = 512;
+ task5.scsi.lun = &lun1;
+ iscsi_task_set_pdu(&task5, &pdu5);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task5, link);
+
+ mgmt_pdu2.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+
+ pdu6.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu6.bhs;
+ scsi_req->read_bit = 1;
+ task6.scsi.ref = 1;
+ task6.current_datain_offset = 0;
+ task6.scsi.transfer_len = 512;
+ task6.scsi.lun = &lun2;
+ iscsi_task_set_pdu(&task6, &pdu6);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task6, link);
+
+ rc = iscsi_conn_abort_queued_datain_tasks(&conn, &lun1, &mgmt_pdu1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!datain_task_is_queued(&conn, &task1));
+ CU_ASSERT(datain_task_is_queued(&conn, &task2));
+ CU_ASSERT(datain_task_is_queued(&conn, &task3));
+ CU_ASSERT(datain_task_is_queued(&conn, &task4));
+ CU_ASSERT(datain_task_is_queued(&conn, &task5));
+ CU_ASSERT(datain_task_is_queued(&conn, &task6));
+
+ rc = iscsi_conn_abort_queued_datain_tasks(&conn, &lun2, &mgmt_pdu2);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!datain_task_is_queued(&conn, &task2));
+ CU_ASSERT(datain_task_is_queued(&conn, &task3));
+ CU_ASSERT(!datain_task_is_queued(&conn, &task4));
+ CU_ASSERT(datain_task_is_queued(&conn, &task5));
+ CU_ASSERT(datain_task_is_queued(&conn, &task6));
+
+ CU_ASSERT(task1.scsi.ref == 0);
+ CU_ASSERT(task2.scsi.ref == 0);
+ CU_ASSERT(task3.scsi.ref == 1);
+ CU_ASSERT(task4.scsi.ref == 0);
+ CU_ASSERT(task5.scsi.ref == 1);
+ CU_ASSERT(task6.scsi.ref == 1);
+ CU_ASSERT(subtask.scsi.ref == 0);
+
+ g_new_task = NULL;
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("conn_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, read_task_split_in_order_case);
+ CU_ADD_TEST(suite, read_task_split_reverse_order_case);
+ CU_ADD_TEST(suite, propagate_scsi_error_status_for_split_read_tasks);
+ CU_ADD_TEST(suite, process_non_read_task_completion_test);
+ CU_ADD_TEST(suite, free_tasks_on_connection);
+ CU_ADD_TEST(suite, free_tasks_with_queued_datain);
+ CU_ADD_TEST(suite, abort_queued_datain_task_test);
+ CU_ADD_TEST(suite, abort_queued_datain_tasks_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore b/src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore
new file mode 100644
index 000000000..8fbc2b636
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore
@@ -0,0 +1 @@
+init_grp_ut
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile b/src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile
new file mode 100644
index 000000000..708e691a5
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf
+TEST_FILE = init_grp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf
new file mode 100644
index 000000000..aaa660def
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf
@@ -0,0 +1,31 @@
+[IG_Valid0]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ Netmask 192.168.2.0
+
+[IG_Valid1]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ Netmask 192.168.2.0
+ Netmask 192.168.2.1
+
+[IG_Valid2]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ InitiatorName iqn.2017-10.spdk.io:0002
+ Netmask 192.168.2.0
+
+[IG_Valid3]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ InitiatorName iqn.2017-10.spdk.io:0002
+ Netmask 192.168.2.0
+ Netmask 192.168.2.1
+
+[IG_Invalid0]
+# Failure is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+
+[IG_Invalid1]
+# Failure is expected.
+ Netmask 192.168.2.0
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c
new file mode 100644
index 000000000..199aad8b8
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c
@@ -0,0 +1,674 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "CUnit/Basic.h"
+
+#include "iscsi/init_grp.c"
+#include "unit/lib/json_mock.c"
+
+SPDK_LOG_REGISTER_COMPONENT("iscsi", SPDK_LOG_ISCSI)
+
+struct spdk_iscsi_globals g_iscsi;
+
+const char *config_file;
+
+static int
+test_setup(void)
+{
+ TAILQ_INIT(&g_iscsi.ig_head);
+ return 0;
+}
+
+static void
+create_from_config_file_cases(void)
+{
+ struct spdk_conf *config;
+ struct spdk_conf_section *sp;
+ char section_name[64];
+ int section_index;
+ int rc;
+
+ config = spdk_conf_allocate();
+
+ rc = spdk_conf_read(config, config_file);
+ CU_ASSERT(rc == 0);
+
+ section_index = 0;
+ while (true) {
+ snprintf(section_name, sizeof(section_name), "IG_Valid%d", section_index);
+
+ sp = spdk_conf_find_section(config, section_name);
+ if (sp == NULL) {
+ break;
+ }
+
+ rc = iscsi_parse_init_grp(sp);
+ CU_ASSERT(rc == 0);
+
+ iscsi_init_grps_destroy();
+
+ section_index++;
+ }
+
+ section_index = 0;
+ while (true) {
+ snprintf(section_name, sizeof(section_name), "IG_Invalid%d", section_index);
+
+ sp = spdk_conf_find_section(config, section_name);
+ if (sp == NULL) {
+ break;
+ }
+
+ rc = iscsi_parse_init_grp(sp);
+ CU_ASSERT(rc != 0);
+
+ iscsi_init_grps_destroy();
+
+ section_index++;
+ }
+
+ spdk_conf_free(config);
+}
+
+
+create_initiator_group_success_case(void)
+{
+ struct spdk_iscsi_init_grp *ig;
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+find_initiator_group_success_case(void)
+{
+ struct spdk_iscsi_init_grp *ig, *tmp;
+ int rc;
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = iscsi_init_grp_register(ig);
+ CU_ASSERT(rc == 0);
+
+ ig = iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig != NULL);
+
+ tmp = iscsi_init_grp_unregister(1);
+ CU_ASSERT(ig == tmp);
+ iscsi_init_grp_destroy(ig);
+
+ ig = iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig == NULL);
+}
+
+static void
+register_initiator_group_twice_case(void)
+{
+ struct spdk_iscsi_init_grp *ig, *tmp;
+ int rc;
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = iscsi_init_grp_register(ig);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_register(ig);
+ CU_ASSERT(rc != 0);
+
+ ig = iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig != NULL);
+
+ tmp = iscsi_init_grp_unregister(1);
+ CU_ASSERT(tmp == ig);
+ iscsi_init_grp_destroy(ig);
+
+ ig = iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig == NULL);
+}
+
+static void
+add_initiator_name_success_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *name1 = "iqn.2017-10.spdk.io:0001";
+ char *name2 = "iqn.2017-10.spdk.io:0002";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different names to the empty name list */
+ rc = iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_initiator(ig, name2);
+ CU_ASSERT(rc == 0);
+
+ /* check if two names are added correctly. */
+ iname = iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname != NULL);
+
+ iname = iscsi_init_grp_find_initiator(ig, name2);
+ CU_ASSERT(iname != NULL);
+
+ /* restore the initial state */
+ rc = iscsi_init_grp_delete_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ iname = iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname == NULL);
+
+ rc = iscsi_init_grp_delete_initiator(ig, name2);
+ CU_ASSERT(rc == 0);
+
+ iname = iscsi_init_grp_find_initiator(ig, name2);
+ CU_ASSERT(iname == NULL);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_initiator_name_fail_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *name1 = "iqn.2017-10.spdk.io:0001";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+	/* add a name to the full name list */
+ ig->ninitiators = MAX_INITIATOR;
+
+ rc = iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc != 0);
+
+ ig->ninitiators = 0;
+
+ /* add the same name to the name list twice */
+ rc = iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc != 0);
+
+ /* restore the initial state */
+ rc = iscsi_init_grp_delete_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ iname = iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname == NULL);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_all_initiator_names_success_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *name1 = "iqn.2017-10.spdk.io:0001";
+ char *name2 = "iqn.2017-10.spdk.io:0002";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different names to the empty name list */
+ rc = iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_initiator(ig, name2);
+ CU_ASSERT(rc == 0);
+
+ /* delete all initiator names */
+ iscsi_init_grp_delete_all_initiators(ig);
+
+ /* check if two names are deleted correctly. */
+ iname = iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname == NULL);
+
+ iname = iscsi_init_grp_find_initiator(ig, name2);
+ CU_ASSERT(iname == NULL);
+
+ /* restore the initial state */
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_netmask_success_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *netmask1 = "192.168.2.0";
+ char *netmask2 = "192.168.2.1";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different netmasks to the empty netmask list */
+ rc = iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_netmask(ig, netmask2);
+ CU_ASSERT(rc == 0);
+
+ /* check if two netmasks are added correctly. */
+ imask = iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask != NULL);
+
+ imask = iscsi_init_grp_find_netmask(ig, netmask2);
+ CU_ASSERT(imask != NULL);
+
+ /* restore the initial state */
+ rc = iscsi_init_grp_delete_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ imask = iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask == NULL);
+
+ rc = iscsi_init_grp_delete_netmask(ig, netmask2);
+ CU_ASSERT(rc == 0);
+
+ imask = iscsi_init_grp_find_netmask(ig, netmask2);
+ CU_ASSERT(imask == NULL);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_netmask_fail_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *netmask1 = "192.168.2.0";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+	/* add a netmask to the full netmask list */
+ ig->nnetmasks = MAX_NETMASK;
+
+ rc = iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc != 0);
+
+ ig->nnetmasks = 0;
+
+ /* add the same netmask to the netmask list twice */
+ rc = iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc != 0);
+
+ /* restore the initial state */
+ rc = iscsi_init_grp_delete_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ imask = iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask == NULL);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_all_netmasks_success_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *netmask1 = "192.168.2.0";
+ char *netmask2 = "192.168.2.1";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different netmasks to the empty netmask list */
+ rc = iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_netmask(ig, netmask2);
+ CU_ASSERT(rc == 0);
+
+ /* delete all netmasks */
+ iscsi_init_grp_delete_all_netmasks(ig);
+
+ /* check if two netmasks are deleted correctly. */
+ imask = iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask == NULL);
+
+ imask = iscsi_init_grp_find_netmask(ig, netmask2);
+ CU_ASSERT(imask == NULL);
+
+ /* restore the initial state */
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+initiator_name_overwrite_all_to_any_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *all = "ALL";
+ char *any = "ANY";
+ char *all_not = "!ALL";
+ char *any_not = "!ANY";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
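+	/* The legacy wildcard "ALL" is expected to be normalized to "ANY":
+	 * lookup by the original spelling fails while "ANY" succeeds. */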
+ rc = iscsi_init_grp_add_initiator(ig, all);
+ CU_ASSERT(rc == 0);
+
+ iname = iscsi_init_grp_find_initiator(ig, all);
+ CU_ASSERT(iname == NULL);
+
+ iname = iscsi_init_grp_find_initiator(ig, any);
+ CU_ASSERT(iname != NULL);
+
+ rc = iscsi_init_grp_delete_initiator(ig, any);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_initiator(ig, all_not);
+ CU_ASSERT(rc == 0);
+
+ iname = iscsi_init_grp_find_initiator(ig, all_not);
+ CU_ASSERT(iname == NULL);
+
+ iname = iscsi_init_grp_find_initiator(ig, any_not);
+ CU_ASSERT(iname != NULL);
+
+ rc = iscsi_init_grp_delete_initiator(ig, any_not);
+ CU_ASSERT(rc == 0);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+netmask_overwrite_all_to_any_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *all = "ALL";
+ char *any = "ANY";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = iscsi_init_grp_add_netmask(ig, all);
+ CU_ASSERT(rc == 0);
+
+ imask = iscsi_init_grp_find_netmask(ig, all);
+ CU_ASSERT(imask == NULL);
+
+ imask = iscsi_init_grp_find_netmask(ig, any);
+ CU_ASSERT(imask != NULL);
+
+ rc = iscsi_init_grp_delete_netmask(ig, any);
+ CU_ASSERT(rc == 0);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_delete_initiator_names_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *names[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0003"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_initiators(ig, 3, names);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ iname = iscsi_init_grp_find_initiator(ig, names[i]);
+ CU_ASSERT(iname != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_initiators(ig, 3, names);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->initiator_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_duplicated_initiator_names_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ char *names[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0001"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_initiators(ig, 3, names);
+ CU_ASSERT(rc != 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->initiator_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_nonexisting_initiator_names_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *names1[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0003"};
+ char *names2[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0004"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_initiators(ig, 3, names1);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ iname = iscsi_init_grp_find_initiator(ig, names1[i]);
+ CU_ASSERT(iname != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_initiators(ig, 3, names2);
+ CU_ASSERT(rc != 0);
+
+ for (i = 0; i < 3; i++) {
+ iname = iscsi_init_grp_find_initiator(ig, names1[i]);
+ CU_ASSERT(iname != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_initiators(ig, 3, names1);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->initiator_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_delete_netmasks_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *netmask;
+ char *netmasks[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.2"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_netmasks(ig, 3, netmasks);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ netmask = iscsi_init_grp_find_netmask(ig, netmasks[i]);
+ CU_ASSERT(netmask != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_netmasks(ig, 3, netmasks);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->netmask_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_duplicated_netmasks_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ char *netmasks[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.0"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_netmasks(ig, 3, netmasks);
+ CU_ASSERT(rc != 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->netmask_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_nonexisting_netmasks_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *netmask;
+ char *netmasks1[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.2"};
+ char *netmasks2[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.3"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_netmasks(ig, 3, netmasks1);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ netmask = iscsi_init_grp_find_netmask(ig, netmasks1[i]);
+ CU_ASSERT(netmask != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_netmasks(ig, 3, netmasks2);
+ CU_ASSERT(rc != 0);
+
+ for (i = 0; i < 3; i++) {
+ netmask = iscsi_init_grp_find_netmask(ig, netmasks1[i]);
+ CU_ASSERT(netmask != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_netmasks(ig, 3, netmasks1);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->netmask_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (argc < 2) {
+ fprintf(stderr, "usage: %s <config file>\n", argv[0]);
+ exit(1);
+ }
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ config_file = argv[1];
+
+ suite = CU_add_suite("init_grp_suite", test_setup, NULL);
+
+ CU_ADD_TEST(suite, create_from_config_file_cases);
+ CU_ADD_TEST(suite, create_initiator_group_success_case);
+ CU_ADD_TEST(suite, find_initiator_group_success_case);
+ CU_ADD_TEST(suite, register_initiator_group_twice_case);
+ CU_ADD_TEST(suite, add_initiator_name_success_case);
+ CU_ADD_TEST(suite, add_initiator_name_fail_case);
+ CU_ADD_TEST(suite, delete_all_initiator_names_success_case);
+ CU_ADD_TEST(suite, add_netmask_success_case);
+ CU_ADD_TEST(suite, add_netmask_fail_case);
+ CU_ADD_TEST(suite, delete_all_netmasks_success_case);
+ CU_ADD_TEST(suite, initiator_name_overwrite_all_to_any_case);
+ CU_ADD_TEST(suite, netmask_overwrite_all_to_any_case);
+ CU_ADD_TEST(suite, add_delete_initiator_names_case);
+ CU_ADD_TEST(suite, add_duplicated_initiator_names_case);
+ CU_ADD_TEST(suite, delete_nonexisting_initiator_names_case);
+ CU_ADD_TEST(suite, add_delete_netmasks_case);
+ CU_ADD_TEST(suite, add_duplicated_netmasks_case);
+ CU_ADD_TEST(suite, delete_nonexisting_netmasks_case);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore b/src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore
new file mode 100644
index 000000000..4d41887c8
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore
@@ -0,0 +1 @@
+iscsi_ut
diff --git a/src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile b/src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile
new file mode 100644
index 000000000..66d7334a4
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile
@@ -0,0 +1,46 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf util
+
+SCSI_OBJS = port
+ISCSI_OBJS = md5 param
+LIBS += $(SCSI_OBJS:%=$(SPDK_ROOT_DIR)/lib/scsi/%.o)
+LIBS += $(ISCSI_OBJS:%=$(SPDK_ROOT_DIR)/lib/iscsi/%.o)
+LIBS += -lcunit
+
+TEST_FILE = iscsi_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c b/src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c
new file mode 100644
index 000000000..f96afd999
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c
@@ -0,0 +1,2024 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/endian.h"
+#include "spdk/scsi.h"
+#include "spdk_cunit.h"
+
+#include "CUnit/Basic.h"
+
+#include "iscsi/iscsi.c"
+
+#include "../common.c"
+#include "iscsi/portal_grp.h"
+#include "scsi/scsi_internal.h"
+#include "common/lib/test_env.c"
+
+#include "spdk_internal/mock.h"
+
+#define UT_TARGET_NAME1 "iqn.2017-11.spdk.io:t0001"
+#define UT_TARGET_NAME2 "iqn.2017-11.spdk.io:t0002"
+#define UT_INITIATOR_NAME1 "iqn.2017-11.spdk.io:i0001"
+#define UT_INITIATOR_NAME2 "iqn.2017-11.spdk.io:i0002"
+#define UT_ISCSI_TSIH 256
+
+struct spdk_iscsi_tgt_node g_tgt;
+
+struct spdk_iscsi_tgt_node *
+iscsi_find_tgt_node(const char *target_name)
+{
+ if (strcasecmp(target_name, UT_TARGET_NAME1) == 0) {
+ g_tgt.dev = NULL;
+ return (struct spdk_iscsi_tgt_node *)&g_tgt;
+ } else {
+ return NULL;
+ }
+}
+
+bool
+iscsi_tgt_node_access(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_tgt_node *target,
+ const char *iqn, const char *addr)
+{
+ if (strcasecmp(conn->initiator_name, UT_INITIATOR_NAME1) == 0) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
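+/* The DEFINE_STUB()/DEFINE_STUB_V() macros from spdk_internal/mock.h generate
+ * stub definitions for the dependencies of iscsi.c, so the file can be linked
+ * without the rest of the target. DEFINE_STUB(fn, ret, args, val) emits an
+ * fn() that returns val by default; a test needing a different value could
+ * presumably override it with the mock framework's MOCK_SET(fn, val) before
+ * exercising the code path.
+ */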
+DEFINE_STUB(iscsi_send_tgts, int,
+ (struct spdk_iscsi_conn *conn, const char *iiqn, const char *iaddr,
+ const char *tiqn, uint8_t *data, int alloc_len, int data_len),
+ 0);
+
+DEFINE_STUB(iscsi_tgt_node_is_destructed, bool,
+ (struct spdk_iscsi_tgt_node *target), false);
+
+DEFINE_STUB_V(iscsi_portal_grp_close_all, (void));
+
+DEFINE_STUB_V(iscsi_conn_schedule, (struct spdk_iscsi_conn *conn));
+
+DEFINE_STUB_V(iscsi_conn_free_pdu,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu));
+
+DEFINE_STUB_V(iscsi_conn_pdu_generic_complete, (void *cb_arg));
+
+DEFINE_STUB(iscsi_conn_handle_queued_datain_tasks, int,
+ (struct spdk_iscsi_conn *conn), 0);
+
+DEFINE_STUB(iscsi_conn_abort_queued_datain_task, int,
+ (struct spdk_iscsi_conn *conn, uint32_t ref_task_tag), 0);
+
+DEFINE_STUB(iscsi_conn_abort_queued_datain_tasks, int,
+ (struct spdk_iscsi_conn *conn, struct spdk_scsi_lun *lun,
+ struct spdk_iscsi_pdu *pdu), 0);
+
+DEFINE_STUB(iscsi_chap_get_authinfo, int,
+ (struct iscsi_chap_auth *auth, const char *authuser, int ag_tag),
+ 0);
+
+DEFINE_STUB(spdk_sock_set_recvbuf, int, (struct spdk_sock *sock, int sz), 0);
+
+int
+spdk_scsi_lun_get_id(const struct spdk_scsi_lun *lun)
+{
+ return lun->id;
+}
+
+DEFINE_STUB(spdk_scsi_lun_is_removing, bool, (const struct spdk_scsi_lun *lun),
+ true);
+
+struct spdk_scsi_lun *
+spdk_scsi_dev_get_lun(struct spdk_scsi_dev *dev, int lun_id)
+{
+ if (lun_id < 0 || lun_id >= SPDK_SCSI_DEV_MAX_LUN) {
+ return NULL;
+ }
+
+ return dev->lun[lun_id];
+}
+
+DEFINE_STUB(spdk_scsi_lun_id_int_to_fmt, uint64_t, (int lun_id), 0);
+
+DEFINE_STUB(spdk_scsi_lun_id_fmt_to_int, int, (uint64_t lun_fmt), 0);
+
+DEFINE_STUB(spdk_scsi_lun_get_dif_ctx, bool,
+ (struct spdk_scsi_lun *lun, struct spdk_scsi_task *task,
+ struct spdk_dif_ctx *dif_ctx), false);
+
+static void
+op_login_check_target_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu rsp_pdu = {};
+ struct spdk_iscsi_tgt_node *target;
+ int rc;
+
+ /* expect success */
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME1);
+
+ rc = iscsi_op_login_check_target(&conn, &rsp_pdu,
+ UT_TARGET_NAME1, &target);
+ CU_ASSERT(rc == 0);
+
+ /* expect failure */
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME1);
+
+ rc = iscsi_op_login_check_target(&conn, &rsp_pdu,
+ UT_TARGET_NAME2, &target);
+ CU_ASSERT(rc != 0);
+
+ /* expect failure */
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME2);
+
+ rc = iscsi_op_login_check_target(&conn, &rsp_pdu,
+ UT_TARGET_NAME1, &target);
+ CU_ASSERT(rc != 0);
+}
+
+static void
+op_login_session_normal_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_portal portal = {};
+ struct spdk_iscsi_portal_grp group = {};
+ struct spdk_iscsi_pdu rsp_pdu = {};
+ struct iscsi_bhs_login_rsp *rsph;
+ struct spdk_iscsi_sess sess = {};
+ struct iscsi_param param = {};
+ int rc;
+
+ /* setup related data structures */
+ rsph = (struct iscsi_bhs_login_rsp *)&rsp_pdu.bhs;
+ rsph->tsih = 0;
+ memset(rsph->isid, 0, sizeof(rsph->isid));
+ conn.portal = &portal;
+ portal.group = &group;
+ conn.portal->group->tag = 0;
+ conn.params = NULL;
+
+ /* expect failure: NULL params for target name */
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ NULL, 0);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
+ CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_MISSING_PARMS);
+
+ /* expect failure: incorrect key for target name */
+ param.next = NULL;
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
+ CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_MISSING_PARMS);
+
+ /* expect failure: NULL target name */
+ param.key = "TargetName";
+ param.val = NULL;
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
+ CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_MISSING_PARMS);
+
+ /* expect failure: session not found */
+ param.key = "TargetName";
+ param.val = "iqn.2017-11.spdk.io:t0001";
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME1);
+ rsph->tsih = 1; /* non-zero TSIH: attach to an existing session */
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(conn.target_port == NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
+ CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_CONN_ADD_FAIL);
+
+ /* expect failure: session is found but the tag is wrong */
+ g_iscsi.MaxSessions = UT_ISCSI_TSIH * 2;
+ g_iscsi.session = calloc(1, sizeof(void *) * g_iscsi.MaxSessions);
+ g_iscsi.session[UT_ISCSI_TSIH - 1] = &sess;
+ sess.tsih = UT_ISCSI_TSIH;
+ rsph->tsih = UT_ISCSI_TSIH >> 8; /* non-zero TSIH: attach to an existing session */
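+ /* The tsih field in the BHS is a 16-bit big-endian value while UT_ISCSI_TSIH
+ * is 256 (0x0100), so storing 256 >> 8 == 1 places 0x01 in the high-order
+ * byte; the target presumably reads it back via from_be16() as 256, matching
+ * sess.tsih registered above.
+ */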
+ sess.tag = 1;
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(conn.target_port == NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
+ CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_CONN_ADD_FAIL);
+
+ /* expect success: drop the existing session */
+ rsph->tsih = 0; /* to create the session */
+ g_iscsi.AllowDuplicateIsid = false;
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(rc == 0);
+
+ /* expect success: create the session */
+ rsph->tsih = 0; /* to create the session */
+ g_iscsi.AllowDuplicateIsid = true;
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(rc == 0);
+
+ free(g_iscsi.session);
+}
+
+static void
+maxburstlength_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_iscsi_pdu *req_pdu, *data_out_pdu, *r2t_pdu;
+ struct iscsi_bhs_scsi_req *req;
+ struct iscsi_bhs_r2t *r2t;
+ struct iscsi_bhs_data_out *data_out;
+ struct spdk_iscsi_pdu *response_pdu;
+ int rc;
+
+ req_pdu = iscsi_get_pdu(&conn);
+ data_out_pdu = iscsi_get_pdu(&conn);
+
+ sess.ExpCmdSN = 0;
+ sess.MaxCmdSN = 64;
+ sess.session_type = SESSION_TYPE_NORMAL;
+ sess.MaxBurstLength = 1024;
+
+ lun.id = 0;
+
+ dev.lun[0] = &lun;
+
+ conn.full_feature = 1;
+ conn.sess = &sess;
+ conn.dev = &dev;
+ conn.state = ISCSI_CONN_STATE_RUNNING;
+ TAILQ_INIT(&conn.write_pdu_list);
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ req_pdu->bhs.opcode = ISCSI_OP_SCSI;
+ req_pdu->data_segment_len = 0;
+
+ req = (struct iscsi_bhs_scsi_req *)&req_pdu->bhs;
+
+ to_be32(&req->cmd_sn, 0);
+ to_be32(&req->expected_data_xfer_len, 1028);
+ to_be32(&req->itt, 0x1234);
+ req->write_bit = 1;
+ req->final_bit = 1;
+
+ rc = iscsi_pdu_hdr_handle(&conn, req_pdu);
+ if (rc == 0 && !req_pdu->is_rejected) {
+ rc = iscsi_pdu_payload_handle(&conn, req_pdu);
+ }
+ CU_ASSERT(rc == 0);
+
+ response_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(response_pdu != NULL);
+
+ /*
+ * Confirm that a correct R2T reply was sent in response to the
+ * SCSI request.
+ */
+ TAILQ_REMOVE(&g_write_pdu_list, response_pdu, tailq);
+ CU_ASSERT(response_pdu->bhs.opcode == ISCSI_OP_R2T);
+ r2t = (struct iscsi_bhs_r2t *)&response_pdu->bhs;
+ CU_ASSERT(from_be32(&r2t->desired_xfer_len) == 1024);
+ CU_ASSERT(from_be32(&r2t->buffer_offset) == 0);
+ CU_ASSERT(from_be32(&r2t->itt) == 0x1234);
+
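+ /* Send a Data-OUT claiming 1028 bytes, 4 bytes more than the 1024 the R2T
+ * requested (MaxBurstLength). Overrunning the R2T window is a protocol
+ * violation, so the connection is expected to be dropped below.
+ */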
+ data_out_pdu->bhs.opcode = ISCSI_OP_SCSI_DATAOUT;
+ data_out_pdu->bhs.flags = ISCSI_FLAG_FINAL;
+ data_out_pdu->data_segment_len = 1028;
+ data_out = (struct iscsi_bhs_data_out *)&data_out_pdu->bhs;
+ data_out->itt = r2t->itt;
+ data_out->ttt = r2t->ttt;
+ DSET24(data_out->data_segment_len, 1028);
+
+ rc = iscsi_pdu_hdr_handle(&conn, data_out_pdu);
+ if (rc == 0 && !data_out_pdu->is_rejected) {
+ rc = iscsi_pdu_payload_handle(&conn, data_out_pdu);
+ }
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ SPDK_CU_ASSERT_FATAL(response_pdu->task != NULL);
+ iscsi_task_disassociate_pdu(response_pdu->task);
+ iscsi_task_put(response_pdu->task);
+ iscsi_put_pdu(response_pdu);
+
+ r2t_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(r2t_pdu != NULL);
+ TAILQ_REMOVE(&g_write_pdu_list, r2t_pdu, tailq);
+ iscsi_put_pdu(r2t_pdu);
+
+ iscsi_put_pdu(data_out_pdu);
+ iscsi_put_pdu(req_pdu);
+}
+
+static void
+underflow_for_read_transfer_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_iscsi_pdu *pdu;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_data_in *datah;
+ uint32_t residual_count = 0;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ dev.lun[0] = &lun;
+ conn.dev = &dev;
+
+ pdu = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu->bhs;
+ scsi_req->read_bit = 1;
+
+ iscsi_task_set_pdu(&task, pdu);
+ task.parent = NULL;
+
+ task.scsi.iovs = &task.scsi.iov;
+ task.scsi.iovcnt = 1;
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+ task.scsi.data_transferred = 256;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ iscsi_task_response(&conn, &task);
+ iscsi_put_pdu(pdu);
+
+ /*
+ * In this case, a SCSI Data-In PDU should contain the Status
+ * for the data transfer.
+ */
+ to_be32(&residual_count, 256);
+
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ CU_ASSERT(pdu->bhs.opcode == ISCSI_OP_SCSI_DATAIN);
+
+ datah = (struct iscsi_bhs_data_in *)&pdu->bhs;
+
+ CU_ASSERT(datah->flags == (ISCSI_DATAIN_UNDERFLOW | ISCSI_FLAG_FINAL | ISCSI_DATAIN_STATUS));
+ CU_ASSERT(datah->res_cnt == residual_count);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+underflow_for_zero_read_transfer_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_iscsi_pdu *pdu;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_scsi_resp *resph;
+ uint32_t residual_count = 0, data_segment_len;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ dev.lun[0] = &lun;
+ conn.dev = &dev;
+
+ pdu = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu->bhs;
+ scsi_req->read_bit = 1;
+
+ iscsi_task_set_pdu(&task, pdu);
+ task.parent = NULL;
+
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+ task.scsi.data_transferred = 0;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ iscsi_task_response(&conn, &task);
+ iscsi_put_pdu(pdu);
+
+ /*
+ * In this case, only a SCSI Response PDU is expected and
+ * underflow must be set in it.
+ */
+ to_be32(&residual_count, 512);
+
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ CU_ASSERT(pdu->bhs.opcode == ISCSI_OP_SCSI_RSP);
+
+ resph = (struct iscsi_bhs_scsi_resp *)&pdu->bhs;
+
+ CU_ASSERT(resph->flags == (ISCSI_SCSI_UNDERFLOW | 0x80));
+
+ data_segment_len = DGET24(resph->data_segment_len);
+ CU_ASSERT(data_segment_len == 0);
+ CU_ASSERT(resph->res_cnt == residual_count);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+underflow_for_request_sense_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_iscsi_pdu *pdu1, *pdu2;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_data_in *datah;
+ struct iscsi_bhs_scsi_resp *resph;
+ uint32_t residual_count = 0, data_segment_len;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ dev.lun[0] = &lun;
+ conn.dev = &dev;
+
+ pdu1 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu1->bhs;
+ scsi_req->read_bit = 1;
+
+ iscsi_task_set_pdu(&task, pdu1);
+ task.parent = NULL;
+
+ task.scsi.iovs = &task.scsi.iov;
+ task.scsi.iovcnt = 1;
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+
+ task.scsi.sense_data_len = 18;
+ task.scsi.data_transferred = 18;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ iscsi_task_response(&conn, &task);
+ iscsi_put_pdu(pdu1);
+
+ /*
+ * In this case, a SCSI Data-In PDU and a SCSI Response PDU are returned.
+ * Sense data is set both in the payload and in the sense area.
+ * The SCSI Data-In PDU sets FINAL and the SCSI Response PDU sets UNDERFLOW.
+ *
+ * Other implementations may behave differently, but pinning down the current
+ * SPDK behavior in a unit test is valuable for any implementation.
+ */
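+ /* residual count = transfer_len (512) - sense payload transferred (18) = 494 */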
+ to_be32(&residual_count, 494);
+
+ pdu1 = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ CU_ASSERT(pdu1->bhs.opcode == ISCSI_OP_SCSI_DATAIN);
+
+ datah = (struct iscsi_bhs_data_in *)&pdu1->bhs;
+
+ CU_ASSERT(datah->flags == ISCSI_FLAG_FINAL);
+
+ data_segment_len = DGET24(datah->data_segment_len);
+ CU_ASSERT(data_segment_len == 18);
+ CU_ASSERT(datah->res_cnt == 0);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu1, tailq);
+ iscsi_put_pdu(pdu1);
+
+ pdu2 = TAILQ_FIRST(&g_write_pdu_list);
+ /* inform scan-build (clang 6) that these pointers are not the same */
+ SPDK_CU_ASSERT_FATAL(pdu1 != pdu2);
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ CU_ASSERT(pdu2->bhs.opcode == ISCSI_OP_SCSI_RSP);
+
+ resph = (struct iscsi_bhs_scsi_resp *)&pdu2->bhs;
+
+ CU_ASSERT(resph->flags == (ISCSI_SCSI_UNDERFLOW | 0x80));
+
+ data_segment_len = DGET24(resph->data_segment_len);
+ CU_ASSERT(data_segment_len == task.scsi.sense_data_len + 2);
+ CU_ASSERT(resph->res_cnt == residual_count);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu2, tailq);
+ iscsi_put_pdu(pdu2);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+underflow_for_check_condition_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_iscsi_pdu *pdu;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_scsi_resp *resph;
+ uint32_t data_segment_len;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ dev.lun[0] = &lun;
+ conn.dev = &dev;
+
+ pdu = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu->bhs;
+ scsi_req->read_bit = 1;
+
+ iscsi_task_set_pdu(&task, pdu);
+ task.parent = NULL;
+
+ task.scsi.iovs = &task.scsi.iov;
+ task.scsi.iovcnt = 1;
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+
+ task.scsi.sense_data_len = 18;
+ task.scsi.data_transferred = 18;
+ task.scsi.status = SPDK_SCSI_STATUS_CHECK_CONDITION;
+
+ iscsi_task_response(&conn, &task);
+ iscsi_put_pdu(pdu);
+
+ /*
+ * In this case, a SCSI Response PDU is returned.
+ * Sense data is set in sense area.
+ * Underflow is not set.
+ */
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ CU_ASSERT(pdu->bhs.opcode == ISCSI_OP_SCSI_RSP);
+
+ resph = (struct iscsi_bhs_scsi_resp *)&pdu->bhs;
+
+ CU_ASSERT(resph->flags == 0x80);
+
+ data_segment_len = DGET24(resph->data_segment_len);
+ CU_ASSERT(data_segment_len == task.scsi.sense_data_len + 2);
+ CU_ASSERT(resph->res_cnt == 0);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+add_transfer_task_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {};
+ struct spdk_iscsi_pdu *pdu, *tmp;
+ struct iscsi_bhs_r2t *r2th;
+ int rc, count = 0;
+ uint32_t buffer_offset, desired_xfer_len;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH; /* 1M */
+ sess.MaxOutstandingR2T = DEFAULT_MAXR2T; /* 4 */
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.queued_r2t_tasks);
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ pdu = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ pdu->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH; /* 64K */
+ task.scsi.transfer_len = 16 * 1024 * 1024;
+ iscsi_task_set_pdu(&task, pdu);
+
+ /* Verify that the task is queued when the outstanding R2T tasks are already at their maximum. */
+ conn.pending_r2t = DEFAULT_MAXR2T;
+
+ rc = add_transfer_task(&conn, &task);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(TAILQ_FIRST(&conn.queued_r2t_tasks) == &task);
+
+ TAILQ_REMOVE(&conn.queued_r2t_tasks, &task, link);
+ CU_ASSERT(TAILQ_EMPTY(&conn.queued_r2t_tasks));
+
+ /* Verify that multiple R2Ts are issued when the task is accepted. */
+ conn.pending_r2t = 0;
+
+ rc = add_transfer_task(&conn, &task);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(TAILQ_FIRST(&conn.active_r2t_tasks) == &task);
+
+ TAILQ_REMOVE(&conn.active_r2t_tasks, &task, link);
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+
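+ /* The 16 MiB transfer minus the 64 KiB of immediate data presumably arrives
+ * as (16 MiB - 64 KiB) / 64 KiB = 255 Data-OUT PDUs, hence the counts below.
+ */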
+ CU_ASSERT(conn.data_out_cnt == 255);
+ CU_ASSERT(conn.pending_r2t == 1);
+ CU_ASSERT(conn.ttt == 1);
+
+ CU_ASSERT(task.data_out_cnt == 255);
+ CU_ASSERT(task.ttt == 1);
+ CU_ASSERT(task.outstanding_r2t == sess.MaxOutstandingR2T);
+ CU_ASSERT(task.next_r2t_offset ==
+ pdu->data_segment_len + sess.MaxBurstLength * sess.MaxOutstandingR2T);
+
+
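+ /* Drain the issued R2Ts: buffer offsets should step by MaxBurstLength
+ * (1 MiB) starting right after the 64 KiB of immediate data, and each R2T
+ * should request a full MaxBurstLength.
+ */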
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ tmp = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, tmp, tailq);
+
+ r2th = (struct iscsi_bhs_r2t *)&tmp->bhs;
+
+ buffer_offset = from_be32(&r2th->buffer_offset);
+ CU_ASSERT(buffer_offset == pdu->data_segment_len + sess.MaxBurstLength * count);
+
+ desired_xfer_len = from_be32(&r2th->desired_xfer_len);
+ CU_ASSERT(desired_xfer_len == sess.MaxBurstLength);
+
+ iscsi_put_pdu(tmp);
+ count++;
+ }
+
+ CU_ASSERT(count == DEFAULT_MAXR2T);
+
+ iscsi_put_pdu(pdu);
+}
+
+static void
+get_transfer_task_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task1 = {}, task2 = {}, *task;
+ struct spdk_iscsi_pdu *pdu1, *pdu2, *pdu;
+ int rc;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ pdu1 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ pdu1->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task1.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(&task1, pdu1);
+
+ rc = add_transfer_task(&conn, &task1);
+ CU_ASSERT(rc == 0);
+
+ pdu2 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ pdu2->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task2.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(&task2, pdu2);
+
+ rc = add_transfer_task(&conn, &task2);
+ CU_ASSERT(rc == 0);
+
+ task = get_transfer_task(&conn, 1);
+ CU_ASSERT(task == &task1);
+
+ task = get_transfer_task(&conn, 2);
+ CU_ASSERT(task == &task2);
+
+ while (!TAILQ_EMPTY(&conn.active_r2t_tasks)) {
+ task = TAILQ_FIRST(&conn.active_r2t_tasks);
+ TAILQ_REMOVE(&conn.active_r2t_tasks, task, link);
+ }
+
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+ }
+
+ iscsi_put_pdu(pdu2);
+ iscsi_put_pdu(pdu1);
+}
+
+static void
+del_transfer_task_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task *task1, *task2, *task3, *task4, *task5;
+ struct spdk_iscsi_pdu *pdu1, *pdu2, *pdu3, *pdu4, *pdu5, *pdu;
+ int rc;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+ TAILQ_INIT(&conn.queued_r2t_tasks);
+
+ pdu1 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ pdu1->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ task1 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task1 != NULL);
+
+ task1->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(task1, pdu1);
+ task1->tag = 11;
+
+ rc = add_transfer_task(&conn, task1);
+ CU_ASSERT(rc == 0);
+
+ pdu2 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ pdu2->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ task2 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task2 != NULL);
+
+ task2->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(task2, pdu2);
+ task2->tag = 12;
+
+ rc = add_transfer_task(&conn, task2);
+ CU_ASSERT(rc == 0);
+
+ pdu3 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu3 != NULL);
+
+ pdu3->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ task3 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task3 != NULL);
+
+ task3->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(task3, pdu3);
+ task3->tag = 13;
+
+ rc = add_transfer_task(&conn, task3);
+ CU_ASSERT(rc == 0);
+
+ pdu4 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu4 != NULL);
+
+ pdu4->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ task4 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task4 != NULL);
+
+ task4->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(task4, pdu4);
+ task4->tag = 14;
+
+ rc = add_transfer_task(&conn, task4);
+ CU_ASSERT(rc == 0);
+
+ pdu5 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu5 != NULL);
+
+ pdu5->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ task5 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task5 != NULL);
+
+ task5->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(task5, pdu5);
+ task5->tag = 15;
+
+ rc = add_transfer_task(&conn, task5);
+ CU_ASSERT(rc == 0);
+
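+ /* Four tasks fill DEFAULT_MAXR2T, so task5 was queued rather than activated;
+ * it has no TTT yet and only becomes visible once task1 is deleted below.
+ */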
+ CU_ASSERT(get_transfer_task(&conn, 1) == task1);
+ CU_ASSERT(get_transfer_task(&conn, 5) == NULL);
+ iscsi_del_transfer_task(&conn, 11);
+ CU_ASSERT(get_transfer_task(&conn, 1) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 5) == task5);
+
+ CU_ASSERT(get_transfer_task(&conn, 2) == task2);
+ iscsi_del_transfer_task(&conn, 12);
+ CU_ASSERT(get_transfer_task(&conn, 2) == NULL);
+
+ CU_ASSERT(get_transfer_task(&conn, 3) == task3);
+ iscsi_del_transfer_task(&conn, 13);
+ CU_ASSERT(get_transfer_task(&conn, 3) == NULL);
+
+ CU_ASSERT(get_transfer_task(&conn, 4) == task4);
+ iscsi_del_transfer_task(&conn, 14);
+ CU_ASSERT(get_transfer_task(&conn, 4) == NULL);
+
+ CU_ASSERT(get_transfer_task(&conn, 5) == task5);
+ iscsi_del_transfer_task(&conn, 15);
+ CU_ASSERT(get_transfer_task(&conn, 5) == NULL);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+ }
+
+ iscsi_put_pdu(pdu5);
+ iscsi_put_pdu(pdu4);
+ iscsi_put_pdu(pdu3);
+ iscsi_put_pdu(pdu2);
+ iscsi_put_pdu(pdu1);
+}
+
+static void
+clear_all_transfer_tasks_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task *task1, *task2, *task3, *task4, *task5, *task6;
+ struct spdk_iscsi_pdu *pdu1, *pdu2, *pdu3, *pdu4, *pdu5, *pdu6, *pdu;
+ struct spdk_iscsi_pdu *mgmt_pdu1, *mgmt_pdu2;
+ struct spdk_scsi_lun lun1 = {}, lun2 = {};
+ uint32_t alloc_cmd_sn;
+ int rc;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+ TAILQ_INIT(&conn.queued_r2t_tasks);
+
+ alloc_cmd_sn = 10;
+
+ task1 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task1 != NULL);
+ pdu1 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ pdu1->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu1->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task1->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task1->scsi.lun = &lun1;
+ iscsi_task_set_pdu(task1, pdu1);
+
+ rc = add_transfer_task(&conn, task1);
+ CU_ASSERT(rc == 0);
+
+ mgmt_pdu1 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(mgmt_pdu1 != NULL);
+
+ mgmt_pdu1->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+
+ task2 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task2 != NULL);
+ pdu2 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ pdu2->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu2->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task2->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task2->scsi.lun = &lun1;
+ iscsi_task_set_pdu(task2, pdu2);
+
+ rc = add_transfer_task(&conn, task2);
+ CU_ASSERT(rc == 0);
+
+ task3 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task3 != NULL);
+ pdu3 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu3 != NULL);
+
+ pdu3->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu3->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task3->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task3->scsi.lun = &lun1;
+ iscsi_task_set_pdu(task3, pdu3);
+
+ rc = add_transfer_task(&conn, task3);
+ CU_ASSERT(rc == 0);
+
+ task4 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task4 != NULL);
+ pdu4 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu4 != NULL);
+
+ pdu4->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu4->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task4->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task4->scsi.lun = &lun2;
+ iscsi_task_set_pdu(task4, pdu4);
+
+ rc = add_transfer_task(&conn, task4);
+ CU_ASSERT(rc == 0);
+
+ task5 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task5 != NULL);
+ pdu5 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu5 != NULL);
+
+ pdu5->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu5->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task5->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task5->scsi.lun = &lun2;
+ iscsi_task_set_pdu(task5, pdu5);
+
+ rc = add_transfer_task(&conn, task5);
+ CU_ASSERT(rc == 0);
+
+ mgmt_pdu2 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(mgmt_pdu2 != NULL);
+
+ mgmt_pdu2->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+
+ task6 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task6 != NULL);
+ pdu6 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu6 != NULL);
+
+ pdu6->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu6->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task6->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task6->scsi.lun = &lun2;
+ iscsi_task_set_pdu(task6, pdu6);
+
+ rc = add_transfer_task(&conn, task6);
+ CU_ASSERT(rc == 0);
+
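+ /* Only four R2Ts were issued: tasks 5 and 6 presumably exceeded the pending
+ * R2T limit and are waiting on queued_r2t_tasks, so they have no TTT yet.
+ */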
+ CU_ASSERT(conn.ttt == 4);
+
+ CU_ASSERT(get_transfer_task(&conn, 1) == task1);
+ CU_ASSERT(get_transfer_task(&conn, 2) == task2);
+ CU_ASSERT(get_transfer_task(&conn, 3) == task3);
+ CU_ASSERT(get_transfer_task(&conn, 4) == task4);
+ CU_ASSERT(get_transfer_task(&conn, 5) == NULL);
+
+ iscsi_clear_all_transfer_task(&conn, &lun1, mgmt_pdu1);
+
+ CU_ASSERT(!TAILQ_EMPTY(&conn.queued_r2t_tasks));
+ CU_ASSERT(get_transfer_task(&conn, 1) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 2) == task2);
+ CU_ASSERT(get_transfer_task(&conn, 3) == task3);
+ CU_ASSERT(get_transfer_task(&conn, 4) == task4);
+ CU_ASSERT(get_transfer_task(&conn, 5) == task5);
+ CU_ASSERT(get_transfer_task(&conn, 6) == NULL);
+
+ iscsi_clear_all_transfer_task(&conn, &lun1, NULL);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.queued_r2t_tasks));
+ CU_ASSERT(get_transfer_task(&conn, 1) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 2) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 3) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 4) == task4);
+ CU_ASSERT(get_transfer_task(&conn, 5) == task5);
+ CU_ASSERT(get_transfer_task(&conn, 6) == task6);
+
+ iscsi_clear_all_transfer_task(&conn, &lun2, mgmt_pdu2);
+
+ CU_ASSERT(get_transfer_task(&conn, 4) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 5) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 6) == task6);
+
+ iscsi_clear_all_transfer_task(&conn, NULL, NULL);
+
+ CU_ASSERT(get_transfer_task(&conn, 6) == NULL);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+ }
+
+ iscsi_put_pdu(mgmt_pdu2);
+ iscsi_put_pdu(mgmt_pdu1);
+ iscsi_put_pdu(pdu6);
+ iscsi_put_pdu(pdu5);
+ iscsi_put_pdu(pdu4);
+ iscsi_put_pdu(pdu3);
+ iscsi_put_pdu(pdu2);
+ iscsi_put_pdu(pdu1);
+}
+
+static void
+build_iovs_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iovec iovs[5] = {};
+ uint8_t *data;
+ uint32_t mapped_length = 0;
+ int rc;
+
+ conn.header_digest = true;
+ conn.data_digest = true;
+
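+ /* With both digests enabled the PDU is written as: BHS (ISCSI_BHS_LEN, 48
+ * bytes) | header digest (ISCSI_DIGEST_LEN, 4 bytes) | 512-byte data segment
+ * | data digest (4 bytes). writev_offset is the number of bytes already
+ * written, so iscsi_build_iovs() must be able to resume in the middle of any
+ * of these fields.
+ */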
+ DSET24(&pdu.bhs.data_segment_len, 512);
+ data = calloc(1, 512);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ pdu.data = data;
+
+ pdu.bhs.total_ahs_len = 0;
+ pdu.bhs.opcode = ISCSI_OP_SCSI;
+
+ pdu.writev_offset = 0;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 512);
+ CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[3].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN / 2;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(iovs[0].iov_base == (void *)((uint8_t *)&pdu.bhs + ISCSI_BHS_LEN / 2));
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN / 2);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 512);
+ CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[3].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN / 2 + ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[1].iov_len == 512);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[2].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN / 2;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_base == (void *)((uint8_t *)pdu.header_digest + ISCSI_DIGEST_LEN / 2));
+ CU_ASSERT(iovs[0].iov_len == ISCSI_DIGEST_LEN / 2);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[1].iov_len == 512);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[2].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_DIGEST_LEN / 2 + 512 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(iovs[0].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[0].iov_len == 512);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == 512 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN / 2;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (void *)((uint8_t *)pdu.data_digest + ISCSI_DIGEST_LEN / 2));
+ CU_ASSERT(iovs[0].iov_len == ISCSI_DIGEST_LEN / 2);
+ CU_ASSERT(mapped_length == ISCSI_DIGEST_LEN / 2);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(mapped_length == 0);
+
+ pdu.writev_offset = 0;
+ rc = iscsi_build_iovs(&conn, iovs, 1, &pdu, &mapped_length);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN);
+
+ rc = iscsi_build_iovs(&conn, iovs, 2, &pdu, &mapped_length);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN);
+
+ rc = iscsi_build_iovs(&conn, iovs, 3, &pdu, &mapped_length);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 512);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512);
+
+ rc = iscsi_build_iovs(&conn, iovs, 4, &pdu, &mapped_length);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 512);
+ CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[3].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN);
+
+ free(data);
+}
+
+static void
+build_iovs_with_md_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iovec iovs[6] = {};
+ uint8_t *data;
+ uint32_t mapped_length = 0;
+ int rc;
+
+ conn.header_digest = true;
+ conn.data_digest = true;
+
+ DSET24(&pdu.bhs.data_segment_len, 4096 * 2);
+ data = calloc(1, (4096 + 128) * 2);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ pdu.data = data;
+ pdu.data_buf_len = (4096 + 128) * 2;
+
+ pdu.bhs.total_ahs_len = 0;
+ pdu.bhs.opcode = ISCSI_OP_SCSI;
+
+ rc = spdk_dif_ctx_init(&pdu.dif_ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ 0, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ pdu.dif_insert_or_strip = true;
+
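+ /* With DIF insert/strip, the buffer holds extended blocks of 4096 data bytes
+ * followed by 128 metadata bytes. Only the data portions belong to the
+ * 8192-byte data segment, so the iovecs are expected to skip each metadata
+ * region.
+ */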
+ pdu.writev_offset = 0;
+ rc = iscsi_build_iovs(&conn, iovs, 6, &pdu, &mapped_length);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 4096);
+ CU_ASSERT(iovs[3].iov_base == (void *)(pdu.data + 4096 + 128));
+ CU_ASSERT(iovs[3].iov_len == 4096);
+ CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[4].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 4096 * 2 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 2048;
+ rc = iscsi_build_iovs(&conn, iovs, 6, &pdu, &mapped_length);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_base == (void *)(pdu.data + 2048));
+ CU_ASSERT(iovs[0].iov_len == 2048);
+ CU_ASSERT(iovs[1].iov_base == (void *)(pdu.data + 4096 + 128));
+ CU_ASSERT(iovs[1].iov_len == 4096);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[2].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == 2048 + 4096 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 4096 * 2;
+ rc = iscsi_build_iovs(&conn, iovs, 6, &pdu, &mapped_length);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = 0;
+ rc = iscsi_build_iovs(&conn, iovs, 3, &pdu, &mapped_length);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 4096);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 4096);
+
+ free(data);
+}
+
+static void
+check_iscsi_reject(struct spdk_iscsi_pdu *pdu, uint8_t reason)
+{
+ struct spdk_iscsi_pdu *rsp_pdu;
+ struct iscsi_bhs_reject *reject_bhs;
+
+ CU_ASSERT(pdu->is_rejected == true);
+ rsp_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(rsp_pdu != NULL);
+ reject_bhs = (struct iscsi_bhs_reject *)&rsp_pdu->bhs;
+ CU_ASSERT(reject_bhs->reason == reason);
+
+ TAILQ_REMOVE(&g_write_pdu_list, rsp_pdu, tailq);
+ iscsi_put_pdu(rsp_pdu);
+ pdu->is_rejected = false;
+}
+
+static void
+check_login_response(uint8_t status_class, uint8_t status_detail)
+{
+ struct spdk_iscsi_pdu *rsp_pdu;
+ struct iscsi_bhs_login_rsp *login_rsph;
+
+ rsp_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(rsp_pdu != NULL);
+ login_rsph = (struct iscsi_bhs_login_rsp *)&rsp_pdu->bhs;
+ CU_ASSERT(login_rsph->status_class == status_class);
+ CU_ASSERT(login_rsph->status_detail == status_detail);
+
+ TAILQ_REMOVE(&g_write_pdu_list, rsp_pdu, tailq);
+ iscsi_put_pdu(rsp_pdu);
+}
+
+static void
+pdu_hdr_op_login_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iscsi_bhs_login_req *login_reqh;
+ int rc;
+
+ login_reqh = (struct iscsi_bhs_login_req *)&pdu.bhs;
+
+ /* Case 1 - On a discovery session, the target accepts only text requests with
+ * the SendTargets key and logout requests with the reason "close the session".
+ */
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+ conn.full_feature = true;
+ conn.sess = &sess;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 2 - Data segment length is limited to be not more than 8KB, the default
+ * FirstBurstLength, for login request.
+ */
+ sess.session_type = SESSION_TYPE_INVALID;
+ conn.full_feature = false;
+ conn.sess = NULL;
+ pdu.data_segment_len = SPDK_ISCSI_FIRST_BURST_LENGTH + 1;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 3 - PDU pool is empty */
+ pdu.data_segment_len = SPDK_ISCSI_FIRST_BURST_LENGTH;
+ g_pdu_pool_is_empty = true;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 4 - A login request with the C bit set to 1 must have the T bit set to 0. */
+ g_pdu_pool_is_empty = false;
+ login_reqh->flags |= ISCSI_LOGIN_TRANSIT;
+ login_reqh->flags |= ISCSI_LOGIN_CONTINUE;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_login_response(ISCSI_CLASS_INITIATOR_ERROR, ISCSI_LOGIN_INITIATOR_ERROR);
+
+ /* Case 5 - Both version-min and version-max must be set to 0x00. */
+ login_reqh->flags = 0;
+ login_reqh->version_min = ISCSI_VERSION + 1;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_login_response(ISCSI_CLASS_INITIATOR_ERROR, ISCSI_LOGIN_UNSUPPORTED_VERSION);
+
+ /* Case 6 - The T bit is correctly set to 1, but an invalid stage code is set in NSG. */
+ login_reqh->version_min = ISCSI_VERSION;
+ login_reqh->flags |= ISCSI_LOGIN_TRANSIT;
+ login_reqh->flags |= ISCSI_NSG_RESERVED_CODE;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_login_response(ISCSI_CLASS_INITIATOR_ERROR, ISCSI_LOGIN_INITIATOR_ERROR);
+
+ /* Case 7 - Login request is correct. Login response is initialized and set to
+ * the current connection.
+ */
+ login_reqh->flags = 0;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(conn.login_rsp_pdu != NULL);
+
+ iscsi_put_pdu(conn.login_rsp_pdu);
+}
+
+static void
+pdu_hdr_op_text_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iscsi_bhs_text_req *text_reqh;
+ int rc;
+
+ text_reqh = (struct iscsi_bhs_text_req *)&pdu.bhs;
+
+ conn.sess = &sess;
+
+ /* Case 1 - Data segment length for text request must not be more than
+ * FirstBurstLength plus extra space to account for digests.
+ */
+ pdu.data_segment_len = iscsi_get_max_immediate_data_size() + 1;
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 2 - A text request with the C bit set to 1 must have the F bit set to 0. */
+ pdu.data_segment_len = iscsi_get_max_immediate_data_size();
+ text_reqh->flags |= ISCSI_FLAG_FINAL;
+ text_reqh->flags |= ISCSI_TEXT_CONTINUE;
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == -1);
+
+ /* Case 3 - ExpStatSN of the text request is expected to match StatSN of the current
+ * connection. However, the StarPort iSCSI initiator did not follow this
+ * expectation. In that case we overwrite StatSN with ExpStatSN and process
+ * the request as correct.
+ */
+ text_reqh->flags = 0;
+ to_be32(&text_reqh->exp_stat_sn, 1234);
+ to_be32(&conn.StatSN, 4321);
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(conn.StatSN == 1234);
+
+ /* Case 4 - Text request is the first in the sequence of text requests and responses,
+ * and so its ITT is saved in the current session.
+ */
+ sess.current_text_itt = 0xffffffffU;
+ to_be32(&text_reqh->itt, 5678);
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(sess.current_text_itt == 5678);
+
+ /* Case 5 - If a text request is sent as part of a sequence of text requests
+ * and responses, its ITT must be the same for all of them; here it is not. */
+ sess.current_text_itt = 5679;
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 6 - Different from case 5, its ITT matches the value saved in the connection. */
+ text_reqh->flags = 0;
+ sess.current_text_itt = 5678;
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+check_logout_response(uint8_t response, uint32_t stat_sn, uint32_t exp_cmd_sn,
+ uint32_t max_cmd_sn)
+{
+ struct spdk_iscsi_pdu *rsp_pdu;
+ struct iscsi_bhs_logout_resp *logout_rsph;
+
+ rsp_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(rsp_pdu != NULL);
+ logout_rsph = (struct iscsi_bhs_logout_resp *)&rsp_pdu->bhs;
+ CU_ASSERT(logout_rsph->response == response);
+ CU_ASSERT(from_be32(&logout_rsph->stat_sn) == stat_sn);
+ CU_ASSERT(from_be32(&logout_rsph->exp_cmd_sn) == exp_cmd_sn);
+ CU_ASSERT(from_be32(&logout_rsph->max_cmd_sn) == max_cmd_sn);
+
+ TAILQ_REMOVE(&g_write_pdu_list, rsp_pdu, tailq);
+ iscsi_put_pdu(rsp_pdu);
+}
+
+static void
+pdu_hdr_op_logout_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iscsi_bhs_logout_req *logout_reqh;
+ int rc;
+
+ logout_reqh = (struct iscsi_bhs_logout_req *)&pdu.bhs;
+
+ /* Case 1 - Target can accept logout request only with the reason "close the session"
+ * on discovery session.
+ */
+ logout_reqh->reason = 1;
+ conn.sess = &sess;
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+
+ rc = iscsi_pdu_hdr_op_logout(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 2 - Session is not established yet but connection was closed successfully. */
+ conn.sess = NULL;
+ conn.StatSN = 1234;
+ to_be32(&logout_reqh->exp_stat_sn, 1234);
+ pdu.cmd_sn = 5678;
+
+ rc = iscsi_pdu_hdr_op_logout(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_logout_response(0, 1234, 5678, 5678);
+ CU_ASSERT(conn.StatSN == 1235);
+
+ /* Case 3 - Session type is normal but CID was not found. Hence connection or session
+ * was not closed.
+ */
+ sess.session_type = SESSION_TYPE_NORMAL;
+ sess.ExpCmdSN = 5679;
+ sess.connections = 1;
+ conn.sess = &sess;
+ conn.id = 1;
+
+ rc = iscsi_pdu_hdr_op_logout(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_logout_response(1, 1235, 5679, 1);
+ CU_ASSERT(conn.StatSN == 1236);
+ CU_ASSERT(sess.MaxCmdSN == 1);
+
+ /* Case 4 - Session type is normal and CID was found. Connection or session was closed
+ * successfully.
+ */
+ to_be16(&logout_reqh->cid, 1);
+
+ rc = iscsi_pdu_hdr_op_logout(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_logout_response(0, 1236, 5679, 2);
+ CU_ASSERT(conn.StatSN == 1237);
+ CU_ASSERT(sess.MaxCmdSN == 2);
+
+ /* Case 5 - PDU pool is empty. */
+ g_pdu_pool_is_empty = true;
+
+ rc = iscsi_pdu_hdr_op_logout(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ g_pdu_pool_is_empty = false;
+}
+
+static void
+check_scsi_task(struct spdk_iscsi_pdu *pdu, enum spdk_scsi_data_dir dir)
+{
+ struct spdk_iscsi_task *task;
+
+ task = pdu->task;
+ CU_ASSERT(task != NULL);
+ CU_ASSERT(task->pdu == pdu);
+ CU_ASSERT(task->scsi.dxfer_dir == dir);
+
+ iscsi_task_put(task);
+ pdu->task = NULL;
+}
+
+static void
+pdu_hdr_op_scsi_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct iscsi_bhs_scsi_req *scsi_reqh;
+ int rc;
+
+ scsi_reqh = (struct iscsi_bhs_scsi_req *)&pdu.bhs;
+
+ conn.sess = &sess;
+ conn.dev = &dev;
+
+ /* Case 1 - SCSI command is acceptable only on normal session. */
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 2 - Task pool is empty. */
+ g_task_pool_is_empty = true;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ g_task_pool_is_empty = false;
+
+ /* Case 3 - bidirectional operations (both R and W flags are set to 1) are not supported. */
+ sess.session_type = SESSION_TYPE_NORMAL;
+ scsi_reqh->read_bit = 1;
+ scsi_reqh->write_bit = 1;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 4 - LUN is hot-removed; the handler returns immediately without a task. */
+ scsi_reqh->write_bit = 0;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pdu.task == NULL);
+
+ /* Case 5 - SCSI read command PDU is correct, and the configured iSCSI task is set to the PDU. */
+ dev.lun[0] = &lun;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_scsi_task(&pdu, SPDK_SCSI_DIR_FROM_DEV);
+
+ /* Case 6 - For SCSI write command PDU, its data segment length must not be more than
+ * FirstBurstLength plus extra space to account for digests.
+ */
+ scsi_reqh->read_bit = 0;
+ scsi_reqh->write_bit = 1;
+ pdu.data_segment_len = iscsi_get_max_immediate_data_size() + 1;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 7 - For SCSI write command PDU, its data segment length must not be more than
+ * Expected Data Transfer Length (EDTL).
+ */
+ pdu.data_segment_len = iscsi_get_max_immediate_data_size();
+ to_be32(&scsi_reqh->expected_data_xfer_len, pdu.data_segment_len - 1);
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 8 - If ImmediateData is not enabled for the session, SCSI write command PDU
+ * cannot have data segment.
+ */
+ to_be32(&scsi_reqh->expected_data_xfer_len, pdu.data_segment_len);
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 9 - For SCSI write command PDU, its data segment length must not be more
+ * than FirstBurstLength.
+ */
+ sess.ImmediateData = true;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+	/* Case 10 - SCSI write command PDU is correct, and the constructed iSCSI task is attached to the PDU. */
+ sess.FirstBurstLength = pdu.data_segment_len;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_scsi_task(&pdu, SPDK_SCSI_DIR_TO_DEV);
+
+ /* Case 11 - R and W must not both be 0 when EDTL is not 0. */
+ scsi_reqh->write_bit = 0;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_INVALID_PDU_FIELD);
+
+	/* Case 12 - R and W are both 0 and EDTL is also 0, and hence the SCSI command PDU is accepted. */
+ to_be32(&scsi_reqh->expected_data_xfer_len, 0);
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_scsi_task(&pdu, SPDK_SCSI_DIR_NONE);
+}
+
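+/*
+ * Helper: pop the queued Task Management response PDU from g_write_pdu_list
+ * and verify its response code, initiator task tag, ExpCmdSN, and MaxCmdSN.
+ */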
+static void
+check_iscsi_task_mgmt_response(uint8_t response, uint32_t task_tag, uint32_t stat_sn,
+ uint32_t exp_cmd_sn, uint32_t max_cmd_sn)
+{
+ struct spdk_iscsi_pdu *rsp_pdu;
+ struct iscsi_bhs_task_resp *rsph;
+
+ rsp_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(rsp_pdu != NULL);
+ rsph = (struct iscsi_bhs_task_resp *)&rsp_pdu->bhs;
+ CU_ASSERT(rsph->response == response);
+ CU_ASSERT(from_be32(&rsph->itt) == task_tag);
+ CU_ASSERT(from_be32(&rsph->exp_cmd_sn) == exp_cmd_sn);
+ CU_ASSERT(from_be32(&rsph->max_cmd_sn) == max_cmd_sn);
+
+ TAILQ_REMOVE(&g_write_pdu_list, rsp_pdu, tailq);
+ iscsi_put_pdu(rsp_pdu);
+}
+
+static void
+pdu_hdr_op_task_mgmt_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct iscsi_bhs_task_req *task_reqh;
+ int rc;
+
+	/* TBD: For now this test covers only the error paths taken before an iSCSI
+	 * task is created. Testing iSCSI task creation in iscsi_pdu_hdr_op_task()
+	 * in a unit test is not simple, so it will be done separately later.
+	 */
+
+ task_reqh = (struct iscsi_bhs_task_req *)&pdu.bhs;
+
+ conn.sess = &sess;
+ conn.dev = &dev;
+
+	/* Case 1 - A Task Management Function request PDU is acceptable only on a normal session. */
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+	/* Case 2 - LUN is hot removed. A "LUN does not exist" response is sent. */
+ sess.session_type = SESSION_TYPE_NORMAL;
+ task_reqh->immediate = 0;
+ to_be32(&task_reqh->itt, 1234);
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_LUN_NOT_EXIST, 1234, 0, 0, 1);
+
+ /* Case 3 - Unassigned function is specified. "Function rejected" response is sent. */
+ dev.lun[0] = &lun;
+ task_reqh->flags = 0;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_REJECTED, 1234, 0, 0, 2);
+
+ /* Case 4 - CLEAR TASK SET is not supported. "Task management function not supported"
+ * response is sent.
+ */
+ task_reqh->flags = ISCSI_TASK_FUNC_CLEAR_TASK_SET;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_FUNC_NOT_SUPPORTED, 1234, 0, 0, 3);
+
+ /* Case 5 - CLEAR ACA is not supported. "Task management function not supported" is sent. */
+ task_reqh->flags = ISCSI_TASK_FUNC_CLEAR_ACA;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_FUNC_NOT_SUPPORTED, 1234, 0, 0, 4);
+
+	/* Case 6 - TARGET WARM RESET is not supported. "Task management function not supported"
+	 * is sent.
+	 */
+ task_reqh->flags = ISCSI_TASK_FUNC_TARGET_WARM_RESET;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_FUNC_NOT_SUPPORTED, 1234, 0, 0, 5);
+
+	/* Case 7 - TARGET COLD RESET is not supported. "Task management function not supported"
+	 * is sent.
+	 */
+ task_reqh->flags = ISCSI_TASK_FUNC_TARGET_COLD_RESET;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_FUNC_NOT_SUPPORTED, 1234, 0, 0, 6);
+
+ /* Case 8 - TASK REASSIGN is not supported. "Task management function not supported" is sent. */
+ task_reqh->flags = ISCSI_TASK_FUNC_TASK_REASSIGN;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_FUNC_NOT_SUPPORTED, 1234, 0, 0, 7);
+}
+
+static void
+pdu_hdr_op_nopout_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iscsi_bhs_nop_out *nopout_reqh;
+ int rc;
+
+ nopout_reqh = (struct iscsi_bhs_nop_out *)&pdu.bhs;
+
+ conn.sess = &sess;
+
+	/* Case 1 - A NOP-Out PDU is acceptable only on a normal session. */
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+
+ rc = iscsi_pdu_hdr_op_nopout(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 2 - The length of the reflected ping data is limited to MaxRecvDataSegmentLength. */
+ sess.session_type = SESSION_TYPE_NORMAL;
+ pdu.data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH + 1;
+
+ rc = iscsi_pdu_hdr_op_nopout(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+	/* Case 3 - If the Initiator Task Tag is 0xffffffff, the I bit must be set
+	 * to 1 and the Target Transfer Tag should be copied from a NOP-In PDU.
+	 * This case satisfies the former but not the latter; that error is
+	 * ignored for now.
+	 */
+ pdu.data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ conn.id = 1234;
+ to_be32(&nopout_reqh->ttt, 1235);
+ to_be32(&nopout_reqh->itt, 0xffffffffU);
+ nopout_reqh->immediate = 1;
+
+ rc = iscsi_pdu_hdr_op_nopout(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+
+	/* Case 4 - The I bit is not set even though the ITT is 0xffffffff. This error is not ignored. */
+ nopout_reqh->immediate = 0;
+
+ rc = iscsi_pdu_hdr_op_nopout(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+}
+
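+/*
+ * Helper: pop the queued R2T PDU and verify its opcode, LUN, buffer offset,
+ * and desired data transfer length against the given task.
+ */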
+static void
+check_iscsi_r2t(struct spdk_iscsi_task *task, uint32_t len)
+{
+ struct spdk_iscsi_pdu *rsp_pdu;
+ struct iscsi_bhs_r2t *rsph;
+
+ rsp_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(rsp_pdu != NULL);
+ rsph = (struct iscsi_bhs_r2t *)&rsp_pdu->bhs;
+ CU_ASSERT(rsph->opcode == ISCSI_OP_R2T);
+ CU_ASSERT(from_be64(&rsph->lun) == spdk_scsi_lun_id_int_to_fmt(task->lun_id));
+ CU_ASSERT(from_be32(&rsph->buffer_offset) == task->next_r2t_offset);
+ CU_ASSERT(from_be32(&rsph->desired_xfer_len) == len);
+
+ TAILQ_REMOVE(&g_write_pdu_list, rsp_pdu, tailq);
+ iscsi_put_pdu(rsp_pdu);
+}
+
+static void
+pdu_hdr_op_data_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct spdk_iscsi_task primary = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct iscsi_bhs_data_out *data_reqh;
+ int rc;
+
+ data_reqh = (struct iscsi_bhs_data_out *)&pdu.bhs;
+
+ conn.sess = &sess;
+ conn.dev = &dev;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+	/* Case 1 - A SCSI Data-Out PDU is acceptable only on a normal session. */
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 2 - Data segment length must not be more than MaxRecvDataSegmentLength. */
+ sess.session_type = SESSION_TYPE_NORMAL;
+ pdu.data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH + 1;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 3 - R2T task whose Target Transfer Tag matches is not found. */
+ pdu.data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_INVALID_PDU_FIELD);
+
+ /* Case 4 - R2T task whose Target Transfer Tag matches is found but data segment length
+ * is more than Desired Data Transfer Length of the R2T.
+ */
+ primary.desired_data_transfer_length = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH - 1;
+ conn.pending_r2t = 1;
+ TAILQ_INSERT_TAIL(&conn.active_r2t_tasks, &primary, link);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 5 - Initiator task tag doesn't match tag of R2T task. */
+ primary.desired_data_transfer_length = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ to_be32(&data_reqh->itt, 1);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_INVALID_PDU_FIELD);
+
+ /* Case 6 - DataSN doesn't match the Data-Out PDU number within the current
+ * output sequence.
+ */
+ to_be32(&data_reqh->itt, 0);
+ to_be32(&data_reqh->data_sn, 1);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+	/* Case 7 - Data-Out PDUs within a sequence must arrive at increasing,
+	 * non-overlapping buffer offsets, but this PDU's offset violates that.
+	 */
+ to_be32(&data_reqh->data_sn, 0);
+ to_be32(&data_reqh->buffer_offset, 4096);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 8 - Data segment length must not exceed MaxBurstLength. */
+ to_be32(&data_reqh->buffer_offset, 0);
+ sess.MaxBurstLength = pdu.data_segment_len - 1;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 9 - LUN is hot removed. */
+ sess.MaxBurstLength = pdu.data_segment_len * 4;
+ to_be32(&data_reqh->data_sn, primary.r2t_datasn);
+ to_be32(&data_reqh->buffer_offset, primary.next_expected_r2t_offset);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pdu.task == NULL);
+
+	/* Case 10 - SCSI Data-Out PDU is correct and processed. The created task is
+	 * attached to the PDU, but the F bit is 0 and hence no R2T is sent.
+	 */
+ dev.lun[0] = &lun;
+ to_be32(&data_reqh->data_sn, primary.r2t_datasn);
+ to_be32(&data_reqh->buffer_offset, primary.next_expected_r2t_offset);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pdu.task != NULL);
+ iscsi_task_put(pdu.task);
+ pdu.task = NULL;
+
+	/* Case 11 - SCSI Data-Out PDU is correct and processed. The created task is
+	 * attached to the PDU, and the F bit is 1 and hence an R2T is sent.
+	 */
+ data_reqh->flags |= ISCSI_FLAG_FINAL;
+ to_be32(&data_reqh->data_sn, primary.r2t_datasn);
+ to_be32(&data_reqh->buffer_offset, primary.next_expected_r2t_offset);
+ primary.scsi.transfer_len = pdu.data_segment_len * 5;
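+	/* With MaxBurstLength set to 4 * data_segment_len above, the R2T sent for
+	 * the rest of the transfer is expected to request exactly that much.
+	 */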
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pdu.task != NULL);
+ check_iscsi_r2t(pdu.task, pdu.data_segment_len * 4);
+ iscsi_task_put(pdu.task);
+
+ /* Case 12 - Task pool is empty. */
+ to_be32(&data_reqh->data_sn, primary.r2t_datasn);
+ to_be32(&data_reqh->buffer_offset, primary.next_expected_r2t_offset);
+ g_task_pool_is_empty = true;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ g_task_pool_is_empty = false;
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("iscsi_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, op_login_check_target_test);
+ CU_ADD_TEST(suite, op_login_session_normal_test);
+ CU_ADD_TEST(suite, maxburstlength_test);
+ CU_ADD_TEST(suite, underflow_for_read_transfer_test);
+ CU_ADD_TEST(suite, underflow_for_zero_read_transfer_test);
+ CU_ADD_TEST(suite, underflow_for_request_sense_test);
+ CU_ADD_TEST(suite, underflow_for_check_condition_test);
+ CU_ADD_TEST(suite, add_transfer_task_test);
+ CU_ADD_TEST(suite, get_transfer_task_test);
+ CU_ADD_TEST(suite, del_transfer_task_test);
+ CU_ADD_TEST(suite, clear_all_transfer_tasks_test);
+ CU_ADD_TEST(suite, build_iovs_test);
+ CU_ADD_TEST(suite, build_iovs_with_md_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_login_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_text_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_logout_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_scsi_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_task_mgmt_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_nopout_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_data_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/param.c/.gitignore b/src/spdk/test/unit/lib/iscsi/param.c/.gitignore
new file mode 100644
index 000000000..269921462
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/param.c/.gitignore
@@ -0,0 +1 @@
+param_ut
diff --git a/src/spdk/test/unit/lib/iscsi/param.c/Makefile b/src/spdk/test/unit/lib/iscsi/param.c/Makefile
new file mode 100644
index 000000000..d1b567b54
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/param.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = param_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/param.c/param_ut.c b/src/spdk/test/unit/lib/iscsi/param.c/param_ut.c
new file mode 100644
index 000000000..ccf62643f
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/param.c/param_ut.c
@@ -0,0 +1,400 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/scsi.h"
+
+#include "spdk_cunit.h"
+
+#include "../common.c"
+#include "iscsi/param.c"
+
+#include "spdk_internal/mock.h"
+
+struct spdk_iscsi_globals g_iscsi;
+
+DEFINE_STUB(iscsi_find_tgt_node, struct spdk_iscsi_tgt_node *,
+ (const char *target_name), NULL);
+
+DEFINE_STUB(iscsi_tgt_node_access, bool,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_tgt_node *target,
+ const char *iqn, const char *addr),
+ false);
+
+DEFINE_STUB(iscsi_send_tgts, int,
+ (struct spdk_iscsi_conn *conn, const char *iiqn, const char *iaddr,
+ const char *tiqn, uint8_t *data, int alloc_len, int data_len),
+ 0);
+
+static void
+burst_length_param_negotiation(int FirstBurstLength, int MaxBurstLength,
+			       int initialR2T)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct iscsi_param *params;
+ struct iscsi_param **params_p;
+ char data[8192];
+ int rc;
+ int total, len;
+
+ total = 0;
+ params = NULL;
+ params_p = &params;
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(data, 0, 8192);
+
+ sess.ExpCmdSN = 0;
+ sess.MaxCmdSN = 64;
+ sess.session_type = SESSION_TYPE_NORMAL;
+ sess.params = NULL;
+ sess.MaxBurstLength = 65536;
+ sess.InitialR2T = true;
+ sess.FirstBurstLength = SPDK_ISCSI_FIRST_BURST_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ /* set default params */
+ rc = iscsi_sess_params_init(&sess.params);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_param_set_int(sess.params, "FirstBurstLength",
+ sess.FirstBurstLength);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_param_set_int(sess.params, "MaxBurstLength",
+ sess.MaxBurstLength);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_param_set(sess.params, "InitialR2T",
+ sess.InitialR2T ? "Yes" : "No");
+ CU_ASSERT(rc == 0);
+
+ conn.full_feature = 1;
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 65536;
+
+ rc = iscsi_conn_params_init(&conn.params);
+ CU_ASSERT(rc == 0);
+
+ /* construct the data */
+ len = snprintf(data + total, 8192 - total, "%s=%d",
+ "FirstBurstLength", FirstBurstLength);
+ total += len + 1;
+
+ len = snprintf(data + total, 8192 - total, "%s=%d",
+ "MaxBurstLength", MaxBurstLength);
+ total += len + 1;
+
+ len = snprintf(data + total, 8192 - total, "%s=%d",
+ "InitialR2T", initialR2T);
+ total += len + 1;
+
+ /* add one extra NUL byte at the end to match real iSCSI params */
+ total++;
+
+ /* store incoming parameters */
+ rc = iscsi_parse_params(params_p, data, total, false, NULL);
+ CU_ASSERT(rc == 0);
+
+ /* negotiate parameters */
+ rc = iscsi_negotiate_params(&conn, params_p,
+ data, 8192, rc);
+ CU_ASSERT(rc > 0);
+
+ rc = iscsi_copy_param2var(&conn);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(conn.sess->FirstBurstLength <= SPDK_ISCSI_FIRST_BURST_LENGTH);
+ CU_ASSERT(conn.sess->FirstBurstLength <= conn.sess->MaxBurstLength);
+ CU_ASSERT(conn.sess->MaxBurstLength <= SPDK_ISCSI_MAX_BURST_LENGTH);
+ CU_ASSERT(conn.sess->MaxOutstandingR2T == 1);
+
+ iscsi_param_free(sess.params);
+ iscsi_param_free(conn.params);
+ iscsi_param_free(*params_p);
+}
+
+static void
+param_negotiation_test(void)
+{
+	burst_length_param_negotiation(8192, 16384, 0);
+	burst_length_param_negotiation(8192, 16384, 1);
+	burst_length_param_negotiation(8192, 1024, 1);
+	burst_length_param_negotiation(8192, 1024, 0);
+	burst_length_param_negotiation(512, 1024, 1);
+	burst_length_param_negotiation(512, 1024, 0);
+}
+
+static void
+list_negotiation_test(void)
+{
+ int add_param_value = 0;
+ struct iscsi_param param = {};
+ char *new_val;
+ char valid_list_buf[1024];
+ char in_val_buf[1024];
+
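+/*
+ * TEST_LIST negotiates in_val against valid_list and, when expected_result is
+ * non-NULL, asserts the outcome. Judging from the cases below, the negotiated
+ * value is the first entry of the initiator's list that also appears in the
+ * target's valid_list.
+ */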
+#define TEST_LIST(valid_list, in_val, expected_result) \
+ do { \
+ snprintf(valid_list_buf, sizeof(valid_list_buf), "%s", valid_list); \
+ snprintf(in_val_buf, sizeof(in_val_buf), "%s", in_val); \
+ new_val = iscsi_negotiate_param_list(&add_param_value, &param, valid_list_buf, in_val_buf, NULL); \
+ if (expected_result) { \
+ SPDK_CU_ASSERT_FATAL(new_val != NULL); \
+ CU_ASSERT_STRING_EQUAL(new_val, expected_result); \
+ } \
+ } while (0)
+
+ TEST_LIST("None", "None", "None");
+ TEST_LIST("CHAP,None", "None", "None");
+ TEST_LIST("CHAP,None", "CHAP", "CHAP");
+ TEST_LIST("KRB5,SRP,CHAP,None", "SRP,CHAP,None", "SRP");
+ TEST_LIST("KRB5,SRP,CHAP,None", "CHAP,SRP,None", "CHAP");
+ TEST_LIST("KRB5,SRP,CHAP,None", "SPKM1,SRP,CHAP,None", "SRP");
+ TEST_LIST("KRB5,SRP,None", "CHAP,None", "None");
+}
+
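+/*
+ * Helpers for the parser tests: PARSE feeds a string literal to
+ * iscsi_parse_params() (len excludes the literal's implicit terminating NUL,
+ * so embedded '\0' separators are counted but the final one is not), and
+ * EXPECT_VAL/EXPECT_NULL check the resulting parameter list.
+ */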
+#define PARSE(strconst, partial_enabled, partial_text) \
+ data = strconst; \
+ len = sizeof(strconst) - 1; \
+ rc = iscsi_parse_params(&params, data, len, partial_enabled, partial_text)
+
+#define EXPECT_VAL(key, expected_value) \
+ { \
+ const char *val = iscsi_param_get_val(params, key); \
+ CU_ASSERT(val != NULL); \
+ if (val != NULL) { \
+ CU_ASSERT(strcmp(val, expected_value) == 0); \
+ } \
+ }
+
+#define EXPECT_NULL(key) \
+ CU_ASSERT(iscsi_param_get_val(params, key) == NULL)
+
+static void
+parse_valid_test(void)
+{
+ struct iscsi_param *params = NULL;
+ int rc;
+ char *data;
+ int len;
+ char *partial_parameter = NULL;
+
+ /* simple test with a single key=value */
+ PARSE("Abc=def\0", false, NULL);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("Abc", "def");
+
+ /* multiple key=value pairs */
+ PARSE("Aaa=bbbbbb\0Xyz=test\0", false, NULL);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("Aaa", "bbbbbb");
+ EXPECT_VAL("Xyz", "test");
+
+ /* value with embedded '=' */
+ PARSE("A=b=c\0", false, NULL);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("A", "b=c");
+
+ /* CHAP_C=AAAA.... with value length 8192 */
+	len = strlen("CHAP_C=") + ISCSI_TEXT_MAX_VAL_LEN + 1 /* null terminator */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ memcpy(data, "CHAP_C", 6);
+ data[6] = '=';
+ data[len - 1] = '\0';
+ rc = iscsi_parse_params(&params, data, len, false, NULL);
+ CU_ASSERT(rc == 0);
+ free(data);
+
+ /* partial parameter: value is partial */
+ PARSE("C=AAA\0D=B", true, &partial_parameter);
+ SPDK_CU_ASSERT_FATAL(partial_parameter != NULL);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "D=B");
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("C", "AAA");
+ EXPECT_NULL("D");
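+	/* The next buffer continues the partial "D=B": "XXXX" completes the value,
+	 * so D becomes "BXXXX".
+	 */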
+ PARSE("XXXX\0E=UUUU\0", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("D", "BXXXX");
+ EXPECT_VAL("E", "UUUU");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ /* partial parameter: key is partial */
+ PARSE("IAMAFAK", true, &partial_parameter);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "IAMAFAK");
+ CU_ASSERT(rc == 0);
+ EXPECT_NULL("IAMAFAK");
+ PARSE("EDKEY=TTTT\0F=IIII", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("IAMAFAKEDKEY", "TTTT");
+ EXPECT_VAL("F", "IIII");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ /* Second partial parameter is the only parameter */
+ PARSE("OOOO", true, &partial_parameter);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "OOOO");
+ CU_ASSERT(rc == 0);
+ EXPECT_NULL("OOOO");
+ PARSE("LL=MMMM", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("OOOOLL", "MMMM");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ partial_parameter = NULL;
+ data = "PartialKey=";
+ len = 7;
+ rc = iscsi_parse_params(&params, data, len, true, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "Partial");
+ EXPECT_NULL("PartialKey");
+ PARSE("Key=Value", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("PartialKey", "Value");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ iscsi_param_free(params);
+}
+
+static void
+parse_invalid_test(void)
+{
+ struct iscsi_param *params = NULL;
+ int rc;
+ char *data;
+ int len;
+
+ /* key without '=' */
+ PARSE("Abc\0", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("Abc");
+
+ /* multiple key=value pairs, one missing '=' */
+ PARSE("Abc=def\0Xyz\0Www=test\0", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_VAL("Abc", "def");
+ EXPECT_NULL("Xyz");
+ EXPECT_NULL("Www");
+
+ /* empty key */
+ PARSE("=abcdef", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("");
+
+ /* CHAP_C=AAAA.... with value length 8192 + 1 */
+	len = strlen("CHAP_C=") + ISCSI_TEXT_MAX_VAL_LEN + 1 /* max value len + 1 */ +
+	      1 /* null terminator */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ memcpy(data, "CHAP_C", 6);
+ data[6] = '=';
+ data[len - 1] = '\0';
+ rc = iscsi_parse_params(&params, data, len, false, NULL);
+ free(data);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("CHAP_C");
+
+ /* Test simple value, length of value bigger than 255 */
+	len = strlen("A=") + ISCSI_TEXT_MAX_SIMPLE_VAL_LEN + 1 /* max simple value len + 1 */ +
+	      1 /* null terminator */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ data[1] = '=';
+ data[len - 1] = '\0';
+ rc = iscsi_parse_params(&params, data, len, false, NULL);
+ free(data);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("A");
+
+ /* key length bigger than 63 */
+	len = ISCSI_TEXT_MAX_KEY_LEN + 1 /* max key length + 1 */ + 1 /* = */ + 1 /* A */ +
+	      1 /* null terminator */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ data[64] = '=';
+ data[len - 1] = '\0';
+ rc = iscsi_parse_params(&params, data, len, false, NULL);
+ free(data);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("A");
+
+ /* duplicated key */
+ PARSE("B=BB", false, NULL);
+ CU_ASSERT(rc == 0);
+ PARSE("B=BBBB", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_VAL("B", "BB");
+
+	/* Test where the data buffer has non-NUL characters past the end of
+	 * the valid data region. This can happen with the SPDK iSCSI target,
+	 * since data buffers are reused and we do not zero them after they
+	 * are freed, as doing so would be too expensive. Added as part of
+	 * fixing an intermittent Calsoft failure that triggered this bug.
+	 */
+ data = "MaxRecvDataSegmentLength=81928";
+ len = strlen(data) - 1;
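+	/* len excludes the trailing '8', so the parser must stop at "8192" even
+	 * though a non-NUL byte sits just past the end of the valid region.
+	 */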
+ rc = iscsi_parse_params(&params, data, len, false, NULL);
+ EXPECT_VAL("MaxRecvDataSegmentLength", "8192");
+ CU_ASSERT(rc == 0);
+ iscsi_param_free(params);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("iscsi_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, param_negotiation_test);
+ CU_ADD_TEST(suite, list_negotiation_test);
+ CU_ADD_TEST(suite, parse_valid_test);
+ CU_ADD_TEST(suite, parse_invalid_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore b/src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore
new file mode 100644
index 000000000..106ffebc2
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore
@@ -0,0 +1 @@
+portal_grp_ut
diff --git a/src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile b/src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile
new file mode 100644
index 000000000..f3ca0646f
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf
+
+TEST_FILE = portal_grp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c b/src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c
new file mode 100644
index 000000000..a89a1567f
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c
@@ -0,0 +1,419 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/ut_multithread.c"
+#include "common/lib/test_sock.c"
+
+#include "../common.c"
+#include "iscsi/portal_grp.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk_internal/thread.h"
+
+DEFINE_STUB(iscsi_conn_construct, int,
+ (struct spdk_iscsi_portal *portal, struct spdk_sock *sock),
+ 0);
+
+struct spdk_iscsi_globals g_iscsi;
+
+static int
+test_setup(void)
+{
+ TAILQ_INIT(&g_iscsi.portal_head);
+ TAILQ_INIT(&g_iscsi.pg_head);
+ pthread_mutex_init(&g_iscsi.mutex, NULL);
+ return 0;
+}
+
+static void
+portal_create_ipv4_normal_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_create_ipv6_normal_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "[2001:ad6:1234::]";
+ const char *port = "3260";
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_create_ipv4_wildcard_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "*";
+ const char *port = "3260";
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_create_ipv6_wildcard_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "[*]";
+ const char *port = "3260";
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_create_twice_case(void)
+{
+ struct spdk_iscsi_portal *p1, *p2;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+
+ p1 = iscsi_portal_create(host, port);
+ CU_ASSERT(p1 != NULL);
+
+ p2 = iscsi_portal_create(host, port);
+ CU_ASSERT(p2 == NULL);
+
+ iscsi_portal_destroy(p1);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv4_normal_case(void)
+{
+ const char *string = "192.168.2.0:3260";
+ const char *host_str = "192.168.2.0";
+ const char *port_str = "3260";
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ rc = iscsi_parse_portal(string, &p);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv6_normal_case(void)
+{
+ const char *string = "[2001:ad6:1234::]:3260";
+ const char *host_str = "[2001:ad6:1234::]";
+ const char *port_str = "3260";
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ rc = iscsi_parse_portal(string, &p);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv4_skip_port_case(void)
+{
+ const char *string = "192.168.2.0";
+ const char *host_str = "192.168.2.0";
+ const char *port_str = "3260";
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ rc = iscsi_parse_portal(string, &p);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv6_skip_port_case(void)
+{
+ const char *string = "[2001:ad6:1234::]";
+ const char *host_str = "[2001:ad6:1234::]";
+ const char *port_str = "3260";
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ rc = iscsi_parse_portal(string, &p);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_grp_register_unregister_case(void)
+{
+ struct spdk_iscsi_portal *p;
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ int rc;
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+
+ pg1 = iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_grp_add_portal(pg1, p);
+
+ rc = iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+ pg2 = iscsi_portal_grp_unregister(1);
+ CU_ASSERT(pg2 != NULL);
+ CU_ASSERT(pg1 == pg2);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.pg_head));
+
+ iscsi_portal_grp_destroy(pg1);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_grp_register_twice_case(void)
+{
+ struct spdk_iscsi_portal *p;
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ int rc;
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+
+ pg1 = iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_grp_add_portal(pg1, p);
+
+ rc = iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc != 0);
+
+ pg2 = iscsi_portal_grp_unregister(1);
+ CU_ASSERT(pg2 != NULL);
+ CU_ASSERT(pg1 == pg2);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.pg_head));
+
+ iscsi_portal_grp_destroy(pg1);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_grp_add_delete_case(void)
+{
+ struct spdk_sock sock = {};
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ struct spdk_iscsi_portal *p;
+ int rc;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+
+ allocate_threads(1);
+ set_thread(0);
+
+	/* Mirrors the internals of iscsi_create_portal_group(). */
+ pg1 = iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_grp_add_portal(pg1, p);
+
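+	/* Stub spdk_sock_listen() to return the dummy socket so that opening
+	 * the portal group does not create a real listening socket.
+	 */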
+ MOCK_SET(spdk_sock_listen, &sock);
+ rc = iscsi_portal_grp_open(pg1);
+ CU_ASSERT(rc == 0);
+ MOCK_CLEAR_P(spdk_sock_listen);
+
+ rc = iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+	/* Mirrors the internals of delete_portal_group(). */
+ pg2 = iscsi_portal_grp_unregister(1);
+ CU_ASSERT(pg2 != NULL);
+ CU_ASSERT(pg1 == pg2);
+
+ iscsi_portal_grp_release(pg2);
+
+ poll_thread(0);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.pg_head));
+
+ free_threads();
+}
+
+static void
+portal_grp_add_delete_twice_case(void)
+{
+ struct spdk_sock sock = {};
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ struct spdk_iscsi_portal *p;
+ int rc;
+
+ const char *host = "192.168.2.0";
+ const char *port1 = "3260", *port2 = "3261";
+
+ allocate_threads(1);
+ set_thread(0);
+
+	/* Mirrors the internals of iscsi_create_portal_group(). */
+ pg1 = iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = iscsi_portal_create(host, port1);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_grp_add_portal(pg1, p);
+
+ MOCK_SET(spdk_sock_listen, &sock);
+ rc = iscsi_portal_grp_open(pg1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+	/* Mirrors the internals of iscsi_create_portal_group(). */
+ pg2 = iscsi_portal_grp_create(2);
+ CU_ASSERT(pg2 != NULL);
+
+ p = iscsi_portal_create(host, port2);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_grp_add_portal(pg2, p);
+
+ rc = iscsi_portal_grp_open(pg2);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_portal_grp_register(pg2);
+ CU_ASSERT(rc == 0);
+
+	/* Mirrors the internals of destroy_portal_group(). */
+ iscsi_portal_grp_close(pg1);
+ iscsi_portal_grp_close(pg2);
+
+ poll_thread(0);
+
+ iscsi_portal_grps_destroy();
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.pg_head));
+
+ MOCK_CLEAR_P(spdk_sock_listen);
+
+ free_threads();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("portal_grp_suite", test_setup, NULL);
+
+ CU_ADD_TEST(suite, portal_create_ipv4_normal_case);
+ CU_ADD_TEST(suite, portal_create_ipv6_normal_case);
+ CU_ADD_TEST(suite, portal_create_ipv4_wildcard_case);
+ CU_ADD_TEST(suite, portal_create_ipv6_wildcard_case);
+ CU_ADD_TEST(suite, portal_create_twice_case);
+ CU_ADD_TEST(suite, parse_portal_ipv4_normal_case);
+ CU_ADD_TEST(suite, parse_portal_ipv6_normal_case);
+ CU_ADD_TEST(suite, parse_portal_ipv4_skip_port_case);
+ CU_ADD_TEST(suite, parse_portal_ipv6_skip_port_case);
+ CU_ADD_TEST(suite, portal_grp_register_unregister_case);
+ CU_ADD_TEST(suite, portal_grp_register_twice_case);
+ CU_ADD_TEST(suite, portal_grp_add_delete_case);
+ CU_ADD_TEST(suite, portal_grp_add_delete_twice_case);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore b/src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore
new file mode 100644
index 000000000..010d84b83
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore
@@ -0,0 +1 @@
+tgt_node_ut
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile b/src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile
new file mode 100644
index 000000000..90bd4f990
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf
+TEST_FILE = tgt_node_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf
new file mode 100644
index 000000000..6bf5aa664
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf
@@ -0,0 +1,95 @@
+[Global]
+
+# Test that parsing fails if there is no TargetName
+[Failure0]
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1 InitiatorGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if there is no Mapping
+[Failure1]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping does not define Portal or InitiatorGroup
+[Failure2]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping does not define InitiatorGroup
+[Failure3]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping switches PortalGroup/InitiatorGroup order
+[Failure4]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping InitiatorGroup1 PortalGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping uses invalid InitiatorGroup0
+[Failure5]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1 InitiatorGroup0
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping uses invalid PortalGroup0
+[Failure6]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup0 InitiatorGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if AuthMethod is invalid
+[Failure7]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1 InitiatorGroup1
+ AuthMethod SomeGarbage
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c
new file mode 100644
index 000000000..3f3bda39b
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c
@@ -0,0 +1,832 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/scsi.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_internal/mock.h"
+
+#include "../common.c"
+#include "iscsi/tgt_node.c"
+#include "scsi/scsi_internal.h"
+#include "unit/lib/json_mock.c"
+#include "common/lib/test_env.c"
+
+struct spdk_iscsi_globals g_iscsi;
+
+const char *config_file;
+
+DEFINE_STUB(spdk_scsi_dev_get_id,
+ int,
+ (const struct spdk_scsi_dev *dev),
+ 0);
+
+DEFINE_STUB(spdk_scsi_lun_get_bdev_name,
+ const char *,
+ (const struct spdk_scsi_lun *lun),
+ NULL);
+
+DEFINE_STUB(spdk_scsi_lun_get_id,
+ int,
+ (const struct spdk_scsi_lun *lun),
+ 0);
+
+DEFINE_STUB_V(spdk_iscsi_op_abort_task_set,
+ (struct spdk_iscsi_task *task,
+ uint8_t function));
+
+DEFINE_STUB(spdk_sock_is_ipv6, bool, (struct spdk_sock *sock), false);
+
+DEFINE_STUB(spdk_sock_is_ipv4, bool, (struct spdk_sock *sock), false);
+
+DEFINE_STUB(iscsi_portal_grp_find_by_tag,
+ struct spdk_iscsi_portal_grp *, (int tag), NULL);
+
+DEFINE_STUB(iscsi_init_grp_find_by_tag, struct spdk_iscsi_init_grp *,
+ (int tag), NULL);
+
+struct spdk_scsi_lun *
+spdk_scsi_dev_get_lun(struct spdk_scsi_dev *dev, int lun_id)
+{
+ if (lun_id < 0 || lun_id >= SPDK_SCSI_DEV_MAX_LUN) {
+ return NULL;
+ }
+
+ return dev->lun[lun_id];
+}
+
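+/* Minimal stub: adding a LUN succeeds whenever a bdev name is given. */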
+int
+spdk_scsi_dev_add_lun(struct spdk_scsi_dev *dev, const char *bdev_name, int lun_id,
+ void (*hotremove_cb)(const struct spdk_scsi_lun *, void *),
+ void *hotremove_ctx)
+{
+ if (bdev_name == NULL) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static void
+add_lun_test_cases(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode = {};
+ int lun_id = 0;
+ char *bdev_name = NULL;
+ struct spdk_scsi_dev scsi_dev = {};
+ int rc;
+
+ /* case 1 */
+ tgtnode.num_active_conns = 1;
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+ /* case 2 */
+ tgtnode.num_active_conns = 0;
+ lun_id = -2;
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+ /* case 3 */
+ lun_id = SPDK_SCSI_DEV_MAX_LUN;
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+ /* case 4 */
+ lun_id = -1;
+ tgtnode.dev = NULL;
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+ /* case 5 */
+ tgtnode.dev = &scsi_dev;
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+ /* case 6 */
+ bdev_name = "LUN0";
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+config_file_fail_cases(void)
+{
+ struct spdk_conf *config;
+ struct spdk_conf_section *sp;
+ char section_name[64];
+ int section_index;
+ int rc;
+
+ config = spdk_conf_allocate();
+
+ rc = spdk_conf_read(config, config_file);
+ CU_ASSERT(rc == 0);
+
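+	/* Walk the [Failure0], [Failure1], ... sections of the config file; each
+	 * one is intentionally malformed and must fail to parse.
+	 */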
+ section_index = 0;
+ while (true) {
+ snprintf(section_name, sizeof(section_name), "Failure%d", section_index);
+ sp = spdk_conf_find_section(config, section_name);
+ if (sp == NULL) {
+ break;
+ }
+ rc = iscsi_parse_tgt_node(sp);
+ CU_ASSERT(rc < 0);
+ section_index++;
+ }
+
+ spdk_conf_free(config);
+}
+
+static void
+allow_any_allowed(void)
+{
+ bool result;
+ char *netmask;
+ char *addr1, *addr2;
+
+ netmask = "ANY";
+ addr1 = "2001:ad6:1234:5678:9abc::";
+ addr2 = "192.168.2.1";
+
+ result = iscsi_netmask_allow_addr(netmask, addr1);
+ CU_ASSERT(result == true);
+
+ result = iscsi_netmask_allow_addr(netmask, addr2);
+ CU_ASSERT(result == true);
+}
+
+static void
+allow_ipv6_allowed(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "[2001:ad6:1234::]/48";
+ addr = "2001:ad6:1234:5678:9abc::";
+
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ result = iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ /* Netmask prefix bits == 128 (all bits must match) */
+ netmask = "[2001:ad6:1234:5678:9abc::1]/128";
+ addr = "2001:ad6:1234:5678:9abc::1";
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+}
+
+static void
+allow_ipv6_denied(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "[2001:ad6:1234::]/56";
+ addr = "2001:ad6:1234:5678:9abc::";
+
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ result = iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits == 128 (all bits must match) */
+ netmask = "[2001:ad6:1234:5678:9abc::1]/128";
+ addr = "2001:ad6:1234:5678:9abc::2";
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+allow_ipv6_invalid(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ /* Netmask prefix bits > 128 */
+ netmask = "[2001:ad6:1234::]/129";
+ addr = "2001:ad6:1234:5678:9abc::";
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits == 0 */
+ netmask = "[2001:ad6:1234::]/0";
+ addr = "2001:ad6:1234:5678:9abc::";
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits < 0 */
+ netmask = "[2001:ad6:1234::]/-1";
+ addr = "2001:ad6:1234:5678:9abc::";
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+allow_ipv4_allowed(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "192.168.2.0/24";
+ addr = "192.168.2.1";
+
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ result = iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ /* Netmask prefix == 32 (all bits must match) */
+ netmask = "192.168.2.1/32";
+ addr = "192.168.2.1";
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+}
+
+static void
+allow_ipv4_denied(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "192.168.2.0";
+ addr = "192.168.2.1";
+
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ result = iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix == 32 (all bits must match) */
+ netmask = "192.168.2.1/32";
+ addr = "192.168.2.2";
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+allow_ipv4_invalid(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ /* Netmask prefix bits > 32 */
+ netmask = "192.168.2.0/33";
+ addr = "192.168.2.1";
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits == 0 */
+ netmask = "192.168.2.0/0";
+ addr = "192.168.2.1";
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits < 0 */
+ netmask = "192.168.2.0/-1";
+ addr = "192.168.2.1";
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+node_access_allowed(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode = {};
+ struct spdk_iscsi_portal_grp pg = {};
+ struct spdk_iscsi_init_grp ig = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_portal portal = {};
+ struct spdk_iscsi_initiator_name iname = {};
+ struct spdk_iscsi_initiator_netmask imask = {};
+ struct spdk_scsi_dev scsi_dev = {};
+ struct spdk_iscsi_pg_map *pg_map;
+ char *iqn, *addr;
+ bool result;
+
+ /* portal group initialization */
+ pg.tag = 1;
+
+ /* initiator group initialization */
+ ig.tag = 1;
+
+ ig.ninitiators = 1;
+ snprintf(iname.name, sizeof(iname.name), "iqn.2017-10.spdk.io:0001");
+ TAILQ_INIT(&ig.initiator_head);
+ TAILQ_INSERT_TAIL(&ig.initiator_head, &iname, tailq);
+
+ ig.nnetmasks = 1;
+ snprintf(imask.mask, sizeof(imask.mask), "192.168.2.0/24");
+ TAILQ_INIT(&ig.netmask_head);
+ TAILQ_INSERT_TAIL(&ig.netmask_head, &imask, tailq);
+
+ /* target initialization */
+ snprintf(tgtnode.name, sizeof(tgtnode.name), "iqn.2017-10.spdk.io:0001");
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), "iqn.2017-10.spdk.io:0001");
+ tgtnode.dev = &scsi_dev;
+
+ pg_map = iscsi_tgt_node_add_pg_map(&tgtnode, &pg);
+ iscsi_pg_map_add_ig_map(pg_map, &ig);
+
+ /* portal initialization */
+ portal.group = &pg;
+ snprintf(portal.host, sizeof(portal.host), "192.168.2.0");
+ snprintf(portal.port, sizeof(portal.port), "3260");
+
+ /* input for UT */
+ conn.portal = &portal;
+
+ iqn = "iqn.2017-10.spdk.io:0001";
+ addr = "192.168.2.1";
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ iscsi_pg_map_delete_ig_map(pg_map, &ig);
+ iscsi_tgt_node_delete_pg_map(&tgtnode, &pg);
+}
+
+static void
+node_access_denied_by_empty_netmask(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode = {};
+ struct spdk_iscsi_portal_grp pg = {};
+ struct spdk_iscsi_init_grp ig = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_portal portal = {};
+ struct spdk_iscsi_initiator_name iname = {};
+ struct spdk_scsi_dev scsi_dev = {};
+ struct spdk_iscsi_pg_map *pg_map;
+ char *iqn, *addr;
+ bool result;
+
+ /* portal group initialization */
+ pg.tag = 1;
+
+ /* initiator group initialization */
+ ig.tag = 1;
+
+ ig.ninitiators = 1;
+ snprintf(iname.name, sizeof(iname.name), "iqn.2017-10.spdk.io:0001");
+ TAILQ_INIT(&ig.initiator_head);
+ TAILQ_INSERT_TAIL(&ig.initiator_head, &iname, tailq);
+
+ ig.nnetmasks = 0;
+ TAILQ_INIT(&ig.netmask_head);
+
+ /* target initialization */
+ snprintf(tgtnode.name, sizeof(tgtnode.name), "iqn.2017-10.spdk.io:0001");
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), "iqn.2017-10.spdk.io:0001");
+ tgtnode.dev = &scsi_dev;
+
+ pg_map = iscsi_tgt_node_add_pg_map(&tgtnode, &pg);
+ iscsi_pg_map_add_ig_map(pg_map, &ig);
+
+ /* portal initialization */
+ portal.group = &pg;
+ snprintf(portal.host, sizeof(portal.host), "192.168.2.0");
+ snprintf(portal.port, sizeof(portal.port), "3260");
+
+ /* input for UT */
+ conn.portal = &portal;
+
+ iqn = "iqn.2017-10.spdk.io:0001";
+ addr = "192.168.3.1";
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ iscsi_pg_map_delete_ig_map(pg_map, &ig);
+ iscsi_tgt_node_delete_pg_map(&tgtnode, &pg);
+}
+
+#define IQN1 "iqn.2017-11.spdk.io:0001"
+#define NO_IQN1 "!iqn.2017-11.spdk.io:0001"
+#define IQN2 "iqn.2017-11.spdk.io:0002"
+#define IP1 "192.168.2.0"
+#define IP2 "192.168.2.1"
+
+static void
+node_access_multi_initiator_groups_cases(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_portal_grp pg = {};
+ struct spdk_iscsi_portal portal = {};
+ struct spdk_iscsi_init_grp ig1 = {}, ig2 = {};
+ struct spdk_iscsi_initiator_name iname1 = {}, iname2 = {};
+ struct spdk_iscsi_initiator_netmask imask1 = {}, imask2 = {};
+ struct spdk_scsi_dev scsi_dev = {};
+ struct spdk_iscsi_pg_map *pg_map;
+ char *iqn, *addr;
+ bool result;
+
+ /* target initialization */
+ snprintf(tgtnode.name, sizeof(tgtnode.name), IQN1);
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), IQN1);
+ tgtnode.dev = &scsi_dev;
+
+ /* initiator group initialization */
+ ig1.tag = 1;
+ TAILQ_INIT(&ig1.initiator_head);
+ TAILQ_INIT(&ig1.netmask_head);
+
+ ig1.ninitiators = 1;
+ TAILQ_INSERT_TAIL(&ig1.initiator_head, &iname1, tailq);
+
+ ig1.nnetmasks = 1;
+ TAILQ_INSERT_TAIL(&ig1.netmask_head, &imask1, tailq);
+
+ ig2.tag = 2;
+ TAILQ_INIT(&ig2.initiator_head);
+ TAILQ_INIT(&ig2.netmask_head);
+
+ ig2.ninitiators = 1;
+ TAILQ_INSERT_TAIL(&ig2.initiator_head, &iname2, tailq);
+
+ ig2.nnetmasks = 1;
+ TAILQ_INSERT_TAIL(&ig2.netmask_head, &imask2, tailq);
+
+ /* portal group initialization */
+ pg.tag = 1;
+
+ pg_map = iscsi_tgt_node_add_pg_map(&tgtnode, &pg);
+ iscsi_pg_map_add_ig_map(pg_map, &ig1);
+ iscsi_pg_map_add_ig_map(pg_map, &ig2);
+
+ /* portal initialization */
+ portal.group = &pg;
+ snprintf(portal.host, sizeof(portal.host), IP1);
+ snprintf(portal.port, sizeof(portal.port), "3260");
+
+ /* connection initialization */
+ conn.portal = &portal;
+
+ iqn = IQN1;
+ addr = IP1;
+
+ /*
+ * case 1:
+ * +-----------------------------------------------+---------+
+ * | IG1                   | IG2                   |         |
+ * +-----------------------------------------------+         |
+ * | name      | addr      | name      | addr      | result  |
+ * +-----------------------------------------------+---------+
+ * | denied    | -         | -         | -         | denied  |
+ * +-----------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), NO_IQN1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 2:
+ * +-----------------------------------------------+---------+
+ * | IG1                   | IG2                   |         |
+ * +-----------------------------------------------+         |
+ * | name      | addr      | name      | addr      | result  |
+ * +-----------------------------------------------+---------+
+ * | allowed   | allowed   | -         | -         | allowed |
+ * +-----------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN1);
+ snprintf(imask1.mask, sizeof(imask1.mask), IP1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ /*
+ * case 3:
+ * +-----------------------------------------------+---------+
+ * | IG1                   | IG2                   |         |
+ * +-----------------------------------------------+         |
+ * | name      | addr      | name      | addr      | result  |
+ * +-----------------------------------------------+---------+
+ * | allowed   | denied    | denied    | -         | denied  |
+ * +-----------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN1);
+ snprintf(imask1.mask, sizeof(imask1.mask), IP2);
+ snprintf(iname2.name, sizeof(iname2.name), NO_IQN1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 4:
+ * +-----------------------------------------------+---------+
+ * | IG1                   | IG2                   |         |
+ * +-----------------------------------------------+         |
+ * | name      | addr      | name      | addr      | result  |
+ * +-----------------------------------------------+---------+
+ * | allowed   | denied    | allowed   | allowed   | allowed |
+ * +-----------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN1);
+ snprintf(imask1.mask, sizeof(imask1.mask), IP2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN1);
+ snprintf(imask2.mask, sizeof(imask2.mask), IP1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ /*
+ * case 5:
+ * +-----------------------------------------------+---------+
+ * | IG1                   | IG2                   |         |
+ * +-----------------------------------------------+         |
+ * | name      | addr      | name      | addr      | result  |
+ * +-----------------------------------------------+---------+
+ * | allowed   | denied    | allowed   | denied    | denied  |
+ * +-----------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN1);
+ snprintf(imask1.mask, sizeof(imask1.mask), IP2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN1);
+ snprintf(imask2.mask, sizeof(imask2.mask), IP2);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 6:
+ * +-----------------------------------------------+---------+
+ * | IG1                   | IG2                   |         |
+ * +-----------------------------------------------+         |
+ * | name      | addr      | name      | addr      | result  |
+ * +-----------------------------------------------+---------+
+ * | allowed   | denied    | not found | -         | denied  |
+ * +-----------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN1);
+ snprintf(imask1.mask, sizeof(imask1.mask), IP2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN2);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 7:
+ * +-----------------------------------------------+---------+
+ * | IG1                   | IG2                   |         |
+ * +-----------------------------------------------+         |
+ * | name      | addr      | name      | addr      | result  |
+ * +-----------------------------------------------+---------+
+ * | not found | -         | denied    | -         | denied  |
+ * +-----------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN2);
+ snprintf(iname2.name, sizeof(iname2.name), NO_IQN1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 8:
+ * +-----------------------------------------------+---------+
+ * | IG1                   | IG2                   |         |
+ * +-----------------------------------------------+         |
+ * | name      | addr      | name      | addr      | result  |
+ * +-----------------------------------------------+---------+
+ * | not found | -         | allowed   | allowed   | allowed |
+ * +-----------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN1);
+ snprintf(imask2.mask, sizeof(imask2.mask), IP1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ /*
+ * case 9:
+ * +-----------------------------------------------+---------+
+ * | IG1                   | IG2                   |         |
+ * +-----------------------------------------------+         |
+ * | name      | addr      | name      | addr      | result  |
+ * +-----------------------------------------------+---------+
+ * | not found | -         | allowed   | denied    | denied  |
+ * +-----------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN1);
+ snprintf(imask2.mask, sizeof(imask2.mask), IP2);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 10:
+ * +-----------------------------------------------+---------+
+ * | IG1                   | IG2                   |         |
+ * +-----------------------------------------------+         |
+ * | name      | addr      | name      | addr      | result  |
+ * +-----------------------------------------------+---------+
+ * | not found | -         | not found | -         | denied  |
+ * +-----------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN2);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ iscsi_pg_map_delete_ig_map(pg_map, &ig1);
+ iscsi_pg_map_delete_ig_map(pg_map, &ig2);
+ iscsi_tgt_node_delete_pg_map(&tgtnode, &pg);
+}
+
+static void
+allow_iscsi_name_multi_maps_case(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode = {};
+ struct spdk_iscsi_portal_grp pg1 = {}, pg2 = {};
+ struct spdk_iscsi_init_grp ig = {};
+ struct spdk_iscsi_initiator_name iname = {};
+ struct spdk_iscsi_pg_map *pg_map1, *pg_map2;
+ struct spdk_scsi_dev scsi_dev = {};
+ char *iqn;
+ bool result;
+
+ /* target initialization */
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), IQN1);
+ tgtnode.dev = &scsi_dev;
+
+ /* initiator group initialization */
+ TAILQ_INIT(&ig.initiator_head);
+
+ ig.ninitiators = 1;
+ TAILQ_INSERT_TAIL(&ig.initiator_head, &iname, tailq);
+
+ /* portal group initialization */
+ pg1.tag = 1;
+ pg2.tag = 1;
+
+ pg_map1 = iscsi_tgt_node_add_pg_map(&tgtnode, &pg1);
+ pg_map2 = iscsi_tgt_node_add_pg_map(&tgtnode, &pg2);
+ iscsi_pg_map_add_ig_map(pg_map1, &ig);
+ iscsi_pg_map_add_ig_map(pg_map2, &ig);
+
+ /* test for IG1 <-> PG1, PG2 case */
+ iqn = IQN1;
+
+ snprintf(iname.name, sizeof(iname.name), IQN1);
+
+ result = iscsi_tgt_node_allow_iscsi_name(&tgtnode, iqn);
+ CU_ASSERT(result == true);
+
+ snprintf(iname.name, sizeof(iname.name), IQN2);
+
+ result = iscsi_tgt_node_allow_iscsi_name(&tgtnode, iqn);
+ CU_ASSERT(result == false);
+
+ iscsi_pg_map_delete_ig_map(pg_map1, &ig);
+ iscsi_pg_map_delete_ig_map(pg_map2, &ig);
+ iscsi_tgt_node_delete_pg_map(&tgtnode, &pg1);
+ iscsi_tgt_node_delete_pg_map(&tgtnode, &pg2);
+}
+
+/*
+ * static bool
+ * iscsi_check_chap_params(bool disable_chap, bool require_chap,
+ * bool mutual_chap, int chap_group);
+ */
+static void
+chap_param_test_cases(void)
+{
+ /* Auto */
+ CU_ASSERT(iscsi_check_chap_params(false, false, false, 0) == true);
+
+ /* None */
+ CU_ASSERT(iscsi_check_chap_params(true, false, false, 0) == true);
+
+ /* CHAP */
+ CU_ASSERT(iscsi_check_chap_params(false, true, false, 0) == true);
+
+ /* CHAP Mutual */
+ CU_ASSERT(iscsi_check_chap_params(false, true, true, 0) == true);
+
+ /* Check mutual exclusiveness of disabled and required */
+ CU_ASSERT(iscsi_check_chap_params(true, true, false, 0) == false);
+
+ /* Mutual CHAP requires CHAP to also be required */
+ CU_ASSERT(iscsi_check_chap_params(false, false, true, 0) == false);
+
+ /* Remaining combinations */
+ CU_ASSERT(iscsi_check_chap_params(true, false, true, 0) == false);
+ CU_ASSERT(iscsi_check_chap_params(true, true, true, 0) == false);
+
+ /* Valid auth group ID */
+ CU_ASSERT(iscsi_check_chap_params(false, false, false, 1) == true);
+
+ /* Invalid auth group ID */
+ CU_ASSERT(iscsi_check_chap_params(false, false, false, -1) == false);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (argc < 2) {
+ fprintf(stderr, "usage: %s <config file>\n", argv[0]);
+ exit(1);
+ }
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ config_file = argv[1];
+
+ suite = CU_add_suite("iscsi_target_node_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, add_lun_test_cases);
+ CU_ADD_TEST(suite, config_file_fail_cases);
+ CU_ADD_TEST(suite, allow_any_allowed);
+ CU_ADD_TEST(suite, allow_ipv6_allowed);
+ CU_ADD_TEST(suite, allow_ipv6_denied);
+ CU_ADD_TEST(suite, allow_ipv6_invalid);
+ CU_ADD_TEST(suite, allow_ipv4_allowed);
+ CU_ADD_TEST(suite, allow_ipv4_denied);
+ CU_ADD_TEST(suite, allow_ipv4_invalid);
+ CU_ADD_TEST(suite, node_access_allowed);
+ CU_ADD_TEST(suite, node_access_denied_by_empty_netmask);
+ CU_ADD_TEST(suite, node_access_multi_initiator_groups_cases);
+ CU_ADD_TEST(suite, allow_iscsi_name_multi_maps_case);
+ CU_ADD_TEST(suite, chap_param_test_cases);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json/Makefile b/src/spdk/test/unit/lib/json/Makefile
new file mode 100644
index 000000000..db38f27dc
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = json_parse.c json_util.c json_write.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/json/json_parse.c/.gitignore b/src/spdk/test/unit/lib/json/json_parse.c/.gitignore
new file mode 100644
index 000000000..2b4445fd8
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_parse.c/.gitignore
@@ -0,0 +1 @@
+json_parse_ut
diff --git a/src/spdk/test/unit/lib/json/json_parse.c/Makefile b/src/spdk/test/unit/lib/json/json_parse.c/Makefile
new file mode 100644
index 000000000..3d4100240
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_parse.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = json_parse_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c b/src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c
new file mode 100644
index 000000000..7f704214b
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c
@@ -0,0 +1,931 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "json/json_parse.c"
+
+static uint8_t g_buf[1000];
+static void *g_end;
+static struct spdk_json_val g_vals[100];
+static int g_cur_val;
+
+/* Fill buf with raw data */
+#define BUF_SETUP(in) \
+ memset(g_buf, 0, sizeof(g_buf)); \
+ if (sizeof(in) > 1) { \
+ memcpy(g_buf, in, sizeof(in) - 1); \
+ } \
+ g_end = NULL
+
+/*
+ * Do two checks - first pass NULL for values to ensure the count is correct,
+ * then pass g_vals to get the actual values.
+ */
+#define PARSE_PASS_FLAGS(in, num_vals, trailing, flags) \
+ BUF_SETUP(in); \
+ CU_ASSERT(spdk_json_parse(g_buf, sizeof(in) - 1, NULL, 0, &g_end, flags) == num_vals); \
+ memset(g_vals, 0, sizeof(g_vals)); \
+ CU_ASSERT(spdk_json_parse(g_buf, sizeof(in) - 1, g_vals, sizeof(g_vals), &g_end, flags | SPDK_JSON_PARSE_FLAG_DECODE_IN_PLACE) == num_vals); \
+ CU_ASSERT(g_end == g_buf + sizeof(in) - sizeof(trailing)); \
+ CU_ASSERT(memcmp(g_end, trailing, sizeof(trailing) - 1) == 0); \
+ g_cur_val = 0
+
+#define PARSE_PASS(in, num_vals, trailing) \
+ PARSE_PASS_FLAGS(in, num_vals, trailing, 0)
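+
+/*
+ * Illustrative expansion: PARSE_PASS("[true]", 3, "") first calls
+ * spdk_json_parse() with a NULL values array to verify that exactly 3 values
+ * are counted, then parses again into g_vals (decoding in place) so the
+ * VAL_*() macros can check each value in order via g_cur_val.
+ */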
+
+#define PARSE_FAIL_FLAGS(in, retval, flags) \
+ BUF_SETUP(in); \
+ CU_ASSERT(spdk_json_parse(g_buf, sizeof(in) - 1, NULL, 0, &g_end, flags) == retval)
+
+#define PARSE_FAIL(in, retval) \
+ PARSE_FAIL_FLAGS(in, retval, 0)
+
+#define VAL_STRING_MATCH(str, var_type) \
+ CU_ASSERT(g_vals[g_cur_val].type == var_type); \
+ CU_ASSERT(g_vals[g_cur_val].len == sizeof(str) - 1); \
+ if (g_vals[g_cur_val].len == sizeof(str) - 1 && sizeof(str) > 1) { \
+ CU_ASSERT(memcmp(g_vals[g_cur_val].start, str, g_vals[g_cur_val].len) == 0); \
+ } \
+ g_cur_val++
+
+#define VAL_STRING(str) VAL_STRING_MATCH(str, SPDK_JSON_VAL_STRING)
+#define VAL_NAME(str) VAL_STRING_MATCH(str, SPDK_JSON_VAL_NAME)
+#define VAL_NUMBER(num) VAL_STRING_MATCH(num, SPDK_JSON_VAL_NUMBER)
+
+#define VAL_LITERAL(str, val_type) \
+ CU_ASSERT(g_vals[g_cur_val].type == val_type); \
+ CU_ASSERT(g_vals[g_cur_val].len == strlen(str)); \
+ if (g_vals[g_cur_val].len == strlen(str)) { \
+ CU_ASSERT(memcmp(g_vals[g_cur_val].start, str, g_vals[g_cur_val].len) == 0); \
+ } \
+ g_cur_val++
+
+#define VAL_TRUE() VAL_LITERAL("true", SPDK_JSON_VAL_TRUE)
+#define VAL_FALSE() VAL_LITERAL("false", SPDK_JSON_VAL_FALSE)
+#define VAL_NULL() VAL_LITERAL("null", SPDK_JSON_VAL_NULL)
+
+#define VAL_ARRAY_BEGIN(count) \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_ARRAY_BEGIN); \
+ CU_ASSERT(g_vals[g_cur_val].len == count); \
+ g_cur_val++
+
+#define VAL_ARRAY_END() \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_ARRAY_END); \
+ g_cur_val++
+
+#define VAL_OBJECT_BEGIN(count) \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_OBJECT_BEGIN); \
+ CU_ASSERT(g_vals[g_cur_val].len == count); \
+ g_cur_val++
+
+#define VAL_OBJECT_END() \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_OBJECT_END); \
+ g_cur_val++
+
+/* Simplified macros for string-only testing */
+#define STR_PASS(in, out) \
+ PARSE_PASS("\"" in "\"", 1, ""); \
+ VAL_STRING(out)
+
+#define STR_FAIL(in, retval) \
+ PARSE_FAIL("\"" in "\"", retval)
+
+/* Simplified macros for number-only testing (no whitespace allowed) */
+#define NUM_PASS(in) \
+ PARSE_PASS(in, 1, ""); \
+ VAL_NUMBER(in)
+
+#define NUM_FAIL(in, retval) \
+ PARSE_FAIL(in, retval)
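+
+/*
+ * Illustrative use: NUM_PASS("6.022e23") expects a single
+ * SPDK_JSON_VAL_NUMBER token whose raw text is exactly "6.022e23" and no
+ * trailing bytes; the parser only validates number syntax and leaves the
+ * conversion to the utility functions.
+ */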
+
+static void
+test_parse_literal(void)
+{
+ PARSE_PASS("true", 1, "");
+ VAL_TRUE();
+
+ PARSE_PASS(" true ", 1, "");
+ VAL_TRUE();
+
+ PARSE_PASS("false", 1, "");
+ VAL_FALSE();
+
+ PARSE_PASS("null", 1, "");
+ VAL_NULL();
+
+ PARSE_PASS("trueaaa", 1, "aaa");
+ VAL_TRUE();
+
+ PARSE_PASS("truefalse", 1, "false");
+ VAL_TRUE();
+
+ PARSE_PASS("true false", 1, "false");
+ VAL_TRUE();
+
+ PARSE_PASS("true,false", 1, ",false");
+ VAL_TRUE();
+
+ PARSE_PASS("true,", 1, ",");
+ VAL_TRUE();
+
+ PARSE_FAIL("True", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("abcdef", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_FAIL("t", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("tru", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("f", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("fals", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("n", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("nul", SPDK_JSON_PARSE_INCOMPLETE);
+
+ PARSE_FAIL("taaaaa", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("faaaaa", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("naaaaa", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_string_simple(void)
+{
+ PARSE_PASS("\"\"", 1, "");
+ VAL_STRING("");
+
+ PARSE_PASS("\"hello world\"", 1, "");
+ VAL_STRING("hello world");
+
+ PARSE_PASS(" \"hello world\" ", 1, "");
+ VAL_STRING("hello world");
+
+ /* Unterminated string */
+ PARSE_FAIL("\"hello world", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Trailing comma */
+ PARSE_PASS("\"hello world\",", 1, ",");
+ VAL_STRING("hello world");
+}
+
+static void
+test_parse_string_control_chars(void)
+{
+ /* U+0000 through U+001F must be escaped */
+ STR_FAIL("\x00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x01", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x02", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x03", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x04", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x05", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x06", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x07", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x08", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x09", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0A", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0B", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0C", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0D", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0E", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0F", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x10", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x11", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x12", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x13", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x14", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x15", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x16", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x17", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x18", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x19", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1A", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1B", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1C", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1D", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1E", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1F", SPDK_JSON_PARSE_INVALID);
+ STR_PASS(" ", " "); /* \x20 (first valid unescaped char) */
+
+ /* Test control chars in the middle of a string */
+ STR_FAIL("abc\ndef", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("abc\tdef", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_string_utf8(void)
+{
+ /* Valid one-, two-, three-, and four-byte sequences */
+ STR_PASS("\x41", "A");
+ STR_PASS("\xC3\xB6", "\xC3\xB6");
+ STR_PASS("\xE2\x88\x9A", "\xE2\x88\x9A");
+ STR_PASS("\xF0\xA0\x9C\x8E", "\xF0\xA0\x9C\x8E");
+
+ /* Examples from RFC 3629 */
+ STR_PASS("\x41\xE2\x89\xA2\xCE\x91\x2E", "\x41\xE2\x89\xA2\xCE\x91\x2E");
+ STR_PASS("\xED\x95\x9C\xEA\xB5\xAD\xEC\x96\xB4", "\xED\x95\x9C\xEA\xB5\xAD\xEC\x96\xB4");
+ STR_PASS("\xE6\x97\xA5\xE6\x9C\xAC\xE8\xAA\x9E", "\xE6\x97\xA5\xE6\x9C\xAC\xE8\xAA\x9E");
+ STR_PASS("\xEF\xBB\xBF\xF0\xA3\x8E\xB4", "\xEF\xBB\xBF\xF0\xA3\x8E\xB4");
+
+ /* Edge cases */
+ STR_PASS("\x7F", "\x7F");
+ STR_FAIL("\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xC1", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xC2", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xC2\x80", "\xC2\x80");
+ STR_PASS("\xC2\xBF", "\xC2\xBF");
+ STR_PASS("\xDF\x80", "\xDF\x80");
+ STR_PASS("\xDF\xBF", "\xDF\xBF");
+ STR_FAIL("\xDF", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\x1F", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\x1F\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\xA0", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xE0\xA0\x80", "\xE0\xA0\x80");
+ STR_PASS("\xE0\xA0\xBF", "\xE0\xA0\xBF");
+ STR_FAIL("\xE0\xA0\xC0", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xE0\xBF\x80", "\xE0\xBF\x80");
+ STR_PASS("\xE0\xBF\xBF", "\xE0\xBF\xBF");
+ STR_FAIL("\xE0\xC0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x7F\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x80\x7F", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xE1\x80\x80", "\xE1\x80\x80");
+ STR_PASS("\xE1\x80\xBF", "\xE1\x80\xBF");
+ STR_PASS("\xE1\xBF\x80", "\xE1\xBF\x80");
+ STR_PASS("\xE1\xBF\xBF", "\xE1\xBF\xBF");
+ STR_FAIL("\xE1\xC0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x80\xC0", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xEF\x80\x80", "\xEF\x80\x80");
+ STR_PASS("\xEF\xBF\xBF", "\xEF\xBF\xBF");
+ STR_FAIL("\xF0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x90", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x90\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x80\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x8F\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xF0\x90\x80\x80", "\xF0\x90\x80\x80");
+ STR_PASS("\xF0\x90\x80\xBF", "\xF0\x90\x80\xBF");
+ STR_PASS("\xF0\x90\xBF\x80", "\xF0\x90\xBF\x80");
+ STR_PASS("\xF0\xBF\x80\x80", "\xF0\xBF\x80\x80");
+ STR_FAIL("\xF0\xC0\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1\x80\x80\x7F", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xF1\x80\x80\x80", "\xF1\x80\x80\x80");
+ STR_PASS("\xF1\x80\x80\xBF", "\xF1\x80\x80\xBF");
+ STR_PASS("\xF1\x80\xBF\x80", "\xF1\x80\xBF\x80");
+ STR_PASS("\xF1\xBF\x80\x80", "\xF1\xBF\x80\x80");
+ STR_PASS("\xF3\x80\x80\x80", "\xF3\x80\x80\x80");
+ STR_FAIL("\xF3\xC0\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF3\x80\xC0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF3\x80\x80\xC0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF4", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF4\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF4\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xF4\x80\x80\x80", "\xF4\x80\x80\x80");
+ STR_PASS("\xF4\x8F\x80\x80", "\xF4\x8F\x80\x80");
+ STR_PASS("\xF4\x8F\xBF\xBF", "\xF4\x8F\xBF\xBF");
+ STR_FAIL("\xF4\x90\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80\x80\x80\x80", SPDK_JSON_PARSE_INVALID);
+
+ /* Overlong encodings */
+ STR_FAIL("\xC0\x80", SPDK_JSON_PARSE_INVALID);
+
+ /* Surrogate pairs */
+ STR_FAIL("\xED\xA0\x80", SPDK_JSON_PARSE_INVALID); /* U+D800 First high surrogate */
+ STR_FAIL("\xED\xAF\xBF", SPDK_JSON_PARSE_INVALID); /* U+DBFF Last high surrogate */
+ STR_FAIL("\xED\xB0\x80", SPDK_JSON_PARSE_INVALID); /* U+DC00 First low surrogate */
+ STR_FAIL("\xED\xBF\xBF", SPDK_JSON_PARSE_INVALID); /* U+DFFF Last low surrogate */
+ STR_FAIL("\xED\xA1\x8C\xED\xBE\xB4",
+ SPDK_JSON_PARSE_INVALID); /* U+233B4 (invalid surrogate pair encoding) */
+}
+
+static void
+test_parse_string_escapes_twochar(void)
+{
+ STR_PASS("\\\"", "\"");
+ STR_PASS("\\\\", "\\");
+ STR_PASS("\\/", "/");
+ STR_PASS("\\b", "\b");
+ STR_PASS("\\f", "\f");
+ STR_PASS("\\n", "\n");
+ STR_PASS("\\r", "\r");
+ STR_PASS("\\t", "\t");
+
+ STR_PASS("abc\\tdef", "abc\tdef");
+ STR_PASS("abc\\\"def", "abc\"def");
+
+ /* Backslash at end of string (will be treated as escaped quote) */
+ STR_FAIL("\\", SPDK_JSON_PARSE_INCOMPLETE);
+ STR_FAIL("abc\\", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Invalid C-like escapes */
+ STR_FAIL("\\a", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\v", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\'", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\?", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\x00", SPDK_JSON_PARSE_INVALID);
+
+ /* Other invalid escapes */
+ STR_FAIL("\\B", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\z", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_string_escapes_unicode(void)
+{
+ STR_PASS("\\u0000", "\0");
+ STR_PASS("\\u0001", "\1");
+ STR_PASS("\\u0041", "A");
+ STR_PASS("\\uAAAA", "\xEA\xAA\xAA");
+ STR_PASS("\\uaaaa", "\xEA\xAA\xAA");
+ STR_PASS("\\uAaAa", "\xEA\xAA\xAA");
+
+ STR_FAIL("\\u", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u000", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u000g", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\U", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\U0000", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_FAIL("\"\\u", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\u0", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\u00", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\u000", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Surrogate pair */
+ STR_PASS("\\uD834\\uDD1E", "\xF0\x9D\x84\x9E");
+
+ /* Low surrogate without high */
+ STR_FAIL("\\uDC00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uDC00\\uDC00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uDC00abcdef", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uDEAD", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("\"\\uD834", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\u", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\uD", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\uDD1", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* High surrogate without low */
+ STR_FAIL("\\uD800", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uD800abcdef", SPDK_JSON_PARSE_INVALID);
+
+ /* High surrogate followed by high surrogate */
+ STR_FAIL("\\uD800\\uD800", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_number(void)
+{
+ NUM_PASS("0");
+ NUM_PASS("1");
+ NUM_PASS("100");
+ NUM_PASS("-1");
+ NUM_PASS("-0");
+ NUM_PASS("3.0");
+ NUM_PASS("3.00");
+ NUM_PASS("3.001");
+ NUM_PASS("3.14159");
+ NUM_PASS("3.141592653589793238462643383279");
+ NUM_PASS("1e400");
+ NUM_PASS("1E400");
+ NUM_PASS("0e10");
+ NUM_PASS("0e0");
+ NUM_PASS("-0e0");
+ NUM_PASS("-0e+0");
+ NUM_PASS("-0e-0");
+ NUM_PASS("1e+400");
+ NUM_PASS("1e-400");
+ NUM_PASS("6.022e23");
+ NUM_PASS("-1.234e+56");
+ NUM_PASS("1.23e+56");
+ NUM_PASS("-1.23e-56");
+ NUM_PASS("1.23e-56");
+ NUM_PASS("1e04");
+
+ /* Trailing garbage */
+ PARSE_PASS("0A", 1, "A");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("0,", 1, ",");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("0true", 1, "true");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("00", 1, "0");
+ VAL_NUMBER("0");
+ PARSE_FAIL("[00", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("007", 1, "07");
+ VAL_NUMBER("0");
+ PARSE_FAIL("[007]", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("345.678.1", 1, ".1");
+ VAL_NUMBER("345.678");
+ PARSE_FAIL("[345.678.1]", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("3.2e-4+5", 1, "+5");
+ VAL_NUMBER("3.2e-4");
+ PARSE_FAIL("[3.2e-4+5]", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("3.4.5", 1, ".5");
+ VAL_NUMBER("3.4");
+ PARSE_FAIL("[3.4.5]", SPDK_JSON_PARSE_INVALID);
+
+ NUM_FAIL("345.", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("+1", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("--1", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("3.+4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.2e+-4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.2e-+4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3e+", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("3e-", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("3.e4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.2eX", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("-", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("NaN", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL(".123", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_array(void)
+{
+ char buffer[SPDK_JSON_MAX_NESTING_DEPTH + 2] = {0};
+
+ PARSE_PASS("[]", 2, "");
+ VAL_ARRAY_BEGIN(0);
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[true]", 3, "");
+ VAL_ARRAY_BEGIN(1);
+ VAL_TRUE();
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[true, false]", 4, "");
+ VAL_ARRAY_BEGIN(2);
+ VAL_TRUE();
+ VAL_FALSE();
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[\"hello\"]", 3, "");
+ VAL_ARRAY_BEGIN(1);
+ VAL_STRING("hello");
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[[]]", 4, "");
+ VAL_ARRAY_BEGIN(2);
+ VAL_ARRAY_BEGIN(0);
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[\"hello\", \"world\"]", 4, "");
+ VAL_ARRAY_BEGIN(2);
+ VAL_STRING("hello");
+ VAL_STRING("world");
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[],", 2, ",");
+ VAL_ARRAY_BEGIN(0);
+ VAL_ARRAY_END();
+
+ PARSE_FAIL("]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[true", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[\"hello", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[\"hello\"", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[true,]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[,]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[,true]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[true}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[true,,true]", SPDK_JSON_PARSE_INVALID);
+
+ /* Nested arrays at exactly the maximum allowed nesting depth: parse is incomplete, not invalid */
+ memset(buffer, '[', SPDK_JSON_MAX_NESTING_DEPTH);
+ buffer[SPDK_JSON_MAX_NESTING_DEPTH] = ' ';
+ PARSE_FAIL(buffer, SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Nested arrays exceeding the maximum allowed nesting depth for this implementation */
+ buffer[SPDK_JSON_MAX_NESTING_DEPTH] = '[';
+ PARSE_FAIL(buffer, SPDK_JSON_PARSE_MAX_DEPTH_EXCEEDED);
+}
+
+static void
+test_parse_object(void)
+{
+ PARSE_PASS("{}", 2, "");
+ VAL_OBJECT_BEGIN(0);
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": true}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("a");
+ VAL_TRUE();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"abc\": \"def\"}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("abc");
+ VAL_STRING("def");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": true, \"b\": false}", 6, "");
+ VAL_OBJECT_BEGIN(4);
+ VAL_NAME("a");
+ VAL_TRUE();
+ VAL_NAME("b");
+ VAL_FALSE();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": { \"b\": true } }", 7, "");
+ VAL_OBJECT_BEGIN(5);
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("b");
+ VAL_TRUE();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"{test\": 0}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("{test");
+ VAL_NUMBER("0");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"test}\": 1}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("test}");
+ VAL_NUMBER("1");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"\\\"\": 2}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("\"");
+ VAL_NUMBER("2");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\":true},", 4, ",");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("a");
+ VAL_TRUE();
+ VAL_OBJECT_END();
+
+ /* Object end without object begin (trailing garbage) */
+ PARSE_PASS("true}", 1, "}");
+ VAL_TRUE();
+
+ PARSE_PASS("0}", 1, "}");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("\"a\"}", 1, "}");
+ VAL_STRING("a");
+
+ PARSE_FAIL("}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\"", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true,", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,\"}", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true,\"b}", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true,\"b\"}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,\"b\":}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,\"b\",}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\",}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{,\"a\": true}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{a:true}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{'a':true}", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_nesting(void)
+{
+ PARSE_PASS("[[[[[[[[]]]]]]]]", 16, "");
+
+ PARSE_PASS("{\"a\": [0, 1, 2]}", 8, "");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": [0, 1, 2], \"b\": 3 }", 10, "");
+ VAL_OBJECT_BEGIN(8);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_NAME("b");
+ VAL_NUMBER("3");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("[0, 1, {\"a\": 3}, 4, 5]", 10, "");
+ VAL_ARRAY_BEGIN(8);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("a");
+ VAL_NUMBER("3");
+ VAL_OBJECT_END();
+ VAL_NUMBER("4");
+ VAL_NUMBER("5");
+ VAL_ARRAY_END();
+
+ PARSE_PASS("\t[ { \"a\": {\"b\": [ {\"c\": 1}, 2 ],\n\"d\": 3}, \"e\" : 4}, 5 ] ", 20, "");
+ VAL_ARRAY_BEGIN(18);
+ VAL_OBJECT_BEGIN(15);
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN(10);
+ VAL_NAME("b");
+ VAL_ARRAY_BEGIN(5);
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("c");
+ VAL_NUMBER("1");
+ VAL_OBJECT_END();
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_NAME("d");
+ VAL_NUMBER("3");
+ VAL_OBJECT_END();
+ VAL_NAME("e");
+ VAL_NUMBER("4");
+ VAL_OBJECT_END();
+ VAL_NUMBER("5");
+ VAL_ARRAY_END();
+
+ /* Examples from RFC 7159 */
+ PARSE_PASS(
+ "{\n"
+ " \"Image\": {\n"
+ " \"Width\": 800,\n"
+ " \"Height\": 600,\n"
+ " \"Title\": \"View from 15th Floor\",\n"
+ " \"Thumbnail\": {\n"
+ " \"Url\": \"http://www.example.com/image/481989943\",\n"
+ " \"Height\": 125,\n"
+ " \"Width\": 100\n"
+ " },\n"
+ " \"Animated\" : false,\n"
+ " \"IDs\": [116, 943, 234, 38793]\n"
+ " }\n"
+ "}\n",
+ 29, "");
+
+ VAL_OBJECT_BEGIN(27);
+ VAL_NAME("Image");
+ VAL_OBJECT_BEGIN(24);
+ VAL_NAME("Width");
+ VAL_NUMBER("800");
+ VAL_NAME("Height");
+ VAL_NUMBER("600");
+ VAL_NAME("Title");
+ VAL_STRING("View from 15th Floor");
+ VAL_NAME("Thumbnail");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("Url");
+ VAL_STRING("http://www.example.com/image/481989943");
+ VAL_NAME("Height");
+ VAL_NUMBER("125");
+ VAL_NAME("Width");
+ VAL_NUMBER("100");
+ VAL_OBJECT_END();
+ VAL_NAME("Animated");
+ VAL_FALSE();
+ VAL_NAME("IDs");
+ VAL_ARRAY_BEGIN(4);
+ VAL_NUMBER("116");
+ VAL_NUMBER("943");
+ VAL_NUMBER("234");
+ VAL_NUMBER("38793");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS(
+ "[\n"
+ " {\n"
+ " \"precision\": \"zip\",\n"
+ " \"Latitude\": 37.7668,\n"
+ " \"Longitude\": -122.3959,\n"
+ " \"Address\": \"\",\n"
+ " \"City\": \"SAN FRANCISCO\",\n"
+ " \"State\": \"CA\",\n"
+ " \"Zip\": \"94107\",\n"
+ " \"Country\": \"US\"\n"
+ " },\n"
+ " {\n"
+ " \"precision\": \"zip\",\n"
+ " \"Latitude\": 37.371991,\n"
+ " \"Longitude\": -122.026020,\n"
+ " \"Address\": \"\",\n"
+ " \"City\": \"SUNNYVALE\",\n"
+ " \"State\": \"CA\",\n"
+ " \"Zip\": \"94085\",\n"
+ " \"Country\": \"US\"\n"
+ " }\n"
+ "]",
+ 38, "");
+
+ VAL_ARRAY_BEGIN(36);
+ VAL_OBJECT_BEGIN(16);
+ VAL_NAME("precision");
+ VAL_STRING("zip");
+ VAL_NAME("Latitude");
+ VAL_NUMBER("37.7668");
+ VAL_NAME("Longitude");
+ VAL_NUMBER("-122.3959");
+ VAL_NAME("Address");
+ VAL_STRING("");
+ VAL_NAME("City");
+ VAL_STRING("SAN FRANCISCO");
+ VAL_NAME("State");
+ VAL_STRING("CA");
+ VAL_NAME("Zip");
+ VAL_STRING("94107");
+ VAL_NAME("Country");
+ VAL_STRING("US");
+ VAL_OBJECT_END();
+ VAL_OBJECT_BEGIN(16);
+ VAL_NAME("precision");
+ VAL_STRING("zip");
+ VAL_NAME("Latitude");
+ VAL_NUMBER("37.371991");
+ VAL_NAME("Longitude");
+ VAL_NUMBER("-122.026020");
+ VAL_NAME("Address");
+ VAL_STRING("");
+ VAL_NAME("City");
+ VAL_STRING("SUNNYVALE");
+ VAL_NAME("State");
+ VAL_STRING("CA");
+ VAL_NAME("Zip");
+ VAL_STRING("94085");
+ VAL_NAME("Country");
+ VAL_STRING("US");
+ VAL_OBJECT_END();
+ VAL_ARRAY_END();
+
+ /* Trailing garbage */
+ PARSE_PASS("{\"a\": [0, 1, 2]}]", 8, "]");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": [0, 1, 2]}}", 8, "}");
+ PARSE_PASS("{\"a\": [0, 1, 2]}]", 8, "]");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+
+ PARSE_FAIL("{\"a\": [0, 1, 2}]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\": [0, 1, 2]", SPDK_JSON_PARSE_INCOMPLETE);
+}
+
+
+static void
+test_parse_comment(void)
+{
+ /* Comments are not allowed by the JSON RFC */
+ PARSE_PASS("[0]", 3, "");
+ PARSE_FAIL("/* test */[0]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[/* test */0]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[0/* test */]", SPDK_JSON_PARSE_INVALID);
+
+ /*
+ * This is allowed since the parser stops once it reads a complete JSON value.
+ * The next parse call would fail (see tests above) when parsing the comment.
+ */
+ PARSE_PASS("[0]/* test */", 3, "/* test */");
+
+ /*
+ * Test with non-standard comments enabled.
+ */
+ PARSE_PASS_FLAGS("/* test */[0]", 3, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_ARRAY_BEGIN(1);
+ VAL_NUMBER("0");
+ VAL_ARRAY_END();
+
+ PARSE_PASS_FLAGS("[/* test */0]", 3, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_ARRAY_BEGIN(1);
+ VAL_NUMBER("0");
+ VAL_ARRAY_END();
+
+ PARSE_PASS_FLAGS("[0/* test */]", 3, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_ARRAY_BEGIN(1);
+ VAL_NUMBER("0");
+ VAL_ARRAY_END();
+
+ PARSE_FAIL_FLAGS("/* test */", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("[/* test */", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("[0/* test */", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+
+ /*
+ * Single-line comments
+ */
+ PARSE_PASS_FLAGS("// test\n0", 1, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_NUMBER("0");
+
+ PARSE_PASS_FLAGS("// test\r\n0", 1, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_NUMBER("0");
+
+ PARSE_PASS_FLAGS("// [0] test\n0", 1, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_NUMBER("0");
+
+ PARSE_FAIL_FLAGS("//", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("// test", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("//\n", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+
+ /* Invalid character following slash */
+ PARSE_FAIL_FLAGS("[0/x", SPDK_JSON_PARSE_INVALID, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+
+ /* Single slash at end of buffer */
+ PARSE_FAIL_FLAGS("[0/", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("json", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_parse_literal);
+ CU_ADD_TEST(suite, test_parse_string_simple);
+ CU_ADD_TEST(suite, test_parse_string_control_chars);
+ CU_ADD_TEST(suite, test_parse_string_utf8);
+ CU_ADD_TEST(suite, test_parse_string_escapes_twochar);
+ CU_ADD_TEST(suite, test_parse_string_escapes_unicode);
+ CU_ADD_TEST(suite, test_parse_number);
+ CU_ADD_TEST(suite, test_parse_array);
+ CU_ADD_TEST(suite, test_parse_object);
+ CU_ADD_TEST(suite, test_parse_nesting);
+ CU_ADD_TEST(suite, test_parse_comment);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json/json_util.c/.gitignore b/src/spdk/test/unit/lib/json/json_util.c/.gitignore
new file mode 100644
index 000000000..02f6d50c5
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_util.c/.gitignore
@@ -0,0 +1 @@
+json_util_ut
diff --git a/src/spdk/test/unit/lib/json/json_util.c/Makefile b/src/spdk/test/unit/lib/json/json_util.c/Makefile
new file mode 100644
index 000000000..c9a282083
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_util.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = json_util_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c b/src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c
new file mode 100644
index 000000000..2f883521f
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c
@@ -0,0 +1,954 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "json/json_util.c"
+
+/* For spdk_json_parse() */
+#include "json/json_parse.c"
+
+#define NUM_SETUP(x) \
+ snprintf(buf, sizeof(buf), "%s", x); \
+ v.type = SPDK_JSON_VAL_NUMBER; \
+ v.start = buf; \
+ v.len = sizeof(x) - 1
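+
+/*
+ * NUM_SETUP() fakes a standalone SPDK_JSON_VAL_NUMBER over buf so the
+ * spdk_json_number_to_*() converters can be exercised without running the
+ * parser. For example, NUM_UINT16_PASS("1.2e1", 12) below verifies that a
+ * number in scientific notation converts when it denotes a whole value.
+ */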
+
+#define NUM_UINT16_PASS(s, i) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint16(&v, &u16) == 0); \
+ CU_ASSERT(u16 == i)
+
+#define NUM_UINT16_FAIL(s) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint16(&v, &u16) != 0)
+
+#define NUM_INT32_PASS(s, i) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_int32(&v, &i32) == 0); \
+ CU_ASSERT(i32 == i)
+
+#define NUM_INT32_FAIL(s) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_int32(&v, &i32) != 0)
+
+#define NUM_UINT64_PASS(s, i) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint64(&v, &u64) == 0); \
+ CU_ASSERT(u64 == i)
+
+#define NUM_UINT64_FAIL(s) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint64(&v, &u64) != 0)
+
+static void
+test_strequal(void)
+{
+ struct spdk_json_val v;
+
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "test";
+ v.len = sizeof("test") - 1;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == true);
+ CU_ASSERT(spdk_json_strequal(&v, "TEST") == false);
+ CU_ASSERT(spdk_json_strequal(&v, "hello") == false);
+ CU_ASSERT(spdk_json_strequal(&v, "t") == false);
+
+ v.type = SPDK_JSON_VAL_NAME;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == true);
+
+ v.type = SPDK_JSON_VAL_NUMBER;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == false);
+
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "test\0hello";
+ v.len = sizeof("test\0hello") - 1;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == false);
+}
+
+static void
+test_num_to_uint16(void)
+{
+ struct spdk_json_val v;
+ char buf[100];
+ uint16_t u16 = 0;
+
+ NUM_SETUP("1234");
+ CU_ASSERT(spdk_json_number_to_uint16(&v, &u16) == 0);
+ CU_ASSERT(u16 == 1234);
+
+ NUM_UINT16_PASS("0", 0);
+ NUM_UINT16_PASS("1234", 1234);
+ NUM_UINT16_PASS("1234.00000", 1234);
+ NUM_UINT16_PASS("1.2e1", 12);
+ NUM_UINT16_PASS("12340e-1", 1234);
+
+ NUM_UINT16_FAIL("1.2");
+ NUM_UINT16_FAIL("-1234");
+ NUM_UINT16_FAIL("1.2E0");
+ NUM_UINT16_FAIL("1.234e1");
+ NUM_UINT16_FAIL("12341e-1");
+}
+
+static void
+test_num_to_int32(void)
+{
+ struct spdk_json_val v;
+ char buf[100];
+ int32_t i32 = 0;
+
+ NUM_SETUP("1234");
+ CU_ASSERT(spdk_json_number_to_int32(&v, &i32) == 0);
+ CU_ASSERT(i32 == 1234);
+
+
+ NUM_INT32_PASS("0", 0);
+ NUM_INT32_PASS("1234", 1234);
+ NUM_INT32_PASS("-1234", -1234);
+ NUM_INT32_PASS("1234.00000", 1234);
+ NUM_INT32_PASS("1.2e1", 12);
+ NUM_INT32_PASS("12340e-1", 1234);
+ NUM_INT32_PASS("-0", 0);
+
+ NUM_INT32_FAIL("1.2");
+ NUM_INT32_FAIL("1.2E0");
+ NUM_INT32_FAIL("1.234e1");
+ NUM_INT32_FAIL("12341e-1");
+}
+
+static void
+test_num_to_uint64(void)
+{
+ struct spdk_json_val v;
+ char buf[100];
+ uint64_t u64 = 0;
+
+ NUM_SETUP("1234");
+ CU_ASSERT(spdk_json_number_to_uint64(&v, &u64) == 0);
+ CU_ASSERT(u64 == 1234);
+
+
+ NUM_UINT64_PASS("0", 0);
+ NUM_UINT64_PASS("1234", 1234);
+ NUM_UINT64_PASS("1234.00000", 1234);
+ NUM_UINT64_PASS("1.2e1", 12);
+ NUM_UINT64_PASS("12340e-1", 1234);
+ NUM_UINT64_PASS("123456780e-1", 12345678);
+
+ NUM_UINT64_FAIL("1.2");
+ NUM_UINT64_FAIL("-1234");
+ NUM_UINT64_FAIL("1.2E0");
+ NUM_UINT64_FAIL("1.234e1");
+ NUM_UINT64_FAIL("12341e-1");
+ NUM_UINT64_FAIL("123456781e-1");
+}
+
+static void
+test_decode_object(void)
+{
+ struct my_object {
+ char *my_name;
+ uint32_t my_int;
+ bool my_bool;
+ };
+ struct spdk_json_val object[] = {
+ {"", 6, SPDK_JSON_VAL_OBJECT_BEGIN},
+ {"first", 5, SPDK_JSON_VAL_NAME},
+ {"HELLO", 5, SPDK_JSON_VAL_STRING},
+ {"second", 6, SPDK_JSON_VAL_NAME},
+ {"234", 3, SPDK_JSON_VAL_NUMBER},
+ {"third", 5, SPDK_JSON_VAL_NAME},
+ {"", 1, SPDK_JSON_VAL_TRUE},
+ {"", 0, SPDK_JSON_VAL_OBJECT_END},
+ };
+
+ struct spdk_json_object_decoder decoders[] = {
+ {"first", offsetof(struct my_object, my_name), spdk_json_decode_string, false},
+ {"second", offsetof(struct my_object, my_int), spdk_json_decode_uint32, false},
+ {"third", offsetof(struct my_object, my_bool), spdk_json_decode_bool, false},
+ {"fourth", offsetof(struct my_object, my_bool), spdk_json_decode_bool, true},
+ };
+ struct my_object output = {
+ .my_name = NULL,
+ .my_int = 0,
+ .my_bool = false,
+ };
+ uint32_t answer = 234;
+ char *answer_str = "HELLO";
+ bool answer_bool = true;
+
+ /* Passing Test: object containing simple types */
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 4, &output) == 0);
+ SPDK_CU_ASSERT_FATAL(output.my_name != NULL);
+ CU_ASSERT(memcmp(output.my_name, answer_str, 6) == 0);
+ CU_ASSERT(output.my_int == answer);
+ CU_ASSERT(output.my_bool == answer_bool);
+
+ /* Failing Test: member with no matching decoder */
+ /* i.e. only the first two decoders are passed, so "third" has no matching decoder */
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 2, &output) != 0);
+
+ /* Failing Test: non-optional decoder with no corresponding member */
+
+ decoders[3].optional = false;
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 4, &output) != 0);
+
+ /* return to base state */
+ decoders[3].optional = true;
+
+ /* Failing Test: duplicated names for json values */
+ object[3].start = "first";
+ object[3].len = 5;
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 3, &output) != 0);
+
+ /* return to base state */
+ object[3].start = "second";
+ object[3].len = 6;
+
+ /* Failing Test: invalid value for decoder */
+ object[2].start = "HELO";
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 3, &output) != 0);
+
+ /* return to base state */
+ object[2].start = "HELLO";
+
+ /* Failing Test: not an object */
+ object[0].type = SPDK_JSON_VAL_ARRAY_BEGIN;
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 3, &output) != 0);
+
+ free(output.my_name);
+}
+
+static void
+test_decode_array(void)
+{
+ struct spdk_json_val values[4];
+ uint32_t my_int[2] = {0, 0};
+ char *my_string[2] = {NULL, NULL};
+ size_t out_size;
+
+ /* passing integer test */
+ values[0].type = SPDK_JSON_VAL_ARRAY_BEGIN;
+ values[0].len = 2;
+ values[1].type = SPDK_JSON_VAL_NUMBER;
+ values[1].len = 4;
+ values[1].start = "1234";
+ values[2].type = SPDK_JSON_VAL_NUMBER;
+ values[2].len = 4;
+ values[2].start = "5678";
+ values[3].type = SPDK_JSON_VAL_ARRAY_END;
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) == 0);
+ CU_ASSERT(my_int[0] == 1234);
+ CU_ASSERT(my_int[1] == 5678);
+ CU_ASSERT(out_size == 2);
+
+ /* array length exceeds max */
+ values[0].len = 3;
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* mixed types */
+ values[0].len = 2;
+ values[2].type = SPDK_JSON_VAL_STRING;
+ values[2].len = 5;
+ values[2].start = "HELLO";
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* no array start */
+ values[0].type = SPDK_JSON_VAL_NUMBER;
+ values[2].type = SPDK_JSON_VAL_NUMBER;
+ values[2].len = 4;
+ values[2].start = "5678";
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* mismatched array type and parser */
+ values[0].type = SPDK_JSON_VAL_ARRAY_BEGIN;
+ values[1].type = SPDK_JSON_VAL_STRING;
+ values[1].len = 5;
+ values[1].start = "HELLO";
+ values[2].type = SPDK_JSON_VAL_STRING;
+ values[2].len = 5;
+ values[2].start = "WORLD";
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+	/* passing string test */
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_string, my_string, 2, &out_size,
+ sizeof(char *)) == 0);
+ SPDK_CU_ASSERT_FATAL(my_string[0] != NULL);
+ SPDK_CU_ASSERT_FATAL(my_string[1] != NULL);
+ CU_ASSERT(memcmp(my_string[0], "HELLO", 6) == 0);
+ CU_ASSERT(memcmp(my_string[1], "WORLD", 6) == 0);
+ CU_ASSERT(out_size == 2);
+
+ free(my_string[0]);
+ free(my_string[1]);
+}
+
+static void
+test_decode_bool(void)
+{
+ struct spdk_json_val v;
+ bool b;
+
+ /* valid bool (true) */
+ v.type = SPDK_JSON_VAL_TRUE;
+ b = false;
+ CU_ASSERT(spdk_json_decode_bool(&v, &b) == 0);
+ CU_ASSERT(b == true);
+
+ /* valid bool (false) */
+ v.type = SPDK_JSON_VAL_FALSE;
+ b = true;
+ CU_ASSERT(spdk_json_decode_bool(&v, &b) == 0);
+ CU_ASSERT(b == false);
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_NULL;
+ CU_ASSERT(spdk_json_decode_bool(&v, &b) != 0);
+}
+
+static void
+test_decode_int32(void)
+{
+ struct spdk_json_val v;
+ int32_t i;
+
+ /* correct type and valid value */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "33";
+ v.len = 2;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 33);
+
+ /* correct type and invalid value (float) */
+ v.start = "32.45";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "String";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_TRUE;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* edge case (integer max) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "2147483647";
+ v.len = 10;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 2147483647);
+
+ /* invalid value (overflow) */
+ v.start = "2147483648";
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* edge case (integer min) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "-2147483648";
+ v.len = 11;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == -2147483648);
+
+ /* invalid value (overflow) */
+ v.start = "-2147483649";
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "4e3";
+ v.len = 3;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 4000);
+
+	/* invalid negative exponent (result is not an integer) */
+ v.start = "-400e-4";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+	/* invalid negative exponent (result is not an integer) */
+ v.start = "400e-4";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "-400e-2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == -4);
+
+ /* invalid exponent (overflow) */
+ v.start = "-2e32";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "2.13e2";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 213);
+
+ /* invalid exponent with decimal */
+ v.start = "2.134e2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+}
+
+static void
+test_decode_uint16(void)
+{
+ struct spdk_json_val v;
+	uint16_t i;
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "Strin";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* invalid value (float) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "123.4";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* edge case (0) */
+ v.start = "0";
+ v.len = 1;
+ i = 456;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 0);
+
+ /* invalid value (negative) */
+ v.start = "-1";
+ v.len = 2;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* edge case (maximum) */
+ v.start = "65535";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 65535);
+
+ /* invalid value (overflow) */
+ v.start = "65536";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "66E2";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 6600);
+
+ /* invalid exponent (overflow) */
+ v.start = "66E3";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* invalid exponent (decimal) */
+ v.start = "65.535E2";
+	v.len = 8;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "65.53E2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 6553);
+
+	/* invalid negative exponent (result is not an integer) */
+ v.start = "40e-2";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+	/* invalid negative exponent (result is negative) */
+ v.start = "-40e-1";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "40e-1";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 4);
+}
+
+static void
+test_decode_uint32(void)
+{
+ struct spdk_json_val v;
+ uint32_t i;
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "String";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* invalid value (float) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "123.45";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* edge case (0) */
+ v.start = "0";
+ v.len = 1;
+ i = 456;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 0);
+
+ /* invalid value (negative) */
+ v.start = "-1";
+ v.len = 2;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* edge case (maximum) */
+ v.start = "4294967295";
+ v.len = 10;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4294967295);
+
+ /* invalid value (overflow) */
+ v.start = "4294967296";
+ v.len = 10;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "42E2";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4200);
+
+ /* invalid exponent (overflow) */
+ v.start = "42e32";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* invalid exponent (decimal) */
+ v.start = "42.323E2";
+ v.len = 8;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "42.32E2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4232);
+
+	/* invalid negative exponent (result is not an integer) */
+ v.start = "400e-4";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+	/* invalid negative exponent (result is negative) */
+ v.start = "-400e-2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "400e-2";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4);
+
+ /* valid negative exponent */
+ v.start = "10e-1";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 1);
+}
+
+static void
+test_decode_uint64(void)
+{
+ struct spdk_json_val v;
+ uint64_t i;
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "String";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* invalid value (float) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "123.45";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* edge case (0) */
+ v.start = "0";
+ v.len = 1;
+ i = 456;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 0);
+
+ /* invalid value (negative) */
+ v.start = "-1";
+ v.len = 2;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* edge case (maximum) */
+ v.start = "18446744073709551615";
+ v.len = 20;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 18446744073709551615U);
+
+ /* invalid value (overflow) */
+ v.start = "18446744073709551616";
+ v.len = 20;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "42E2";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 4200);
+
+ /* invalid exponent (overflow) */
+ v.start = "42e64";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* invalid exponent (decimal) */
+ v.start = "42.323E2";
+ v.len = 8;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "42.32E2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 4232);
+
+	/* invalid negative exponent (result is not an integer) */
+ v.start = "400e-4";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+	/* invalid negative exponent (result is negative) */
+ v.start = "-400e-2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "400e-2";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 4);
+}
+
+static void
+test_decode_string(void)
+{
+ struct spdk_json_val v;
+ char *value = NULL;
+
+ /* Passing Test: Standard */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "HELLO";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(memcmp(value, v.start, 6) == 0);
+
+ /* Edge Test: Empty String */
+ v.start = "";
+ v.len = 0;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(memcmp(value, v.start, 1) == 0);
+
+ /*
+ * Failing Test: Null Terminator In String
+	 * It is valid for a JSON string to contain \u0000, and the parser will accept it.
+	 * However, a null-terminated C string cannot contain '\0', so the decode should be
+	 * rejected if that character is found before the end of the string.
+ */
+ v.start = "HELO";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) != 0);
+
+ /* Failing Test: Wrong Type */
+ v.start = "45673";
+ v.type = SPDK_JSON_VAL_NUMBER;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) != 0);
+
+ /* Passing Test: Special Characters */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "HE\bLL\tO\\WORLD";
+ v.len = 13;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(memcmp(value, v.start, 14) == 0);
+
+ free(value);
+}
+
+char ut_json_text[] =
+ "{"
+ " \"string\": \"Some string data\","
+ " \"object\": { "
+	"		\"another_string\": \"Yet another string data\","
+ " \"array name with space\": [1, [], {} ]"
+ " },"
+ " \"array\": [ \"Text\", 2, {} ]"
+ "}"
+ ;
+
+static void
+test_find(void)
+{
+ struct spdk_json_val *values, *key, *val, *key2, *val2;
+ ssize_t values_cnt;
+ ssize_t rc;
+
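+	/* Two-pass parse: the first call with a NULL values array only counts the
+	 * values; the second call fills the allocated array. */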
+ values_cnt = spdk_json_parse(ut_json_text, strlen(ut_json_text), NULL, 0, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt > 0);
+
+ values = calloc(values_cnt, sizeof(struct spdk_json_val));
+ SPDK_CU_ASSERT_FATAL(values != NULL);
+
+ rc = spdk_json_parse(ut_json_text, strlen(ut_json_text), values, values_cnt, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt == rc);
+
+ key = val = NULL;
+ rc = spdk_json_find(values, "string", &key, &val, SPDK_JSON_VAL_STRING);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(key != NULL && spdk_json_strequal(key, "string") == true);
+ CU_ASSERT(val != NULL && spdk_json_strequal(val, "Some string data") == true);
+
+ key = val = NULL;
+ rc = spdk_json_find(values, "object", &key, &val, SPDK_JSON_VAL_OBJECT_BEGIN);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(key != NULL && spdk_json_strequal(key, "object") == true);
+
+ /* Find key in "object" by passing SPDK_JSON_VAL_ANY to match any type */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "array name with space", &key2, &val2, SPDK_JSON_VAL_ANY);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(key2 != NULL && spdk_json_strequal(key2, "array name with space") == true);
+ CU_ASSERT(val2 != NULL && val2->type == SPDK_JSON_VAL_ARRAY_BEGIN);
+
+	/* Find the "array name with space" key in "object" again, this time passing
+	 * SPDK_JSON_VAL_ARRAY_BEGIN to match only an array */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "array name with space", &key2, &val2, SPDK_JSON_VAL_ARRAY_BEGIN);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(key2 != NULL && spdk_json_strequal(key2, "array name with space") == true);
+ CU_ASSERT(val2 != NULL && val2->type == SPDK_JSON_VAL_ARRAY_BEGIN);
+
+ /* Negative test - key doesn't exist */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "this_key_does_not_exist", &key2, &val2, SPDK_JSON_VAL_ANY);
+ CU_ASSERT(rc == -ENOENT);
+
+ /* Negative test - key type doesn't match */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "another_string", &key2, &val2, SPDK_JSON_VAL_ARRAY_BEGIN);
+ CU_ASSERT(rc == -EDOM);
+
+ free(values);
+}
+
+static void
+test_iterating(void)
+{
+ struct spdk_json_val *values;
+ struct spdk_json_val *string_key;
+ struct spdk_json_val *object_key, *object_val;
+ struct spdk_json_val *array_key, *array_val;
+ struct spdk_json_val *another_string_key;
+ struct spdk_json_val *array_name_with_space_key, *array_name_with_space_val;
+ struct spdk_json_val *it;
+ ssize_t values_cnt;
+ ssize_t rc;
+
+ values_cnt = spdk_json_parse(ut_json_text, strlen(ut_json_text), NULL, 0, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt > 0);
+
+ values = calloc(values_cnt, sizeof(struct spdk_json_val));
+ SPDK_CU_ASSERT_FATAL(values != NULL);
+
+ rc = spdk_json_parse(ut_json_text, strlen(ut_json_text), values, values_cnt, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt == rc);
+
+	/* Iterate over object keys. The JSON spec doesn't guarantee the order of keys
+	 * in an object, but the SPDK implementation implicitly preserves it.
+ */
+ string_key = spdk_json_object_first(values);
+ CU_ASSERT(spdk_json_strequal(string_key, "string") == true);
+
+ object_key = spdk_json_next(string_key);
+ object_val = json_value(object_key);
+ CU_ASSERT(spdk_json_strequal(object_key, "object") == true);
+
+ array_key = spdk_json_next(object_key);
+ array_val = json_value(array_key);
+ CU_ASSERT(spdk_json_strequal(array_key, "array") == true);
+
+	/* spdk_json_next() returns NULL at the closing '}' */
+ CU_ASSERT(spdk_json_next(array_key) == NULL);
+
+ /* Iterate over subobjects */
+ another_string_key = spdk_json_object_first(object_val);
+ CU_ASSERT(spdk_json_strequal(another_string_key, "another_string") == true);
+
+ array_name_with_space_key = spdk_json_next(another_string_key);
+ array_name_with_space_val = json_value(array_name_with_space_key);
+ CU_ASSERT(spdk_json_strequal(array_name_with_space_key, "array name with space") == true);
+
+ CU_ASSERT(spdk_json_next(array_name_with_space_key) == NULL);
+
+ /* Iterate over array in subobject */
+ it = spdk_json_array_first(array_name_with_space_val);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_NUMBER);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_ARRAY_BEGIN);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_OBJECT_BEGIN);
+
+ it = spdk_json_next(it);
+ CU_ASSERT(it == NULL);
+
+ /* Iterate over array in root object */
+ it = spdk_json_array_first(array_val);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_STRING);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_NUMBER);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_OBJECT_BEGIN);
+
+ /* Array end */
+ it = spdk_json_next(it);
+ CU_ASSERT(it == NULL);
+
+ free(values);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("json", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_strequal);
+ CU_ADD_TEST(suite, test_num_to_uint16);
+ CU_ADD_TEST(suite, test_num_to_int32);
+ CU_ADD_TEST(suite, test_num_to_uint64);
+ CU_ADD_TEST(suite, test_decode_object);
+ CU_ADD_TEST(suite, test_decode_array);
+ CU_ADD_TEST(suite, test_decode_bool);
+ CU_ADD_TEST(suite, test_decode_uint16);
+ CU_ADD_TEST(suite, test_decode_int32);
+ CU_ADD_TEST(suite, test_decode_uint32);
+ CU_ADD_TEST(suite, test_decode_uint64);
+ CU_ADD_TEST(suite, test_decode_string);
+ CU_ADD_TEST(suite, test_find);
+ CU_ADD_TEST(suite, test_iterating);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json/json_write.c/.gitignore b/src/spdk/test/unit/lib/json/json_write.c/.gitignore
new file mode 100644
index 000000000..dd576b238
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_write.c/.gitignore
@@ -0,0 +1 @@
+json_write_ut
diff --git a/src/spdk/test/unit/lib/json/json_write.c/Makefile b/src/spdk/test/unit/lib/json/json_write.c/Makefile
new file mode 100644
index 000000000..9fe1fa916
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_write.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = json_write_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c b/src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c
new file mode 100644
index 000000000..d208f650c
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c
@@ -0,0 +1,736 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "json/json_write.c"
+#include "json/json_parse.c"
+
+#include "spdk/util.h"
+
+static uint8_t g_buf[1000];
+static uint8_t *g_write_pos;
+
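+/* Test write target: appends to g_buf and fails once the buffer is full. */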
+static int
+write_cb(void *cb_ctx, const void *data, size_t size)
+{
+ size_t buf_free = g_buf + sizeof(g_buf) - g_write_pos;
+
+ if (size > buf_free) {
+ return -1;
+ }
+
+ memcpy(g_write_pos, data, size);
+ g_write_pos += size;
+
+ return 0;
+}
+
+#define BEGIN() \
+ memset(g_buf, 0, sizeof(g_buf)); \
+ g_write_pos = g_buf; \
+ w = spdk_json_write_begin(write_cb, NULL, 0); \
+ SPDK_CU_ASSERT_FATAL(w != NULL)
+
+#define END(json) \
+ CU_ASSERT(spdk_json_write_end(w) == 0); \
+ CU_ASSERT(g_write_pos - g_buf == sizeof(json) - 1); \
+ CU_ASSERT(memcmp(json, g_buf, sizeof(json) - 1) == 0)
+
+#define END_NOCMP() \
+ CU_ASSERT(spdk_json_write_end(w) == 0)
+
+#define END_FAIL() \
+ CU_ASSERT(spdk_json_write_end(w) < 0)
+
+#define VAL_STRING(str) \
+ CU_ASSERT(spdk_json_write_string_raw(w, str, sizeof(str) - 1) == 0)
+
+#define VAL_STRING_FAIL(str) \
+ CU_ASSERT(spdk_json_write_string_raw(w, str, sizeof(str) - 1) < 0)
+
+#define STR_PASS(in, out) \
+ BEGIN(); VAL_STRING(in); END("\"" out "\"")
+
+#define STR_FAIL(in) \
+ BEGIN(); VAL_STRING_FAIL(in); END_FAIL()
+
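+/* The UTF-16LE helpers pass the length in 16-bit code units: sizeof(str) gives
+ * bytes, dividing by sizeof(uint16_t) gives code units, and "- 1" drops the
+ * two-byte terminator appended to each test string. */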
+#define VAL_STRING_UTF16LE(str) \
+ CU_ASSERT(spdk_json_write_string_utf16le_raw(w, (const uint16_t *)str, sizeof(str) / sizeof(uint16_t) - 1) == 0)
+
+#define VAL_STRING_UTF16LE_FAIL(str) \
+ CU_ASSERT(spdk_json_write_string_utf16le_raw(w, (const uint16_t *)str, sizeof(str) / sizeof(uint16_t) - 1) < 0)
+
+#define STR_UTF16LE_PASS(in, out) \
+ BEGIN(); VAL_STRING_UTF16LE(in); END("\"" out "\"")
+
+#define STR_UTF16LE_FAIL(in) \
+ BEGIN(); VAL_STRING_UTF16LE_FAIL(in); END_FAIL()
+
+#define VAL_NAME(name) \
+ CU_ASSERT(spdk_json_write_name_raw(w, name, sizeof(name) - 1) == 0)
+
+#define VAL_NULL() CU_ASSERT(spdk_json_write_null(w) == 0)
+#define VAL_TRUE() CU_ASSERT(spdk_json_write_bool(w, true) == 0)
+#define VAL_FALSE() CU_ASSERT(spdk_json_write_bool(w, false) == 0)
+
+#define VAL_INT32(i) CU_ASSERT(spdk_json_write_int32(w, i) == 0)
+#define VAL_UINT32(u) CU_ASSERT(spdk_json_write_uint32(w, u) == 0)
+
+#define VAL_INT64(i) CU_ASSERT(spdk_json_write_int64(w, i) == 0)
+#define VAL_UINT64(u) CU_ASSERT(spdk_json_write_uint64(w, u) == 0)
+
+#define VAL_ARRAY_BEGIN() CU_ASSERT(spdk_json_write_array_begin(w) == 0)
+#define VAL_ARRAY_END() CU_ASSERT(spdk_json_write_array_end(w) == 0)
+
+#define VAL_OBJECT_BEGIN() CU_ASSERT(spdk_json_write_object_begin(w) == 0)
+#define VAL_OBJECT_END() CU_ASSERT(spdk_json_write_object_end(w) == 0)
+
+#define VAL(v) CU_ASSERT(spdk_json_write_val(w, v) == 0)
+
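+/*
+ * The macros above wrap the plain writer API. Outside of these tests, a
+ * minimal writer sequence looks roughly like this (illustrative sketch; the
+ * "count" member name is made up):
+ *
+ *	struct spdk_json_write_ctx *w = spdk_json_write_begin(write_cb, NULL, 0);
+ *
+ *	spdk_json_write_object_begin(w);
+ *	spdk_json_write_named_uint32(w, "count", 42);
+ *	spdk_json_write_object_end(w);
+ *	spdk_json_write_end(w);		(emits {"count":42} through write_cb)
+ */
+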
+static void
+test_write_literal(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_NULL();
+ END("null");
+
+ BEGIN();
+ VAL_TRUE();
+ END("true");
+
+ BEGIN();
+ VAL_FALSE();
+ END("false");
+}
+
+static void
+test_write_string_simple(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ STR_PASS("hello world", "hello world");
+ STR_PASS(" ", " ");
+ STR_PASS("~", "~");
+}
+
+static void
+test_write_string_escapes(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ /* Two-character escapes */
+ STR_PASS("\b", "\\b");
+ STR_PASS("\f", "\\f");
+ STR_PASS("\n", "\\n");
+ STR_PASS("\r", "\\r");
+ STR_PASS("\t", "\\t");
+ STR_PASS("\"", "\\\"");
+ STR_PASS("\\", "\\\\");
+
+ /* JSON defines an escape for forward slash, but it is optional */
+ STR_PASS("/", "/");
+
+ STR_PASS("hello\nworld", "hello\\nworld");
+
+ STR_PASS("\x00", "\\u0000");
+ STR_PASS("\x01", "\\u0001");
+ STR_PASS("\x02", "\\u0002");
+
+ STR_PASS("\xC3\xB6", "\\u00F6");
+ STR_PASS("\xE2\x88\x9A", "\\u221A");
+ STR_PASS("\xEA\xAA\xAA", "\\uAAAA");
+
+ /* Surrogate pairs */
+ STR_PASS("\xF0\x9D\x84\x9E", "\\uD834\\uDD1E");
+ STR_PASS("\xF0\xA0\x9C\x8E", "\\uD841\\uDF0E");
+
+ /* Examples from RFC 3629 */
+ STR_PASS("\x41\xE2\x89\xA2\xCE\x91\x2E", "A\\u2262\\u0391.");
+ STR_PASS("\xED\x95\x9C\xEA\xB5\xAD\xEC\x96\xB4", "\\uD55C\\uAD6D\\uC5B4");
+ STR_PASS("\xE6\x97\xA5\xE6\x9C\xAC\xE8\xAA\x9E", "\\u65E5\\u672C\\u8A9E");
+ STR_PASS("\xEF\xBB\xBF\xF0\xA3\x8E\xB4", "\\uFEFF\\uD84C\\uDFB4");
+
+ /* UTF-8 edge cases */
+ STR_PASS("\x7F", "\\u007F");
+ STR_FAIL("\x80");
+ STR_FAIL("\xC1");
+ STR_FAIL("\xC2");
+ STR_PASS("\xC2\x80", "\\u0080");
+ STR_PASS("\xC2\xBF", "\\u00BF");
+ STR_PASS("\xDF\x80", "\\u07C0");
+ STR_PASS("\xDF\xBF", "\\u07FF");
+ STR_FAIL("\xDF");
+ STR_FAIL("\xE0\x80");
+ STR_FAIL("\xE0\x1F");
+ STR_FAIL("\xE0\x1F\x80");
+ STR_FAIL("\xE0");
+ STR_FAIL("\xE0\xA0");
+ STR_PASS("\xE0\xA0\x80", "\\u0800");
+ STR_PASS("\xE0\xA0\xBF", "\\u083F");
+ STR_FAIL("\xE0\xA0\xC0");
+ STR_PASS("\xE0\xBF\x80", "\\u0FC0");
+ STR_PASS("\xE0\xBF\xBF", "\\u0FFF");
+ STR_FAIL("\xE0\xC0\x80");
+ STR_FAIL("\xE1");
+ STR_FAIL("\xE1\x80");
+ STR_FAIL("\xE1\x7F\x80");
+ STR_FAIL("\xE1\x80\x7F");
+ STR_PASS("\xE1\x80\x80", "\\u1000");
+ STR_PASS("\xE1\x80\xBF", "\\u103F");
+ STR_PASS("\xE1\xBF\x80", "\\u1FC0");
+ STR_PASS("\xE1\xBF\xBF", "\\u1FFF");
+ STR_FAIL("\xE1\xC0\x80");
+ STR_FAIL("\xE1\x80\xC0");
+ STR_PASS("\xEF\x80\x80", "\\uF000");
+ STR_PASS("\xEF\xBF\xBF", "\\uFFFF");
+ STR_FAIL("\xF0");
+ STR_FAIL("\xF0\x90");
+ STR_FAIL("\xF0\x90\x80");
+ STR_FAIL("\xF0\x80\x80\x80");
+ STR_FAIL("\xF0\x8F\x80\x80");
+ STR_PASS("\xF0\x90\x80\x80", "\\uD800\\uDC00");
+ STR_PASS("\xF0\x90\x80\xBF", "\\uD800\\uDC3F");
+ STR_PASS("\xF0\x90\xBF\x80", "\\uD803\\uDFC0");
+ STR_PASS("\xF0\xBF\x80\x80", "\\uD8BC\\uDC00");
+ STR_FAIL("\xF0\xC0\x80\x80");
+ STR_FAIL("\xF1");
+ STR_FAIL("\xF1\x80");
+ STR_FAIL("\xF1\x80\x80");
+ STR_FAIL("\xF1\x80\x80\x7F");
+ STR_PASS("\xF1\x80\x80\x80", "\\uD8C0\\uDC00");
+ STR_PASS("\xF1\x80\x80\xBF", "\\uD8C0\\uDC3F");
+ STR_PASS("\xF1\x80\xBF\x80", "\\uD8C3\\uDFC0");
+ STR_PASS("\xF1\xBF\x80\x80", "\\uD9BC\\uDC00");
+ STR_PASS("\xF3\x80\x80\x80", "\\uDAC0\\uDC00");
+ STR_FAIL("\xF3\xC0\x80\x80");
+ STR_FAIL("\xF3\x80\xC0\x80");
+ STR_FAIL("\xF3\x80\x80\xC0");
+ STR_FAIL("\xF4");
+ STR_FAIL("\xF4\x80");
+ STR_FAIL("\xF4\x80\x80");
+ STR_PASS("\xF4\x80\x80\x80", "\\uDBC0\\uDC00");
+ STR_PASS("\xF4\x8F\x80\x80", "\\uDBFC\\uDC00");
+ STR_PASS("\xF4\x8F\xBF\xBF", "\\uDBFF\\uDFFF");
+ STR_FAIL("\xF4\x90\x80\x80");
+ STR_FAIL("\xF5");
+ STR_FAIL("\xF5\x80");
+ STR_FAIL("\xF5\x80\x80");
+ STR_FAIL("\xF5\x80\x80\x80");
+ STR_FAIL("\xF5\x80\x80\x80\x80");
+
+ /* Overlong encodings */
+ STR_FAIL("\xC0\x80");
+
+ /* Surrogate pairs */
+ STR_FAIL("\xED\xA0\x80"); /* U+D800 First high surrogate */
+ STR_FAIL("\xED\xAF\xBF"); /* U+DBFF Last high surrogate */
+ STR_FAIL("\xED\xB0\x80"); /* U+DC00 First low surrogate */
+ STR_FAIL("\xED\xBF\xBF"); /* U+DFFF Last low surrogate */
+ STR_FAIL("\xED\xA1\x8C\xED\xBE\xB4"); /* U+233B4 (invalid surrogate pair encoding) */
+}
+
+static void
+test_write_string_utf16le(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ /* All characters in BMP */
+ STR_UTF16LE_PASS(((uint8_t[]) {
+ 'H', 0, 'e', 0, 'l', 0, 'l', 0, 'o', 0, 0x15, 0xFE, 0, 0
+ }), "Hello\\uFE15");
+
+ /* Surrogate pair */
+ STR_UTF16LE_PASS(((uint8_t[]) {
+ 'H', 0, 'i', 0, 0x34, 0xD8, 0x1E, 0xDD, '!', 0, 0, 0
+ }), "Hi\\uD834\\uDD1E!");
+
+ /* Valid high surrogate, but no low surrogate */
+ STR_UTF16LE_FAIL(((uint8_t[]) {
+ 0x00, 0xD8, 0, 0 /* U+D800 */
+ }));
+
+ /* Invalid leading low surrogate */
+ STR_UTF16LE_FAIL(((uint8_t[]) {
+ 0x00, 0xDC, 0x00, 0xDC, 0, 0 /* U+DC00 U+DC00 */
+ }));
+
+ /* Valid high surrogate followed by another high surrogate (invalid) */
+ STR_UTF16LE_FAIL(((uint8_t[]) {
+ 0x00, 0xD8, 0x00, 0xD8, 0, 0 /* U+D800 U+D800 */
+ }));
+}
+
+static void
+test_write_number_int32(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_INT32(0);
+ END("0");
+
+ BEGIN();
+ VAL_INT32(1);
+ END("1");
+
+ BEGIN();
+ VAL_INT32(123);
+ END("123");
+
+ BEGIN();
+ VAL_INT32(-123);
+ END("-123");
+
+ BEGIN();
+ VAL_INT32(2147483647);
+ END("2147483647");
+
+ BEGIN();
+ VAL_INT32(-2147483648);
+ END("-2147483648");
+}
+
+static void
+test_write_number_uint32(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_UINT32(0);
+ END("0");
+
+ BEGIN();
+ VAL_UINT32(1);
+ END("1");
+
+ BEGIN();
+ VAL_UINT32(123);
+ END("123");
+
+ BEGIN();
+ VAL_UINT32(2147483647);
+ END("2147483647");
+
+ BEGIN();
+ VAL_UINT32(4294967295);
+ END("4294967295");
+}
+
+static void
+test_write_number_int64(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_INT64(0);
+ END("0");
+
+ BEGIN();
+ VAL_INT64(1);
+ END("1");
+
+ BEGIN();
+ VAL_INT64(123);
+ END("123");
+
+ BEGIN();
+ VAL_INT64(-123);
+ END("-123");
+
+ BEGIN();
+ VAL_INT64(INT64_MAX);
+ END("9223372036854775807");
+
+ BEGIN();
+ VAL_INT64(INT64_MIN);
+ END("-9223372036854775808");
+}
+
+static void
+test_write_number_uint64(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_UINT64(0);
+ END("0");
+
+ BEGIN();
+ VAL_UINT64(1);
+ END("1");
+
+ BEGIN();
+ VAL_UINT64(123);
+ END("123");
+
+ BEGIN();
+ VAL_UINT64(INT64_MAX);
+ END("9223372036854775807");
+
+ BEGIN();
+ VAL_UINT64(UINT64_MAX);
+ END("18446744073709551615");
+}
+
+static void
+test_write_array(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ END("[]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_END();
+ END("[0]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_INT32(1);
+ VAL_ARRAY_END();
+ END("[0,1]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_INT32(1);
+ VAL_INT32(2);
+ VAL_ARRAY_END();
+ END("[0,1,2]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_STRING("a");
+ VAL_ARRAY_END();
+ END("[\"a\"]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_STRING("a");
+ VAL_STRING("b");
+ VAL_ARRAY_END();
+ END("[\"a\",\"b\"]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_STRING("a");
+ VAL_STRING("b");
+ VAL_STRING("c");
+ VAL_ARRAY_END();
+ END("[\"a\",\"b\",\"c\"]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_TRUE();
+ VAL_ARRAY_END();
+ END("[true]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_TRUE();
+ VAL_FALSE();
+ VAL_ARRAY_END();
+ END("[true,false]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_TRUE();
+ VAL_FALSE();
+ VAL_TRUE();
+ VAL_ARRAY_END();
+ END("[true,false,true]");
+}
+
+static void
+test_write_object(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_OBJECT_END();
+ END("{}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_OBJECT_END();
+ END("{\"a\":0}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_NAME("b");
+ VAL_INT32(1);
+ VAL_OBJECT_END();
+ END("{\"a\":0,\"b\":1}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_NAME("b");
+ VAL_INT32(1);
+ VAL_NAME("c");
+ VAL_INT32(2);
+ VAL_OBJECT_END();
+ END("{\"a\":0,\"b\":1,\"c\":2}");
+}
+
+static void
+test_write_nesting(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ END("[[]]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ END("[[[]]]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ END("[0,[]]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_INT32(0);
+ VAL_ARRAY_END();
+ END("[[],0]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(1);
+ VAL_ARRAY_END();
+ VAL_INT32(2);
+ VAL_ARRAY_END();
+ END("[0,[1],2]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_INT32(1);
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(2);
+ VAL_INT32(3);
+ VAL_ARRAY_END();
+ VAL_INT32(4);
+ VAL_INT32(5);
+ VAL_ARRAY_END();
+ END("[0,1,[2,3],4,5]");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+ END("{\"a\":{}}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("b");
+ VAL_INT32(0);
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+ END("{\"a\":{\"b\":0}}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+ END("{\"a\":[0]}");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_OBJECT_END();
+ VAL_ARRAY_END();
+ END("[{\"a\":0}]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("b");
+ VAL_ARRAY_BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("c");
+ VAL_INT32(1);
+ VAL_OBJECT_END();
+ VAL_INT32(2);
+ VAL_ARRAY_END();
+ VAL_NAME("d");
+ VAL_INT32(3);
+ VAL_OBJECT_END();
+ VAL_NAME("e");
+ VAL_INT32(4);
+ VAL_OBJECT_END();
+ VAL_INT32(5);
+ VAL_ARRAY_END();
+ END("[{\"a\":{\"b\":[{\"c\":1},2],\"d\":3},\"e\":4},5]");
+
+ /* Examples from RFC 7159 */
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("Image");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("Width");
+ VAL_INT32(800);
+ VAL_NAME("Height");
+ VAL_INT32(600);
+ VAL_NAME("Title");
+ VAL_STRING("View from 15th Floor");
+ VAL_NAME("Thumbnail");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("Url");
+ VAL_STRING("http://www.example.com/image/481989943");
+ VAL_NAME("Height");
+ VAL_INT32(125);
+ VAL_NAME("Width");
+ VAL_INT32(100);
+ VAL_OBJECT_END();
+ VAL_NAME("Animated");
+ VAL_FALSE();
+ VAL_NAME("IDs");
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(116);
+ VAL_INT32(943);
+ VAL_INT32(234);
+ VAL_INT32(38793);
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+ END(
+ "{\"Image\":"
+ "{"
+ "\"Width\":800,"
+ "\"Height\":600,"
+ "\"Title\":\"View from 15th Floor\","
+ "\"Thumbnail\":{"
+ "\"Url\":\"http://www.example.com/image/481989943\","
+ "\"Height\":125,"
+ "\"Width\":100"
+ "},"
+ "\"Animated\":false,"
+ "\"IDs\":[116,943,234,38793]"
+ "}"
+ "}");
+}
+
+/* Round-trip parse and write test */
+static void
+test_write_val(void)
+{
+ struct spdk_json_write_ctx *w;
+ struct spdk_json_val values[100];
+ char src[] = "{\"a\":[1,2,3],\"b\":{\"c\":\"d\"},\"e\":true,\"f\":false,\"g\":null}";
+
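+	/* 19 values: every begin/end token, member name, and primitive in src
+	 * counts as one spdk_json_val. */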
+ CU_ASSERT(spdk_json_parse(src, strlen(src), values, SPDK_COUNTOF(values), NULL,
+ SPDK_JSON_PARSE_FLAG_DECODE_IN_PLACE) == 19);
+
+ BEGIN();
+ VAL(values);
+ END("{\"a\":[1,2,3],\"b\":{\"c\":\"d\"},\"e\":true,\"f\":false,\"g\":null}");
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("json", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_write_literal);
+ CU_ADD_TEST(suite, test_write_string_simple);
+ CU_ADD_TEST(suite, test_write_string_escapes);
+ CU_ADD_TEST(suite, test_write_string_utf16le);
+ CU_ADD_TEST(suite, test_write_number_int32);
+ CU_ADD_TEST(suite, test_write_number_uint32);
+ CU_ADD_TEST(suite, test_write_number_int64);
+ CU_ADD_TEST(suite, test_write_number_uint64);
+ CU_ADD_TEST(suite, test_write_array);
+ CU_ADD_TEST(suite, test_write_object);
+ CU_ADD_TEST(suite, test_write_nesting);
+ CU_ADD_TEST(suite, test_write_val);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json_mock.c b/src/spdk/test/unit/lib/json_mock.c
new file mode 100644
index 000000000..b9cee171e
--- /dev/null
+++ b/src/spdk/test/unit/lib/json_mock.c
@@ -0,0 +1,81 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/json.h"
+#include "spdk_internal/mock.h"
+
+DEFINE_STUB(spdk_json_write_begin, struct spdk_json_write_ctx *, (spdk_json_write_cb write_cb,
+ void *cb_ctx, uint32_t flags), NULL);
+
+DEFINE_STUB(spdk_json_write_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
+DEFINE_STUB(spdk_json_write_int32, int, (struct spdk_json_write_ctx *w, int32_t val), 0);
+DEFINE_STUB(spdk_json_write_uint32, int, (struct spdk_json_write_ctx *w, uint32_t val), 0);
+DEFINE_STUB(spdk_json_write_int64, int, (struct spdk_json_write_ctx *w, int64_t val), 0);
+DEFINE_STUB(spdk_json_write_uint64, int, (struct spdk_json_write_ctx *w, uint64_t val), 0);
+DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0);
+DEFINE_STUB(spdk_json_write_string_raw, int, (struct spdk_json_write_ctx *w, const char *val,
+ size_t len), 0);
+
+DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
+DEFINE_STUB(spdk_json_write_name_raw, int, (struct spdk_json_write_ctx *w, const char *name,
+ size_t len), 0);
+
+/* Utility functions */
+DEFINE_STUB(spdk_json_write_named_null, int, (struct spdk_json_write_ctx *w, const char *name), 0);
+DEFINE_STUB(spdk_json_write_named_bool, int, (struct spdk_json_write_ctx *w, const char *name,
+ bool val), 0);
+DEFINE_STUB(spdk_json_write_named_int32, int, (struct spdk_json_write_ctx *w, const char *name,
+ int32_t val), 0);
+DEFINE_STUB(spdk_json_write_named_uint32, int, (struct spdk_json_write_ctx *w, const char *name,
+ uint32_t val), 0);
+DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name,
+ uint64_t val), 0);
+DEFINE_STUB(spdk_json_write_named_int64, int, (struct spdk_json_write_ctx *w, const char *name,
+ int64_t val), 0);
+DEFINE_STUB(spdk_json_write_named_string, int, (struct spdk_json_write_ctx *w, const char *name,
+ const char *val), 0);
+DEFINE_STUB(spdk_json_write_named_string_fmt, int, (struct spdk_json_write_ctx *w, const char *name,
+ const char *fmt, ...), 0);
+DEFINE_STUB(spdk_json_write_named_string_fmt_v, int, (struct spdk_json_write_ctx *w,
+ const char *name, const char *fmt, va_list args), 0);
+
+DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
+DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
diff --git a/src/spdk/test/unit/lib/jsonrpc/Makefile b/src/spdk/test/unit/lib/jsonrpc/Makefile
new file mode 100644
index 000000000..0fc0a2e96
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = jsonrpc_server.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore
new file mode 100644
index 000000000..8852a96d2
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore
@@ -0,0 +1 @@
+jsonrpc_server_ut
diff --git a/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile
new file mode 100644
index 000000000..6c02115f7
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = jsonrpc_server_ut.c
+SPDK_LIB_LIST = json
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c
new file mode 100644
index 000000000..8c3ffa208
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c
@@ -0,0 +1,410 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "jsonrpc/jsonrpc_server.c"
+
+static struct spdk_jsonrpc_request *g_request;
+static int g_parse_error;
+const struct spdk_json_val *g_method;
+const struct spdk_json_val *g_params;
+
+const struct spdk_json_val *g_cur_param;
+
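+/* PARSE_PASS() feeds "in" to the parser and expects everything except the
+ * "trailing" suffix to be consumed. Both sizeof()s include the NUL terminator,
+ * so the terminators cancel out in the expected byte count. */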
+#define PARSE_PASS(in, trailing) \
+ CU_ASSERT(g_cur_param == NULL); \
+ g_cur_param = NULL; \
+ CU_ASSERT(jsonrpc_parse_request(conn, in, sizeof(in) - 1) == sizeof(in) - sizeof(trailing))
+
+#define REQ_BEGIN(expected_error) \
+	if (expected_error != 0) { \
+ CU_ASSERT(g_parse_error == expected_error); \
+ CU_ASSERT(g_params == NULL); \
+ }
+
+#define PARSE_FAIL(in) \
+	CU_ASSERT(jsonrpc_parse_request(conn, in, sizeof(in) - 1) < 0)
+
+#define REQ_BEGIN_VALID() \
+ REQ_BEGIN(0); \
+ SPDK_CU_ASSERT_FATAL(g_params != NULL);
+
+#define REQ_BEGIN_INVALID(expected_error) \
+ REQ_BEGIN(expected_error); \
+ REQ_METHOD_MISSING(); \
+ REQ_ID_MISSING(); \
+ REQ_PARAMS_MISSING()
+
+#define REQ_METHOD(name) \
+ CU_ASSERT(g_method && spdk_json_strequal(g_method, name) == true)
+
+#define REQ_METHOD_MISSING() \
+ CU_ASSERT(g_method == NULL)
+
+#define REQ_ID_NUM(num) \
+ CU_ASSERT(g_request->id && g_request->id->type == SPDK_JSON_VAL_NUMBER); \
+ CU_ASSERT(g_request->id && memcmp(g_request->id->start, num, sizeof(num) - 1) == 0)
+
+#define REQ_ID_STRING(str) \
+	CU_ASSERT(g_request->id && g_request->id->type == SPDK_JSON_VAL_STRING); \
+	CU_ASSERT(g_request->id && memcmp(g_request->id->start, str, sizeof(str) - 1) == 0)
+
+#define REQ_ID_NULL() \
+ CU_ASSERT(g_request->id && g_request->id->type == SPDK_JSON_VAL_NULL)
+
+#define REQ_ID_MISSING() \
+ CU_ASSERT(g_request->id == NULL)
+
+#define REQ_PARAMS_MISSING() \
+ CU_ASSERT(g_params == NULL)
+
+#define REQ_PARAMS_BEGIN() \
+ SPDK_CU_ASSERT_FATAL(g_params != NULL); \
+ CU_ASSERT(g_cur_param == NULL); \
+ g_cur_param = g_params
+
+#define PARAM_ARRAY_BEGIN() \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_ARRAY_BEGIN); \
+ g_cur_param++
+
+#define PARAM_ARRAY_END() \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_ARRAY_END); \
+ g_cur_param++
+
+#define PARAM_OBJECT_BEGIN() \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_OBJECT_BEGIN); \
+ g_cur_param++
+
+#define PARAM_OBJECT_END() \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_OBJECT_END); \
+ g_cur_param++
+
+#define PARAM_NUM(num) \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_NUMBER); \
+ CU_ASSERT(g_cur_param->len == sizeof(num) - 1); \
+ CU_ASSERT(memcmp(g_cur_param->start, num, sizeof(num) - 1) == 0); \
+ g_cur_param++
+
+#define PARAM_NAME(str) \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_NAME); \
+ CU_ASSERT(g_cur_param->len == sizeof(str) - 1); \
+ CU_ASSERT(g_cur_param && memcmp(g_cur_param->start, str, sizeof(str) - 1) == 0); \
+ g_cur_param++
+
+#define PARAM_STRING(str) \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_STRING); \
+ CU_ASSERT(g_cur_param->len == sizeof(str) - 1); \
+	CU_ASSERT(memcmp(g_cur_param->start, str, sizeof(str) - 1) == 0); \
+ g_cur_param++
+
+#define FREE_REQUEST() \
+ ut_jsonrpc_free_request(g_request, g_parse_error); \
+ g_request = NULL; \
+	g_parse_error = 0; \
+	g_method = NULL; \
+	g_cur_param = g_params = NULL
+
+static void
+ut_jsonrpc_free_request(struct spdk_jsonrpc_request *request, int err)
+{
+ struct spdk_json_write_ctx *w;
+
+ if (!request) {
+ return;
+ }
+
+	/* Emulate a response so that the response write context is freed */
+ if (err == 0) {
+ w = spdk_jsonrpc_begin_result(request);
+ spdk_json_write_string(w, "UT PASS response");
+ spdk_jsonrpc_end_result(request, w);
+ } else {
+ spdk_jsonrpc_send_error_response_fmt(request, err, "UT error response");
+ }
+
+ jsonrpc_free_request(request);
+}
+
+static void
+ut_handle(struct spdk_jsonrpc_request *request, int error, const struct spdk_json_val *method,
+ const struct spdk_json_val *params)
+{
+ CU_ASSERT(g_request == NULL);
+ g_request = request;
+ g_parse_error = error;
+ g_method = method;
+ g_params = params;
+}
+
+void
+jsonrpc_server_handle_error(struct spdk_jsonrpc_request *request, int error)
+{
+ ut_handle(request, error, NULL, NULL);
+}
+
+void
+jsonrpc_server_handle_request(struct spdk_jsonrpc_request *request,
+ const struct spdk_json_val *method, const struct spdk_json_val *params)
+{
+ ut_handle(request, 0, method, params);
+}
+
+void
+jsonrpc_server_send_response(struct spdk_jsonrpc_request *request)
+{
+}
+
+static void
+test_parse_request(void)
+{
+ struct spdk_jsonrpc_server *server;
+ struct spdk_jsonrpc_server_conn *conn;
+
+ server = calloc(1, sizeof(*server));
+ SPDK_CU_ASSERT_FATAL(server != NULL);
+
+ conn = calloc(1, sizeof(*conn));
+ SPDK_CU_ASSERT_FATAL(conn != NULL);
+
+ conn->server = server;
+
+	/* empty object - not a valid request (missing "jsonrpc" and "method") */
+ PARSE_PASS("{ }", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ /* rpc call with method that is not a string. */
+ PARSE_PASS("{\"jsonrpc\":\"2.0\", \"method\": null }", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ /* rpc call with invalid JSON RPC version. */
+ PARSE_PASS("{\"jsonrpc\":\"42\", \"method\": \"subtract\"}", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ /* rpc call with embedded zeros. */
+ PARSE_FAIL("{\"jsonrpc\":\"2.0\",\"method\":\"foo\",\"params\":{\"bar\": \"\0\0baz\"}}");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_PARSE_ERROR);
+ FREE_REQUEST();
+
+ /* rpc call with positional parameters */
+ PARSE_PASS("{\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":1}", "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("subtract");
+ REQ_ID_NUM("1");
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("42");
+ PARAM_NUM("23");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+ /* rpc call with named parameters */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": {\"subtrahend\": 23, \"minuend\": 42}, \"id\": 3}",
+ "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("subtract");
+ REQ_ID_NUM("3");
+ REQ_PARAMS_BEGIN();
+ PARAM_OBJECT_BEGIN();
+ PARAM_NAME("subtrahend");
+ PARAM_NUM("23");
+ PARAM_NAME("minuend");
+ PARAM_NUM("42");
+ PARAM_OBJECT_END();
+ FREE_REQUEST();
+
+ /* notification */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": \"update\", \"params\": [1,2,3,4,5]}", "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("update");
+ REQ_ID_MISSING();
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("1");
+ PARAM_NUM("2");
+ PARAM_NUM("3");
+ PARAM_NUM("4");
+ PARAM_NUM("5");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+	/* notification with explicit NULL ID. This is discouraged by the JSON-RPC spec but allowed. */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": \"update\", \"params\": [1,2,3,4,5], \"id\": null}",
+ "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("update");
+ REQ_ID_NULL();
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("1");
+ PARAM_NUM("2");
+ PARAM_NUM("3");
+ PARAM_NUM("4");
+ PARAM_NUM("5");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+ /* invalid JSON */
+ PARSE_FAIL("{\"jsonrpc\": \"2.0\", \"method\": \"foobar, \"params\": \"bar\", \"baz]");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_PARSE_ERROR);
+ FREE_REQUEST();
+
+ /* invalid request (method must be a string; params must be array or object) */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": 1, \"params\": \"bar\"}", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ /* batch, invalid JSON */
+ PARSE_FAIL(
+ "["
+ "{\"jsonrpc\": \"2.0\", \"method\": \"sum\", \"params\": [1,2,4], \"id\": \"1\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\""
+ "]");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_PARSE_ERROR);
+ FREE_REQUEST();
+
+ /* empty array */
+ PARSE_PASS("[]", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ /* batch - not supported */
+ PARSE_PASS(
+ "["
+ "{\"jsonrpc\": \"2.0\", \"method\": \"sum\", \"params\": [1,2,4], \"id\": \"1\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"notify_hello\", \"params\": [7]},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42,23], \"id\": \"2\"},"
+ "{\"foo\": \"boo\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"foo.get\", \"params\": {\"name\": \"myself\"}, \"id\": \"5\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"get_data\", \"id\": \"9\"}"
+ "]", "");
+
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ CU_ASSERT(conn->outstanding_requests == 0);
+ free(conn);
+ free(server);
+}
+
+static void
+test_parse_request_streaming(void)
+{
+ struct spdk_jsonrpc_server *server;
+ struct spdk_jsonrpc_server_conn *conn;
+ const char *json_req;
+ size_t len, i;
+
+ server = calloc(1, sizeof(*server));
+ SPDK_CU_ASSERT_FATAL(server != NULL);
+
+ conn = calloc(1, sizeof(*conn));
+ SPDK_CU_ASSERT_FATAL(conn != NULL);
+
+ conn->server = server;
+
+ /*
+	 * Two valid requests back to back in the same buffer.
+ * Parse should return the first one and point to the beginning of the second one.
+ */
+ PARSE_PASS(
+ "{\"jsonrpc\":\"2.0\",\"method\":\"a\",\"params\":[1],\"id\":1}"
+ "{\"jsonrpc\":\"2.0\",\"method\":\"b\",\"params\":[2],\"id\":2}",
+ "{\"jsonrpc\":\"2.0\",\"method\":\"b\",\"params\":[2],\"id\":2}");
+
+ REQ_BEGIN_VALID();
+ REQ_METHOD("a");
+ REQ_ID_NUM("1");
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("1");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+ /* Partial (but not invalid) requests - parse should not consume anything. */
+ json_req = " {\"jsonrpc\":\"2.0\",\"method\":\"b\",\"params\":[2],\"id\":2}";
+ len = strlen(json_req);
+
+ /* Try every partial length up to the full request length */
+ for (i = 0; i < len; i++) {
+ int rc = jsonrpc_parse_request(conn, json_req, i);
+ /* Partial request - no data consumed */
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_request == NULL);
+
+		/* In case of failure, don't flood the console with useless CU assert failures. */
+ FREE_REQUEST();
+ }
+
+	/* Verify that the full request can be parsed successfully */
+ CU_ASSERT(jsonrpc_parse_request(conn, json_req, len) == (ssize_t)len);
+ FREE_REQUEST();
+
+ CU_ASSERT(conn->outstanding_requests == 0);
+ free(conn);
+ free(server);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("jsonrpc", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_parse_request);
+ CU_ADD_TEST(suite, test_parse_request_streaming);
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+	/* This is for ASAN: a pointer still reachable from a global variable is
+	 * not reported as a leak, so clear it explicitly. */
+ g_request = NULL;
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/log/Makefile b/src/spdk/test/unit/lib/log/Makefile
new file mode 100644
index 000000000..79411a459
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = log.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/log/log.c/.gitignore b/src/spdk/test/unit/lib/log/log.c/.gitignore
new file mode 100644
index 000000000..60261c07b
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/log.c/.gitignore
@@ -0,0 +1 @@
+log_ut
diff --git a/src/spdk/test/unit/lib/log/log.c/Makefile b/src/spdk/test/unit/lib/log/log.c/Makefile
new file mode 100644
index 000000000..e3ba9340c
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/log.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = log_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/log/log.c/log_ut.c b/src/spdk/test/unit/lib/log/log.c/log_ut.c
new file mode 100644
index 000000000..87a578b84
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/log.c/log_ut.c
@@ -0,0 +1,106 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk/log.h"
+
+#include "log/log.c"
+#include "log/log_flags.c"
+
+static void
+log_test(void)
+{
+ spdk_log_set_level(SPDK_LOG_ERROR);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_ERROR);
+ spdk_log_set_level(SPDK_LOG_WARN);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_WARN);
+ spdk_log_set_level(SPDK_LOG_NOTICE);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_NOTICE);
+ spdk_log_set_level(SPDK_LOG_INFO);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_INFO);
+ spdk_log_set_level(SPDK_LOG_DEBUG);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_DEBUG);
+
+ spdk_log_set_print_level(SPDK_LOG_ERROR);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_ERROR);
+ spdk_log_set_print_level(SPDK_LOG_WARN);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_WARN);
+ spdk_log_set_print_level(SPDK_LOG_NOTICE);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_NOTICE);
+ spdk_log_set_print_level(SPDK_LOG_INFO);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_INFO);
+ spdk_log_set_print_level(SPDK_LOG_DEBUG);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_DEBUG);
+
+#ifdef DEBUG
+ CU_ASSERT(spdk_log_get_flag("log") == false);
+
+ spdk_log_set_flag("log");
+ CU_ASSERT(spdk_log_get_flag("log") == true);
+
+ spdk_log_clear_flag("log");
+ CU_ASSERT(spdk_log_get_flag("log") == false);
+#endif
+
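+	/* Open the log with no custom handler so messages go to the default
+	 * destination, then exercise the log and hex-dump paths; these calls
+	 * mainly check that nothing crashes. */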
+ spdk_log_open(NULL);
+ spdk_log_set_flag("log");
+ SPDK_WARNLOG("log warning unit test\n");
+ SPDK_DEBUGLOG(SPDK_LOG_LOG, "log test\n");
+ SPDK_LOGDUMP(SPDK_LOG_LOG, "log dump test:", "log dump", 8);
+ spdk_log_dump(stderr, "spdk dump test:", "spdk dump", 9);
+ /* Test spdk_log_dump with more than 16 chars and less than 32 chars */
+ spdk_log_dump(stderr, "spdk dump test:", "spdk dump 16 more chars", 23);
+
+ spdk_log_close();
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("log", NULL, NULL);
+
+ CU_ADD_TEST(suite, log_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/lvol/Makefile b/src/spdk/test/unit/lib/lvol/Makefile
new file mode 100644
index 000000000..c9276de47
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = lvol.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/lvol/lvol.c/.gitignore b/src/spdk/test/unit/lib/lvol/lvol.c/.gitignore
new file mode 100644
index 000000000..57e92bfe1
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/lvol.c/.gitignore
@@ -0,0 +1 @@
+lvol_ut
diff --git a/src/spdk/test/unit/lib/lvol/lvol.c/Makefile b/src/spdk/test/unit/lib/lvol/lvol.c/Makefile
new file mode 100644
index 000000000..aa9acde11
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/lvol.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = lvol_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c b/src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c
new file mode 100644
index 000000000..72f7b6e81
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c
@@ -0,0 +1,2096 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk/blob.h"
+#include "spdk/thread.h"
+#include "spdk/util.h"
+
+#include "common/lib/ut_multithread.c"
+
+#include "lvol/lvol.c"
+
+#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
+#define DEV_BUFFER_BLOCKLEN (4096)
+#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
+#define BS_CLUSTER_SIZE (1024 * 1024)
+#define BS_FREE_CLUSTERS (DEV_BUFFER_SIZE / BS_CLUSTER_SIZE)
+#define BS_PAGE_SIZE (4096)
+
+#define SPDK_BLOB_OPTS_CLUSTER_SZ (1024 * 1024)
+#define SPDK_BLOB_OPTS_NUM_MD_PAGES UINT32_MAX
+#define SPDK_BLOB_OPTS_MAX_MD_OPS 32
+#define SPDK_BLOB_OPTS_MAX_CHANNEL_OPS 512
+
+#define SPDK_BLOB_THIN_PROV (1ULL << 0)
+
+const char *uuid = "828d9766-ae50-11e7-bd8d-001e67edf350";
+
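+/* Minimal test double for a blob. The *_status fields let individual tests
+ * inject errors from the stubbed open/close/iterate operations below. */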
+struct spdk_blob {
+ spdk_blob_id id;
+ uint32_t ref;
+ struct spdk_blob_store *bs;
+ int close_status;
+ int open_status;
+ int load_status;
+ TAILQ_ENTRY(spdk_blob) link;
+ char uuid[SPDK_UUID_STRING_LEN];
+ char name[SPDK_LVS_NAME_MAX];
+ bool thin_provisioned;
+};
+
+int g_lvserrno;
+int g_close_super_status;
+int g_resize_rc;
+int g_inflate_rc;
+int g_remove_rc;
+bool g_lvs_rename_blob_open_error = false;
+struct spdk_lvol_store *g_lvol_store;
+struct spdk_lvol *g_lvol;
+spdk_blob_id g_blobid = 1;
+struct spdk_io_channel *g_io_channel;
+
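+/* Fake blob store: a list of blobs plus the options it was created with. */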
+struct spdk_blob_store {
+ struct spdk_bs_opts bs_opts;
+ spdk_blob_id super_blobid;
+ TAILQ_HEAD(, spdk_blob) blobs;
+ int get_super_status;
+};
+
+struct lvol_ut_bs_dev {
+ struct spdk_bs_dev bs_dev;
+ int init_status;
+ int load_status;
+ struct spdk_blob_store *bs;
+};
+
+void spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
+ spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, g_inflate_rc);
+}
+
+void spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
+ spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, g_inflate_rc);
+}
+
+void
+spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *b,
+ spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *next;
+ int _errno = 0;
+
+ next = TAILQ_NEXT(b, link);
+ if (next == NULL) {
+ _errno = -ENOENT;
+ } else if (next->load_status != 0) {
+ _errno = next->load_status;
+ }
+
+ cb_fn(cb_arg, next, _errno);
+}
+
+void
+spdk_bs_iter_first(struct spdk_blob_store *bs,
+ spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *first;
+ int _errno = 0;
+
+ first = TAILQ_FIRST(&bs->blobs);
+ if (first == NULL) {
+ _errno = -ENOENT;
+ } else if (first->load_status != 0) {
+ _errno = first->load_status;
+ }
+
+ cb_fn(cb_arg, first, _errno);
+}
+
+uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
+{
+ return 0;
+}
+
+void
+spdk_bs_get_super(struct spdk_blob_store *bs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ if (bs->get_super_status != 0) {
+ cb_fn(cb_arg, 0, bs->get_super_status);
+ } else {
+ cb_fn(cb_arg, bs->super_blobid, 0);
+ }
+}
+
+void
+spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_bs_op_complete cb_fn, void *cb_arg)
+{
+ bs->super_blobid = blobid;
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts,
+ spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct lvol_ut_bs_dev *ut_dev = SPDK_CONTAINEROF(dev, struct lvol_ut_bs_dev, bs_dev);
+ struct spdk_blob_store *bs = NULL;
+
+ if (ut_dev->load_status == 0) {
+ bs = ut_dev->bs;
+ }
+
+ cb_fn(cb_arg, bs, ut_dev->load_status);
+}
+
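+/* Fake I/O channel: a single refcounted global object, so tests can verify
+ * that every spdk_bs_alloc_io_channel() is balanced by a free. */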
+struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
+{
+ if (g_io_channel == NULL) {
+ g_io_channel = calloc(1, sizeof(struct spdk_io_channel));
+ SPDK_CU_ASSERT_FATAL(g_io_channel != NULL);
+ }
+ g_io_channel->ref++;
+ return g_io_channel;
+}
+
+void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
+{
+ g_io_channel->ref--;
+ if (g_io_channel->ref == 0) {
+ free(g_io_channel);
+ g_io_channel = NULL;
+ }
+}
+
+int
+spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
+ uint16_t value_len)
+{
+ if (!strcmp(name, "uuid")) {
+ CU_ASSERT(value_len == SPDK_UUID_STRING_LEN);
+ memcpy(blob->uuid, value, SPDK_UUID_STRING_LEN);
+ } else if (!strcmp(name, "name")) {
+ CU_ASSERT(value_len <= SPDK_LVS_NAME_MAX);
+ memcpy(blob->name, value, value_len);
+ }
+
+ return 0;
+}
+
+int
+spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
+ const void **value, size_t *value_len)
+{
+ if (!strcmp(name, "uuid") && strnlen(blob->uuid, SPDK_UUID_STRING_LEN) != 0) {
+ CU_ASSERT(strnlen(blob->uuid, SPDK_UUID_STRING_LEN) == (SPDK_UUID_STRING_LEN - 1));
+ *value = blob->uuid;
+ *value_len = SPDK_UUID_STRING_LEN;
+ return 0;
+ } else if (!strcmp(name, "name") && strnlen(blob->name, SPDK_LVS_NAME_MAX) != 0) {
+ *value = blob->name;
+ *value_len = strnlen(blob->name, SPDK_LVS_NAME_MAX) + 1;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+bool spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
+{
+ return blob->thin_provisioned;
+}
+
+DEFINE_STUB(spdk_blob_get_clones, int, (struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_blob_id *ids, size_t *count), 0);
+DEFINE_STUB(spdk_bs_get_page_size, uint64_t, (struct spdk_blob_store *bs), BS_PAGE_SIZE);
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
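+/* Helpers that set up and tear down the in-memory bs_dev each test runs against. */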
+static void
+init_dev(struct lvol_ut_bs_dev *dev)
+{
+ memset(dev, 0, sizeof(*dev));
+ dev->bs_dev.blockcnt = DEV_BUFFER_BLOCKCNT;
+ dev->bs_dev.blocklen = DEV_BUFFER_BLOCKLEN;
+}
+
+static void
+free_dev(struct lvol_ut_bs_dev *dev)
+{
+ struct spdk_blob_store *bs = dev->bs;
+ struct spdk_blob *blob, *tmp;
+
+ if (bs == NULL) {
+ return;
+ }
+
+ TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, tmp) {
+ TAILQ_REMOVE(&bs->blobs, blob, link);
+ free(blob);
+ }
+
+ free(bs);
+ dev->bs = NULL;
+}
+
+void
+spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
+ spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct lvol_ut_bs_dev *ut_dev = SPDK_CONTAINEROF(dev, struct lvol_ut_bs_dev, bs_dev);
+ struct spdk_blob_store *bs;
+
+ bs = calloc(1, sizeof(*bs));
+ SPDK_CU_ASSERT_FATAL(bs != NULL);
+
+ TAILQ_INIT(&bs->blobs);
+
+ ut_dev->bs = bs;
+
+ memcpy(&bs->bs_opts, o, sizeof(struct spdk_bs_opts));
+
+ cb_fn(cb_arg, bs, 0);
+}
+
+void
+spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
+ void *cb_arg)
+{
+ free(bs);
+
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *blob;
+
+ TAILQ_FOREACH(blob, &bs->blobs, link) {
+ if (blob->id == blobid) {
+ TAILQ_REMOVE(&bs->blobs, blob, link);
+ free(blob);
+ break;
+ }
+ }
+
+ cb_fn(cb_arg, g_remove_rc);
+}
+
+spdk_blob_id
+spdk_blob_get_id(struct spdk_blob *blob)
+{
+ return blob->id;
+}
+
+void
+spdk_bs_opts_init(struct spdk_bs_opts *opts)
+{
+ opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
+ opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
+ opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
+ opts->max_channel_ops = SPDK_BLOB_OPTS_MAX_CHANNEL_OPS;
+ memset(&opts->bstype, 0, sizeof(opts->bstype));
+}
+
+DEFINE_STUB(spdk_bs_get_cluster_size, uint64_t, (struct spdk_blob_store *bs), BS_CLUSTER_SIZE);
+
+void spdk_blob_close(struct spdk_blob *b, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ b->ref--;
+
+ cb_fn(cb_arg, b->close_status);
+}
+
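+/* Resize stub: g_resize_rc injects a failure, and sizes beyond the fake
+ * device's cluster count fail with -ENOMEM to mimic running out of space. */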
+void
+spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ if (g_resize_rc != 0) {
+ return cb_fn(cb_arg, g_resize_rc);
+ } else if (sz > DEV_BUFFER_SIZE / BS_CLUSTER_SIZE) {
+ return cb_fn(cb_arg, -ENOMEM);
+ }
+ cb_fn(cb_arg, 0);
+}
+
+DEFINE_STUB(spdk_blob_set_read_only, int, (struct spdk_blob *blob), 0);
+
+void
+spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_open_blob(bs, blobid, cb_fn, cb_arg);
+}
+
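+/* Open stub: looks the blob up by id; g_lvs_rename_blob_open_error forces a
+ * lookup miss so rename error paths can be exercised. */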
+void
+spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *blob;
+
+ if (!g_lvs_rename_blob_open_error) {
+ TAILQ_FOREACH(blob, &bs->blobs, link) {
+ if (blob->id == blobid) {
+ blob->ref++;
+ cb_fn(cb_arg, blob, blob->open_status);
+ return;
+ }
+ }
+ }
+
+ cb_fn(cb_arg, NULL, -ENOENT);
+}
+
+DEFINE_STUB(spdk_bs_free_cluster_count, uint64_t, (struct spdk_blob_store *bs), BS_FREE_CLUSTERS);
+
+void
+spdk_blob_opts_init(struct spdk_blob_opts *opts)
+{
+ opts->num_clusters = 0;
+ opts->thin_provision = false;
+ opts->xattrs.count = 0;
+ opts->xattrs.names = NULL;
+ opts->xattrs.ctx = NULL;
+ opts->xattrs.get_value = NULL;
+}
+
+void
+spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts)
+{
+ opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
+}
+
+void
+spdk_bs_create_blob(struct spdk_blob_store *bs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_create_blob_ext(bs, NULL, cb_fn, cb_arg);
+}
+
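+/* Create stub: rejects blobs larger than the fake device and otherwise hands
+ * out monotonically increasing ids from g_blobid. */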
+void
+spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *b;
+
+ if (opts && opts->num_clusters > DEV_BUFFER_SIZE / BS_CLUSTER_SIZE) {
+ cb_fn(cb_arg, 0, -1);
+ return;
+ }
+
+ b = calloc(1, sizeof(*b));
+ SPDK_CU_ASSERT_FATAL(b != NULL);
+
+ b->id = g_blobid++;
+ if (opts != NULL && opts->thin_provision) {
+ b->thin_provisioned = true;
+ }
+ b->bs = bs;
+
+ TAILQ_INSERT_TAIL(&bs->blobs, b, link);
+ cb_fn(cb_arg, b->id, 0);
+}
+
+void
+spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ const struct spdk_blob_xattr_opts *snapshot_xattrs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_create_blob_ext(bs, NULL, cb_fn, cb_arg);
+}
+
+void
+spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ const struct spdk_blob_xattr_opts *clone_xattrs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_create_blob_ext(bs, NULL, cb_fn, cb_arg);
+}
+
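+/* Completion callbacks used throughout: they stash the result in globals
+ * (g_lvol_store, g_lvol, g_lvserrno) that the test bodies assert on. */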
+static void
+lvol_store_op_with_handle_complete(void *cb_arg, struct spdk_lvol_store *lvol_store, int lvserrno)
+{
+ g_lvol_store = lvol_store;
+ g_lvserrno = lvserrno;
+}
+
+static void
+lvol_op_with_handle_complete(void *cb_arg, struct spdk_lvol *lvol, int lvserrno)
+{
+ g_lvol = lvol;
+ g_lvserrno = lvserrno;
+}
+
+static void
+op_complete(void *cb_arg, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+}
+
+static void
+lvs_init_unload_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Lvol store has an open lvol, this unload should fail. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == -EBUSY);
+ CU_ASSERT(g_lvserrno == -EBUSY);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ free_dev(&dev);
+}
+
+static void
+lvs_init_destroy_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Lvol store contains one lvol, this destroy should fail. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == -EBUSY);
+ CU_ASSERT(g_lvserrno == -EBUSY);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+}
+
+static void
+lvs_init_opts_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ g_lvserrno = -1;
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+ opts.cluster_sz = 8192;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(dev.bs->bs_opts.cluster_sz == opts.cluster_sz);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvs_unload_lvs_is_null_fail(void)
+{
+ int rc = 0;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(NULL, op_complete, NULL);
+ CU_ASSERT(rc == -ENODEV);
+ CU_ASSERT(g_lvserrno == -1);
+}
+
+static void
+lvs_names(void)
+{
+ struct lvol_ut_bs_dev dev_x, dev_y, dev_x2;
+ struct spdk_lvs_opts opts_none, opts_x, opts_y, opts_full;
+ struct spdk_lvol_store *lvs_x, *lvs_y, *lvs_x2;
+ int rc = 0;
+
+ init_dev(&dev_x);
+ init_dev(&dev_y);
+ init_dev(&dev_x2);
+
+ spdk_lvs_opts_init(&opts_none);
+ spdk_lvs_opts_init(&opts_x);
+ opts_x.name[0] = 'x';
+ spdk_lvs_opts_init(&opts_y);
+ opts_y.name[0] = 'y';
+ spdk_lvs_opts_init(&opts_full);
+ memset(opts_full.name, 'a', sizeof(opts_full.name));
+
+ /* Test that opts with no name fails spdk_lvs_init(). */
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_none, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Test that opts with no null terminator for name fails spdk_lvs_init(). */
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_full, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Test that we can create an lvolstore with name 'x'. */
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x = g_lvol_store;
+
+ /* Test that we can create an lvolstore with name 'y'. */
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_y.bs_dev, &opts_y, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_y = g_lvol_store;
+
+ /* Test that we cannot create another lvolstore with name 'x'. */
+ rc = spdk_lvs_init(&dev_x2.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Now destroy lvolstore 'x' and then confirm we can create a new lvolstore with name 'x'. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_x, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x = g_lvol_store;
+
+ /*
+ * Unload lvolstore 'x'. Then we should be able to create another lvolstore with name 'x'.
+ */
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(lvs_x, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_x2.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x2 = g_lvol_store;
+
+ /* Confirm that we cannot load the first lvolstore 'x'. */
+ g_lvserrno = 0;
+ spdk_lvs_load(&dev_x.bs_dev, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ /* Destroy the second lvolstore 'x'. Then we should be able to load the first lvolstore 'x'. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_x2, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+ spdk_lvs_load(&dev_x.bs_dev, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x = g_lvol_store;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_x, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_y, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+}
+
+static void
+lvol_create_destroy_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_create_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ rc = spdk_lvs_init(NULL, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ g_lvol = NULL;
+ rc = spdk_lvol_create(NULL, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol == NULL);
+
+ g_lvol = NULL;
+ rc = spdk_lvol_create(g_lvol_store, "lvol", DEV_BUFFER_SIZE + 1, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol == NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_destroy_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_remove_rc = -1;
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_store->lvols));
+ g_remove_rc = 0;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_close_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_close_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_resize(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_resize_rc = 0;
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Resize to same size */
+ spdk_lvol_resize(g_lvol, 10, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to smaller size */
+ spdk_lvol_resize(g_lvol, 5, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to bigger size */
+ spdk_lvol_resize(g_lvol, 15, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to size = 0 */
+ spdk_lvol_resize(g_lvol, 0, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to bigger size than available */
+ g_lvserrno = 0;
+ spdk_lvol_resize(g_lvol, 0xFFFFFFFF, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ /* Fail resize */
+ g_resize_rc = -1;
+ g_lvserrno = 0;
+ spdk_lvol_resize(g_lvol, 10, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ g_resize_rc = 0;
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_set_read_only(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+ struct spdk_lvol *lvol, *clone;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ /* Set lvol as read only */
+ spdk_lvol_set_read_only(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Create lvol clone from read only lvol */
+ spdk_lvol_create_clone(lvol, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "clone");
+ clone = g_lvol;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_close(clone, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+null_cb(void *ctx, struct spdk_blob_store *bs, int bserrno)
+{
+ SPDK_CU_ASSERT_FATAL(bs != NULL);
+}
+
+static void
+lvs_load(void)
+{
+ int rc = -1;
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_with_handle_req *req;
+ struct spdk_bs_opts bs_opts = {};
+ struct spdk_blob *super_blob;
+
+ req = calloc(1, sizeof(*req));
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ init_dev(&dev);
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "LVOLSTORE");
+ spdk_bs_init(&dev.bs_dev, &bs_opts, null_cb, NULL);
+ SPDK_CU_ASSERT_FATAL(dev.bs != NULL);
+
+ /* Fail on bs load */
+ dev.load_status = -1;
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on getting super blob */
+ dev.load_status = 0;
+ dev.bs->get_super_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -ENODEV);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on opening super blob */
+ g_lvserrno = 0;
+	super_blob = calloc(1, sizeof(*super_blob));
+	SPDK_CU_ASSERT_FATAL(super_blob != NULL);
+	super_blob->id = 0x100;
+ super_blob->open_status = -1;
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, super_blob, link);
+ dev.bs->super_blobid = 0x100;
+ dev.bs->get_super_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -ENODEV);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on getting uuid */
+ g_lvserrno = 0;
+ super_blob->open_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -EINVAL);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on getting name */
+ g_lvserrno = 0;
+ spdk_blob_set_xattr(super_blob, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -EINVAL);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on closing super blob */
+ g_lvserrno = 0;
+ spdk_blob_set_xattr(super_blob, "name", "lvs", strnlen("lvs", SPDK_LVS_NAME_MAX) + 1);
+ super_blob->close_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -ENODEV);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Load successfully */
+ g_lvserrno = 0;
+ super_blob->close_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ free(req);
+ free_dev(&dev);
+}
+
+static void
+lvols_load(void)
+{
+ int rc = -1;
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_with_handle_req *req;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob *super_blob, *blob1, *blob2, *blob3;
+
+ req = calloc(1, sizeof(*req));
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ init_dev(&dev);
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "LVOLSTORE");
+ spdk_bs_init(&dev.bs_dev, &bs_opts, null_cb, NULL);
+ super_blob = calloc(1, sizeof(*super_blob));
+ SPDK_CU_ASSERT_FATAL(super_blob != NULL);
+ super_blob->id = 0x100;
+ spdk_blob_set_xattr(super_blob, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(super_blob, "name", "lvs", strnlen("lvs", SPDK_LVS_NAME_MAX) + 1);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, super_blob, link);
+ dev.bs->super_blobid = 0x100;
+
+	/*
+	 * Create 3 blobs and give each a different value for the last character
+	 * of the UUID to make sure the UUIDs are unique.
+	 */
+ blob1 = calloc(1, sizeof(*blob1));
+ SPDK_CU_ASSERT_FATAL(blob1 != NULL);
+ blob1->id = 0x1;
+ spdk_blob_set_xattr(blob1, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob1, "name", "lvol1", strnlen("lvol1", SPDK_LVOL_NAME_MAX) + 1);
+ blob1->uuid[SPDK_UUID_STRING_LEN - 2] = '1';
+
+ blob2 = calloc(1, sizeof(*blob2));
+ SPDK_CU_ASSERT_FATAL(blob2 != NULL);
+ blob2->id = 0x2;
+ spdk_blob_set_xattr(blob2, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob2, "name", "lvol2", strnlen("lvol2", SPDK_LVOL_NAME_MAX) + 1);
+ blob2->uuid[SPDK_UUID_STRING_LEN - 2] = '2';
+
+ blob3 = calloc(1, sizeof(*blob3));
+ SPDK_CU_ASSERT_FATAL(blob3 != NULL);
+	blob3->id = 0x3;
+ spdk_blob_set_xattr(blob3, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob3, "name", "lvol3", strnlen("lvol3", SPDK_LVOL_NAME_MAX) + 1);
+ blob3->uuid[SPDK_UUID_STRING_LEN - 2] = '3';
+
+	/* Load lvs with 0 lvol blobs (only the super blob is present) */
+ g_lvserrno = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store != NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob1, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob2, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob3, link);
+
+ /* Load lvs again with 3 blobs, but fail on 1st one */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ blob1->load_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Load lvs again with 3 blobs, but fail on 3rd one */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ blob1->load_status = 0;
+ blob2->load_status = 0;
+ blob3->load_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Load lvs again with 3 blobs, with success */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ blob1->load_status = 0;
+ blob2->load_status = 0;
+ blob3->load_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_store->lvols));
+
+ g_lvserrno = -1;
+ /* rc = */ spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+	/*
+	 * Disable these two asserts for now. lvolstore should allow unload as long
+	 * as the lvols were not opened - but this is coming in a future patch.
+	 */
+ /* CU_ASSERT(rc == 0); */
+ /* CU_ASSERT(g_lvserrno == 0); */
+
+ free(req);
+ free_dev(&dev);
+}
+
+static void
+lvol_open(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_with_handle_req *req;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob *super_blob, *blob1, *blob2, *blob3;
+ struct spdk_lvol *lvol, *tmp;
+
+ req = calloc(1, sizeof(*req));
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ init_dev(&dev);
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "LVOLSTORE");
+ spdk_bs_init(&dev.bs_dev, &bs_opts, null_cb, NULL);
+ super_blob = calloc(1, sizeof(*super_blob));
+ SPDK_CU_ASSERT_FATAL(super_blob != NULL);
+ super_blob->id = 0x100;
+ spdk_blob_set_xattr(super_blob, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(super_blob, "name", "lvs", strnlen("lvs", SPDK_LVS_NAME_MAX) + 1);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, super_blob, link);
+ dev.bs->super_blobid = 0x100;
+
+	/*
+	 * Create 3 blobs and give each a different value for the last character
+	 * of the UUID to make sure the UUIDs are unique.
+	 */
+ blob1 = calloc(1, sizeof(*blob1));
+ SPDK_CU_ASSERT_FATAL(blob1 != NULL);
+ blob1->id = 0x1;
+ spdk_blob_set_xattr(blob1, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob1, "name", "lvol1", strnlen("lvol1", SPDK_LVOL_NAME_MAX) + 1);
+ blob1->uuid[SPDK_UUID_STRING_LEN - 2] = '1';
+
+ blob2 = calloc(1, sizeof(*blob2));
+ SPDK_CU_ASSERT_FATAL(blob2 != NULL);
+ blob2->id = 0x2;
+ spdk_blob_set_xattr(blob2, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob2, "name", "lvol2", strnlen("lvol2", SPDK_LVOL_NAME_MAX) + 1);
+ blob2->uuid[SPDK_UUID_STRING_LEN - 2] = '2';
+
+ blob3 = calloc(1, sizeof(*blob3));
+ SPDK_CU_ASSERT_FATAL(blob3 != NULL);
+	blob3->id = 0x3;
+ spdk_blob_set_xattr(blob3, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob3, "name", "lvol3", strnlen("lvol3", SPDK_LVOL_NAME_MAX) + 1);
+ blob3->uuid[SPDK_UUID_STRING_LEN - 2] = '3';
+
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob1, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob2, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob3, link);
+
+ /* Load lvs with 3 blobs */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_lvol_stores));
+
+ blob1->open_status = -1;
+ blob2->open_status = -1;
+ blob3->open_status = -1;
+
+ /* Fail opening all lvols */
+ TAILQ_FOREACH_SAFE(lvol, &g_lvol_store->lvols, link, tmp) {
+ spdk_lvol_open(lvol, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ }
+
+ blob1->open_status = 0;
+ blob2->open_status = 0;
+ blob3->open_status = 0;
+
+ /* Open all lvols */
+ TAILQ_FOREACH_SAFE(lvol, &g_lvol_store->lvols, link, tmp) {
+ spdk_lvol_open(lvol, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ }
+
+ /* Close all lvols */
+ TAILQ_FOREACH_SAFE(lvol, &g_lvol_store->lvols, link, tmp) {
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ }
+
+ g_lvserrno = -1;
+ spdk_lvs_destroy(g_lvol_store, op_complete, NULL);
+
+ free(req);
+ free(blob1);
+ free(blob2);
+ free(blob3);
+}
+
+static void
+lvol_snapshot(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_snapshot_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol, *snap;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(NULL, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+ spdk_lvol_create_snapshot(lvol, "", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+ spdk_lvol_create_snapshot(lvol, NULL, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ snap = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(snap, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_clone(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *snap;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ snap = g_lvol;
+
+ spdk_lvol_create_clone(snap, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "clone");
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(snap, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_clone_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *snap;
+ struct spdk_lvol *clone;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ snap = g_lvol;
+
+ spdk_lvol_create_clone(NULL, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_create_clone(snap, "", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_create_clone(snap, NULL, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_create_clone(snap, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "clone");
+
+ clone = g_lvol;
+
+ spdk_lvol_create_clone(snap, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(clone, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(snap, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_names(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol, *lvol2;
+ char fullname[SPDK_LVOL_NAME_MAX];
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs = g_lvol_store;
+
+ rc = spdk_lvol_create(lvs, NULL, 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ rc = spdk_lvol_create(lvs, "", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == -EINVAL);
+
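+	/* A name that fills the whole buffer leaves no room for the terminating NUL and is rejected. */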
+ memset(fullname, 'x', sizeof(fullname));
+ rc = spdk_lvol_create(lvs, fullname, 1, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == -EEXIST);
+
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol2", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol2 = g_lvol;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ spdk_lvol_destroy(lvol, op_complete, NULL);
+
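+	/* Destroying the original "lvol" frees its name for reuse. */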
+ g_lvserrno = -1;
+ g_lvol = NULL;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ spdk_lvol_destroy(lvol, op_complete, NULL);
+
+ spdk_lvol_close(lvol2, op_complete, NULL);
+ spdk_lvol_destroy(lvol2, op_complete, NULL);
+
+	/* Simulate creating two lvols with the same name simultaneously. */
+ lvol = calloc(1, sizeof(*lvol));
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+ snprintf(lvol->name, sizeof(lvol->name), "tmp_name");
+ TAILQ_INSERT_TAIL(&lvs->pending_lvols, lvol, link);
+ rc = spdk_lvol_create(lvs, "tmp_name", 1, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Remove name from temporary list and try again. */
+ TAILQ_REMOVE(&lvs->pending_lvols, lvol, link);
+ free(lvol);
+
+ rc = spdk_lvol_create(lvs, "tmp_name", 1, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ spdk_lvol_destroy(lvol, op_complete, NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+}
+
+static void
+lvol_rename(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol, *lvol2;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs = g_lvol_store;
+
+	/* Try to create a new lvol */
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+	/* Try to create a second lvol with an existing lvol name */
+ g_lvserrno = -1;
+ g_lvol = NULL;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == -EEXIST);
+ CU_ASSERT(g_lvserrno == -1);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+	/* Try to create a second lvol with a name that is not in use */
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol2", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol2 = g_lvol;
+
+	/* Rename lvol to a name that is not yet in use */
+ spdk_lvol_rename(lvol, "lvol_new", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "lvol_new");
+
+	/* Try to rename lvol2 to a name already used by another lvol */
+ spdk_lvol_rename(lvol2, "lvol_new", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_NOT_EQUAL(lvol2->name, "lvol_new");
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ spdk_lvol_destroy(lvol, op_complete, NULL);
+
+ spdk_lvol_close(lvol2, op_complete, NULL);
+ spdk_lvol_destroy(lvol2, op_complete, NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+}
+
+static void
+lvs_rename(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol_store *lvs, *lvs2;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs = g_lvol_store;
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "unimportant_lvs_name");
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs2 = g_lvol_store;
+
+	/* Rename lvs to a new, unused name */
+ spdk_lvs_rename(lvs, "new_lvs_name", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+
+	/* Renaming lvs to the name it already has should succeed */
+ spdk_lvs_rename(lvs, "new_lvs_name", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+
+	/* Try to rename lvs to a name that already belongs to another lvs */
+ spdk_lvs_rename(lvs2, "new_lvs_name", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_EQUAL(lvs2->name, "unimportant_lvs_name");
+
+	/* Try to rename lvs while another rename to the same name is already in progress */
+	/* Simulate a rename in progress on lvs2 */
+	snprintf(lvs2->new_name, sizeof(lvs2->new_name), "another_new_lvs_name");
+	CU_ASSERT_STRING_EQUAL(lvs2->new_name, "another_new_lvs_name");
+	/* Start the competing rename */
+ spdk_lvs_rename(lvs, "another_new_lvs_name", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+	/* Revert lvs2's new_name to its proper value */
+ snprintf(lvs2->new_name, sizeof(lvs2->new_name), "unimportant_lvs_name");
+ CU_ASSERT_STRING_EQUAL(lvs2->new_name, "unimportant_lvs_name");
+
+	/* Simulate a blob open error during lvs rename */
+ g_lvs_rename_blob_open_error = true;
+ spdk_lvs_rename(lvs, "complete_new_lvs_name", op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(lvs->new_name, "new_lvs_name");
+ g_lvs_rename_blob_open_error = false;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs2, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+}
+
+static void
+lvol_refcnt(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol *lvol;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvol->ref_count == 1);
+
+ lvol = g_lvol;
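+	/* Opening an lvol that is already open just bumps its reference count. */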
+ spdk_lvol_open(g_lvol, lvol_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(lvol->ref_count == 2);
+
+	/* Destroying an lvol while it is open should fail */
+ spdk_lvol_destroy(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(lvol->ref_count == 1);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(lvol->ref_count == 0);
+ CU_ASSERT(g_lvserrno == 0);
+
+	/* Try to close an already closed lvol */
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(lvol->ref_count == 0);
+ CU_ASSERT(g_lvserrno != 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_create_thin_provisioned(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ CU_ASSERT(g_lvol->blob->thin_provisioned == false);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ CU_ASSERT(g_lvol->blob->thin_provisioned == true);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_inflate(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
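+	/* g_inflate_rc drives the inflate stub's completion status: fail the first call, then let the retry succeed. */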
+ g_inflate_rc = -1;
+ spdk_lvol_inflate(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ g_inflate_rc = 0;
+ spdk_lvol_inflate(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+	/* Make sure that all references to the io_channel were closed after
+	 * the inflate call.
+	 */
+ CU_ASSERT(g_io_channel == NULL);
+}
+
+static void
+lvol_decouple_parent(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
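+	/* Decouple-parent goes through the same stub, so g_inflate_rc again controls the completion status. */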
+ g_inflate_rc = -1;
+ spdk_lvol_decouple_parent(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ g_inflate_rc = 0;
+ spdk_lvol_decouple_parent(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+	/* Make sure that all references to the io_channel were closed after
+	 * the decouple_parent call.
+	 */
+ CU_ASSERT(g_io_channel == NULL);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("lvol", NULL, NULL);
+
+ CU_ADD_TEST(suite, lvs_init_unload_success);
+ CU_ADD_TEST(suite, lvs_init_destroy_success);
+ CU_ADD_TEST(suite, lvs_init_opts_success);
+ CU_ADD_TEST(suite, lvs_unload_lvs_is_null_fail);
+ CU_ADD_TEST(suite, lvs_names);
+ CU_ADD_TEST(suite, lvol_create_destroy_success);
+ CU_ADD_TEST(suite, lvol_create_fail);
+ CU_ADD_TEST(suite, lvol_destroy_fail);
+ CU_ADD_TEST(suite, lvol_close_fail);
+ CU_ADD_TEST(suite, lvol_close_success);
+ CU_ADD_TEST(suite, lvol_resize);
+ CU_ADD_TEST(suite, lvol_set_read_only);
+ CU_ADD_TEST(suite, lvs_load);
+ CU_ADD_TEST(suite, lvols_load);
+ CU_ADD_TEST(suite, lvol_open);
+ CU_ADD_TEST(suite, lvol_snapshot);
+ CU_ADD_TEST(suite, lvol_snapshot_fail);
+ CU_ADD_TEST(suite, lvol_clone);
+ CU_ADD_TEST(suite, lvol_clone_fail);
+ CU_ADD_TEST(suite, lvol_refcnt);
+ CU_ADD_TEST(suite, lvol_names);
+ CU_ADD_TEST(suite, lvol_create_thin_provisioned);
+ CU_ADD_TEST(suite, lvol_rename);
+ CU_ADD_TEST(suite, lvs_rename);
+ CU_ADD_TEST(suite, lvol_inflate);
+ CU_ADD_TEST(suite, lvol_decouple_parent);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/notify/Makefile b/src/spdk/test/unit/lib/notify/Makefile
new file mode 100644
index 000000000..9b29a3e07
--- /dev/null
+++ b/src/spdk/test/unit/lib/notify/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = notify.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/notify/notify.c/.gitignore b/src/spdk/test/unit/lib/notify/notify.c/.gitignore
new file mode 100644
index 000000000..f20d6130e
--- /dev/null
+++ b/src/spdk/test/unit/lib/notify/notify.c/.gitignore
@@ -0,0 +1 @@
+notify_ut
diff --git a/src/spdk/test/unit/lib/notify/notify.c/Makefile b/src/spdk/test/unit/lib/notify/notify.c/Makefile
new file mode 100644
index 000000000..c6490b778
--- /dev/null
+++ b/src/spdk/test/unit/lib/notify/notify.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+TEST_FILE = notify_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/notify/notify.c/notify_ut.c b/src/spdk/test/unit/lib/notify/notify.c/notify_ut.c
new file mode 100644
index 000000000..9a1095fb3
--- /dev/null
+++ b/src/spdk/test/unit/lib/notify/notify.c/notify_ut.c
@@ -0,0 +1,109 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+#include "notify/notify.c"
+
+static int
+event_cb(uint64_t idx, const struct spdk_notify_event *event, void *ctx)
+{
+ const struct spdk_notify_event **_event = ctx;
+
+ *_event = event;
+ return 0;
+}
+
+static void
+notify(void)
+{
+ struct spdk_notify_type *n1, *n2;
+ const struct spdk_notify_event *event;
+ const char *name;
+ uint64_t cnt;
+
+ n1 = spdk_notify_type_register("one");
+ n2 = spdk_notify_type_register("two");
+
+ name = spdk_notify_type_get_name(n1);
+ CU_ASSERT(strcmp(name, "one") == 0);
+
+ name = spdk_notify_type_get_name(n2);
+ CU_ASSERT(strcmp(name, "two") == 0);
+
+ spdk_notify_send("one", "one_context");
+ spdk_notify_send("two", "two_context");
+
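+	/* Events are indexed from 0 in send order; read them back one at a time. */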
+ event = NULL;
+ cnt = spdk_notify_foreach_event(0, 1, event_cb, &event);
+ SPDK_CU_ASSERT_FATAL(cnt == 1);
+ SPDK_CU_ASSERT_FATAL(event != NULL);
+ CU_ASSERT(strcmp(event->type, "one") == 0);
+ CU_ASSERT(strcmp(event->ctx, "one_context") == 0);
+
+ event = NULL;
+ cnt = spdk_notify_foreach_event(1, 1, event_cb, &event);
+ SPDK_CU_ASSERT_FATAL(cnt == 1);
+ SPDK_CU_ASSERT_FATAL(event != NULL);
+ CU_ASSERT(strcmp(event->type, "two") == 0);
+ CU_ASSERT(strcmp(event->ctx, "two_context") == 0);
+
+ /* This event should not exist yet */
+ event = NULL;
+ cnt = spdk_notify_foreach_event(2, 1, event_cb, &event);
+ CU_ASSERT(cnt == 0);
+ CU_ASSERT(event == NULL);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("app_suite", NULL, NULL);
+ CU_ADD_TEST(suite, notify);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/Makefile b/src/spdk/test/unit/lib/nvme/Makefile
new file mode 100644
index 000000000..5f74579d2
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/Makefile
@@ -0,0 +1,47 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = nvme.c nvme_ctrlr.c nvme_ctrlr_cmd.c nvme_ctrlr_ocssd_cmd.c nvme_ns.c nvme_ns_cmd.c nvme_ns_ocssd_cmd.c nvme_pcie.c nvme_poll_group.c nvme_qpair.c \
+	nvme_quirks.c nvme_tcp.c nvme_uevent.c
+
+DIRS-$(CONFIG_RDMA) += nvme_rdma.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore
new file mode 100644
index 000000000..90c0c1678
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore
@@ -0,0 +1 @@
+nvme_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme.c/Makefile
new file mode 100644
index 000000000..4202cf54c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c b/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c
new file mode 100644
index 000000000..cf51a14bd
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c
@@ -0,0 +1,1377 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "nvme/nvme.c"
+
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+
+DEFINE_STUB_V(nvme_ctrlr_proc_get_ref, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB_V(nvme_ctrlr_proc_put_ref, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB_V(nvme_ctrlr_fail, (struct spdk_nvme_ctrlr *ctrlr, bool hotremove));
+DEFINE_STUB(spdk_nvme_transport_available_by_name, bool,
+ (const char *transport_name), true);
+/* return anything non-NULL, this won't be dereferenced anywhere in this test */
+DEFINE_STUB(nvme_ctrlr_get_current_process, struct spdk_nvme_ctrlr_process *,
+ (struct spdk_nvme_ctrlr *ctrlr), (struct spdk_nvme_ctrlr_process *)(uintptr_t)0x1);
+DEFINE_STUB(nvme_ctrlr_process_init, int,
+ (struct spdk_nvme_ctrlr *ctrlr), 0);
+DEFINE_STUB(nvme_ctrlr_get_ref_count, int,
+ (struct spdk_nvme_ctrlr *ctrlr), 0);
+DEFINE_STUB(dummy_probe_cb, bool,
+ (void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts), false);
+DEFINE_STUB(nvme_transport_ctrlr_construct, struct spdk_nvme_ctrlr *,
+ (const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle), NULL);
+DEFINE_STUB_V(nvme_io_msg_ctrlr_detach, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB(spdk_nvme_transport_available, bool,
+ (enum spdk_nvme_transport_type trtype), true);
+DEFINE_STUB(nvme_uevent_connect, int, (void), 1);
+
+static bool ut_destruct_called = false;
+
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ ut_destruct_called = true;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, opts_size);
+ opts->opts_size = opts_size;
+}
+
+static void
+memset_trid(struct spdk_nvme_transport_id *trid1, struct spdk_nvme_transport_id *trid2)
+{
+ memset(trid1, 0, sizeof(struct spdk_nvme_transport_id));
+ memset(trid2, 0, sizeof(struct spdk_nvme_transport_id));
+}
+
+static bool ut_check_trtype = false;
+static bool ut_test_probe_internal = false;
+
+static int
+ut_nvme_pcie_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct spdk_nvme_qpair qpair = {};
+ int rc;
+
+ if (probe_ctx->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
+ return -1;
+ }
+
+ ctrlr = calloc(1, sizeof(*ctrlr));
+ CU_ASSERT(ctrlr != NULL);
+ ctrlr->adminq = &qpair;
+
+ /* happy path with first controller */
+ MOCK_SET(nvme_transport_ctrlr_construct, ctrlr);
+ rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ /* failed with the second controller */
+ MOCK_SET(nvme_transport_ctrlr_construct, NULL);
+ rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
+ CU_ASSERT(rc != 0);
+ MOCK_CLEAR_P(nvme_transport_ctrlr_construct);
+
+ return -1;
+}
+
+int
+nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ free(ctrlr);
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect)
+{
+ struct spdk_nvme_ctrlr *ctrlr = NULL;
+
+ if (ut_check_trtype == true) {
+ CU_ASSERT(probe_ctx->trid.trtype == SPDK_NVME_TRANSPORT_PCIE);
+ }
+
+ if (ut_test_probe_internal) {
+ return ut_nvme_pcie_ctrlr_scan(probe_ctx, direct_connect);
+ }
+
+ if (direct_connect == true && probe_ctx->probe_cb) {
+ nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
+ ctrlr = nvme_get_ctrlr_by_trid(&probe_ctx->trid);
+ nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
+ probe_ctx->probe_cb(probe_ctx->cb_ctx, &probe_ctx->trid, &ctrlr->opts);
+ }
+ return 0;
+}
+
+static bool ut_attach_cb_called = false;
+static void
+dummy_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ ut_attach_cb_called = true;
+}
+
+static void
+test_spdk_nvme_probe(void)
+{
+ int rc = 0;
+ const struct spdk_nvme_transport_id *trid = NULL;
+ void *cb_ctx = NULL;
+ spdk_nvme_probe_cb probe_cb = NULL;
+ spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
+ spdk_nvme_remove_cb remove_cb = NULL;
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+ struct nvme_driver dummy;
+ g_spdk_nvme_driver = &dummy;
+
+ /* driver init fails */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == -1);
+
+ /*
+ * For secondary processes, the attach_cb should automatically get
+ * called for any controllers already initialized by the primary
+ * process.
+ */
+ MOCK_SET(spdk_nvme_transport_available_by_name, false);
+ MOCK_SET(spdk_process_is_primary, true);
+ dummy.initialized = true;
+ g_spdk_nvme_driver = &dummy;
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == -1);
+
+	/* driver init passes, transport available, secondary process calls attach_cb */
+ MOCK_SET(spdk_nvme_transport_available_by_name, true);
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
+ dummy.initialized = true;
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&dummy.lock, &attr) == 0);
+ TAILQ_INIT(&dummy.shared_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&dummy.shared_attached_ctrlrs, &ctrlr, tailq);
+ ut_attach_cb_called = false;
+	/* setup nvme_transport_ctrlr_scan() stub to also check the trtype */
+ ut_check_trtype = true;
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+
+ /* driver init passes, transport available, we are primary */
+ MOCK_SET(spdk_process_is_primary, true);
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ /* reset to pre-test values */
+ MOCK_CLEAR(spdk_memzone_lookup);
+ ut_check_trtype = false;
+
+ pthread_mutex_destroy(&dummy.lock);
+ pthread_mutexattr_destroy(&attr);
+}
+
+static void
+test_spdk_nvme_connect(void)
+{
+ struct spdk_nvme_ctrlr *ret_ctrlr = NULL;
+ struct spdk_nvme_transport_id trid = {};
+ struct spdk_nvme_ctrlr_opts opts = {};
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+ struct nvme_driver dummy;
+
+	/* initialize the driver state to prepare the test */
+ dummy.initialized = true;
+ TAILQ_INIT(&dummy.shared_attached_ctrlrs);
+ g_spdk_nvme_driver = &dummy;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&g_spdk_nvme_driver->lock, &attr) == 0);
+
+ /* set NULL trid pointer to test immediate return */
+ ret_ctrlr = spdk_nvme_connect(NULL, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+
+ /* driver init passes, transport available, secondary process connects ctrlr */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
+ MOCK_SET(spdk_nvme_transport_available_by_name, true);
+ memset(&trid, 0, sizeof(trid));
+ trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+
+ /* driver init passes, setup one ctrlr on the attached_list */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:01:00.0");
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+ /* get the ctrlr from the attached list */
+ snprintf(trid.traddr, sizeof(trid.traddr), "0000:01:00.0");
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ /* get the ctrlr from the attached list with default ctrlr opts */
+ ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ /* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
+ opts.num_io_queues = 1;
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, sizeof(opts));
+
+ /* opts_size is 0 */
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, 0);
+
+ /* opts_size is less than sizeof(*opts) if opts != NULL */
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, 4);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, 4);
+ /* remove the attached ctrlr on the attached_list */
+ CU_ASSERT(spdk_nvme_detach(&ctrlr) == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+
+ /* driver init passes, transport available, primary process connects ctrlr */
+ MOCK_SET(spdk_process_is_primary, true);
+ /* setup one ctrlr on the attached_list */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:02:00.0");
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+ /* get the ctrlr from the attached list */
+ snprintf(trid.traddr, sizeof(trid.traddr), "0000:02:00.0");
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ /* get the ctrlr from the attached list with default ctrlr opts */
+ ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ /* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
+ opts.num_io_queues = 2;
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 2);
+ /* remove the attached ctrlr on the attached_list */
+ CU_ASSERT(spdk_nvme_detach(ret_ctrlr) == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+
+ /* test driver init failure return */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+}
+
+static struct spdk_nvme_probe_ctx *
+test_nvme_init_get_probe_ctx(void)
+{
+ struct spdk_nvme_probe_ctx *probe_ctx;
+
+ probe_ctx = calloc(1, sizeof(*probe_ctx));
+ SPDK_CU_ASSERT_FATAL(probe_ctx != NULL);
+ TAILQ_INIT(&probe_ctx->init_ctrlrs);
+
+ return probe_ctx;
+}
+
+static void
+test_nvme_init_controllers(void)
+{
+ int rc = 0;
+ struct nvme_driver test_driver;
+ void *cb_ctx = NULL;
+ spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
+ struct spdk_nvme_probe_ctx *probe_ctx;
+ struct spdk_nvme_ctrlr *ctrlr;
+ pthread_mutexattr_t attr;
+
+ g_spdk_nvme_driver = &test_driver;
+ ctrlr = calloc(1, sizeof(*ctrlr));
+ SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
+ ctrlr->trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&test_driver.lock, &attr) == 0);
+ TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
+
+ /*
+ * Try to initialize, but nvme_ctrlr_process_init will fail.
+ * Verify correct behavior when it does.
+ */
+ MOCK_SET(nvme_ctrlr_process_init, 1);
+ MOCK_SET(spdk_process_is_primary, 1);
+ g_spdk_nvme_driver->initialized = false;
+ ut_destruct_called = false;
+ probe_ctx = test_nvme_init_get_probe_ctx();
+ TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
+ probe_ctx->cb_ctx = cb_ctx;
+ probe_ctx->attach_cb = attach_cb;
+ probe_ctx->trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ rc = nvme_init_controllers(probe_ctx);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_spdk_nvme_driver->initialized == true);
+ CU_ASSERT(ut_destruct_called == true);
+
+ /*
+ * Controller init OK, need to move the controller state machine
+	 * forward by setting the ctrlr state so that it can be moved to
+	 * the shared_attached_ctrlrs list.
+ */
+ probe_ctx = test_nvme_init_get_probe_ctx();
+ TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
+ ctrlr->state = NVME_CTRLR_STATE_READY;
+ MOCK_SET(nvme_ctrlr_process_init, 0);
+ rc = nvme_init_controllers(probe_ctx);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+ CU_ASSERT(TAILQ_FIRST(&g_spdk_nvme_driver->shared_attached_ctrlrs) == ctrlr);
+ TAILQ_REMOVE(&g_spdk_nvme_driver->shared_attached_ctrlrs, ctrlr, tailq);
+
+ /*
+ * Non-PCIe controllers should be added to the per-process list, not the shared list.
+ */
+ memset(ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ ctrlr->trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ probe_ctx = test_nvme_init_get_probe_ctx();
+ TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
+ ctrlr->state = NVME_CTRLR_STATE_READY;
+ MOCK_SET(nvme_ctrlr_process_init, 0);
+ rc = nvme_init_controllers(probe_ctx);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+ CU_ASSERT(TAILQ_FIRST(&g_nvme_attached_ctrlrs) == ctrlr);
+ TAILQ_REMOVE(&g_nvme_attached_ctrlrs, ctrlr, tailq);
+ free(ctrlr);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+
+ g_spdk_nvme_driver = NULL;
+ pthread_mutexattr_destroy(&attr);
+ pthread_mutex_destroy(&test_driver.lock);
+}
+
+static void
+test_nvme_driver_init(void)
+{
+ int rc;
+ struct nvme_driver dummy;
+ g_spdk_nvme_driver = &dummy;
+
+ /* adjust this so testing doesn't take so long */
+ g_nvme_driver_timeout_ms = 100;
+
+ /* process is primary and mem already reserved */
+ MOCK_SET(spdk_process_is_primary, true);
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Process is primary and mem not yet reserved but the call
+ * to spdk_memzone_reserve() returns NULL.
+ */
+ g_spdk_nvme_driver = NULL;
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, NULL);
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is not primary, no mem already reserved */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is not primary, mem is already reserved & init'd */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, (void *)&dummy);
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ /* process is not primary, mem is reserved but not initialized */
+ /* and times out */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ dummy.initialized = false;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is primary, got mem but mutex won't init */
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ MOCK_SET(pthread_mutexattr_init, -1);
+ g_spdk_nvme_driver = NULL;
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+	/* for FreeBSD we can't effectively mock this path */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+
+ /* process is primary, got mem, mutex OK */
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_CLEAR(pthread_mutexattr_init);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(g_spdk_nvme_driver->initialized == false);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ MOCK_CLEAR(spdk_memzone_reserve);
+ MOCK_CLEAR(spdk_memzone_lookup);
+}
+
+static void
+test_spdk_nvme_detach(void)
+{
+ int rc = 1;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_ctrlr *ret_ctrlr;
+ struct nvme_driver test_driver;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+
+ g_spdk_nvme_driver = &test_driver;
+ TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
+ CU_ASSERT(pthread_mutex_init(&test_driver.lock, NULL) == 0);
+
+ /*
+ * Controllers are ref counted so mock the function that returns
+ * the ref count so that detach will actually call the destruct
+ * function which we've mocked simply to verify that it gets
+ * called (we aren't testing what the real destruct function does
+ * here.)
+ */
+ MOCK_SET(nvme_ctrlr_get_ref_count, 0);
+ rc = spdk_nvme_detach(&ctrlr);
+ ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
+ CU_ASSERT(ret_ctrlr == NULL);
+ CU_ASSERT(ut_destruct_called == true);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Mock the ref count to 1 so we confirm that the destruct
+ * function is not called and that attached ctrl list is
+ * not empty.
+ */
+ MOCK_SET(nvme_ctrlr_get_ref_count, 1);
+ TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
+ ut_destruct_called = false;
+ rc = spdk_nvme_detach(&ctrlr);
+ ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
+ CU_ASSERT(ret_ctrlr != NULL);
+ CU_ASSERT(ut_destruct_called == false);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Non-PCIe controllers should be on the per-process attached_ctrlrs list, not the
+ * shared_attached_ctrlrs list. Test an RDMA controller and ensure it is removed
+ * from the correct list.
+ */
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ TAILQ_INIT(&g_nvme_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
+ MOCK_SET(nvme_ctrlr_get_ref_count, 0);
+ rc = spdk_nvme_detach(&ctrlr);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+ CU_ASSERT(ut_destruct_called == true);
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ pthread_mutex_destroy(&test_driver.lock);
+}
+
+static void
+test_nvme_completion_poll_cb(void)
+{
+ struct nvme_completion_poll_status *status;
+ struct spdk_nvme_cpl cpl;
+
+ status = calloc(1, sizeof(*status));
+ SPDK_CU_ASSERT_FATAL(status != NULL);
+
+ memset(&cpl, 0xff, sizeof(cpl));
+
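+	/* The callback must mark the status done and copy the completion for the poller to inspect. */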
+ nvme_completion_poll_cb(status, &cpl);
+ CU_ASSERT(status->done == true);
+ CU_ASSERT(memcmp(&cpl, &status->cpl,
+ sizeof(struct spdk_nvme_cpl)) == 0);
+
+ free(status);
+}
+
+/* stub callback used by test_nvme_user_copy_cmd_complete() */
+static struct spdk_nvme_cpl ut_spdk_nvme_cpl = {0};
+static void
+dummy_cb(void *user_cb_arg, struct spdk_nvme_cpl *cpl)
+{
+ ut_spdk_nvme_cpl = *cpl;
+}
+
+static void
+test_nvme_user_copy_cmd_complete(void)
+{
+ struct nvme_request req;
+ int test_data = 0xdeadbeef;
+ int buff_size = sizeof(int);
+ void *buff;
+ static struct spdk_nvme_cpl cpl;
+
+ memset(&req, 0, sizeof(req));
+ memset(&cpl, 0x5a, sizeof(cpl));
+
+ /* test without a user buffer provided */
+ req.user_cb_fn = (void *)dummy_cb;
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /* test with a user buffer provided */
+ req.user_buffer = malloc(buff_size);
+ SPDK_CU_ASSERT_FATAL(req.user_buffer != NULL);
+ memset(req.user_buffer, 0, buff_size);
+ req.payload_size = buff_size;
+ buff = spdk_zmalloc(buff_size, 0x100, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ SPDK_CU_ASSERT_FATAL(buff != NULL);
+ req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
+ memcpy(buff, &test_data, buff_size);
+ req.cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ req.pid = getpid();
+
+ /* zero out the test value set in the callback */
+ memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
+
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) == 0);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /*
+ * Now test the same path as above but this time choose an opc
+ * that results in a different data transfer type.
+ */
+ memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
+ memset(req.user_buffer, 0, buff_size);
+ buff = spdk_zmalloc(buff_size, 0x100, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ SPDK_CU_ASSERT_FATAL(buff != NULL);
+ req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
+ memcpy(buff, &test_data, buff_size);
+ req.cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) != 0);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /* clean up */
+ free(req.user_buffer);
+}
+
+static void
+test_nvme_allocate_request_null(void)
+{
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
+ void *cb_arg = (void *)0x5678;
+ struct nvme_request *req = NULL;
+ struct nvme_request dummy_req;
+
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /*
+ * Put a dummy on the queue so we can make a request
+ * and confirm that what comes back is what we expect.
+ */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ req = nvme_allocate_request_null(&qpair, cb_fn, cb_arg);
+
+ /*
+	 * Compare the req with the parameters that we passed in
+ * as well as what the function is supposed to update.
+ */
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->cb_fn == cb_fn);
+ CU_ASSERT(req->cb_arg == cb_arg);
+ CU_ASSERT(req->pid == getpid());
+ CU_ASSERT(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
+ CU_ASSERT(req->payload.md == NULL);
+ CU_ASSERT(req->payload.contig_or_cb_arg == NULL);
+}
+
+static void
+test_nvme_allocate_request(void)
+{
+ struct spdk_nvme_qpair qpair;
+ struct nvme_payload payload;
+ uint32_t payload_struct_size = sizeof(payload);
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
+ void *cb_arg = (void *)0x6789;
+ struct nvme_request *req = NULL;
+ struct nvme_request dummy_req;
+
+ /* Fill the whole payload struct with a known pattern */
+ memset(&payload, 0x5a, payload_struct_size);
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /* Test trying to allocate a request when no requests are available */
+ req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
+ cb_fn, cb_arg);
+ CU_ASSERT(req == NULL);
+
+ /* put a dummy on the queue, and then allocate one */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+ req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
+ cb_fn, cb_arg);
+
+ /* all the req elements should now match the passed in parameters */
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->cb_fn == cb_fn);
+ CU_ASSERT(req->cb_arg == cb_arg);
+ CU_ASSERT(memcmp(&req->payload, &payload, payload_struct_size) == 0);
+ CU_ASSERT(req->payload_size == payload_struct_size);
+ CU_ASSERT(req->pid == getpid());
+}
+
+static void
+test_nvme_free_request(void)
+{
+ struct nvme_request match_req;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *req;
+
+	/* put a req on the queue, take it off and compare */
+ memset(&match_req.cmd, 0x5a, sizeof(struct spdk_nvme_cmd));
+ match_req.qpair = &qpair;
+	/* the code under test asserts this condition */
+ match_req.num_children = 0;
+ STAILQ_INIT(&qpair.free_req);
+
+ nvme_free_request(&match_req);
+ req = STAILQ_FIRST(&match_req.qpair->free_req);
+ CU_ASSERT(req == &match_req);
+}
+
+static void
+test_nvme_allocate_request_user_copy(void)
+{
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x12345;
+ void *cb_arg = (void *)0x12345;
+ bool host_to_controller = true;
+ struct nvme_request *req;
+ struct nvme_request dummy_req;
+ int test_data = 0xdeadbeef;
+ void *buffer = NULL;
+ uint32_t payload_size = sizeof(int);
+
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /* no buffer or valid payload size, early NULL return */
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ CU_ASSERT(req == NULL);
+
+ /* good buffer and valid payload size */
+ buffer = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ memcpy(buffer, &test_data, payload_size);
+
+ /* put a dummy on the queue */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ MOCK_CLEAR(spdk_malloc);
+ MOCK_CLEAR(spdk_zmalloc);
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->user_cb_fn == cb_fn);
+ CU_ASSERT(req->user_cb_arg == cb_arg);
+ CU_ASSERT(req->user_buffer == buffer);
+ CU_ASSERT(req->cb_arg == req);
+ CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) == 0);
+ spdk_free(req->payload.contig_or_cb_arg);
+
+	/* same path as above, but host_to_controller = false so the data is not copied */
+ host_to_controller = false;
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->user_cb_fn == cb_fn);
+ CU_ASSERT(req->user_cb_arg == cb_arg);
+ CU_ASSERT(req->user_buffer == buffer);
+ CU_ASSERT(req->cb_arg == req);
+ CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) != 0);
+ spdk_free(req->payload.contig_or_cb_arg);
+
+ /* good buffer and valid payload size but make spdk_zmalloc fail */
+ /* set the mock pointer to NULL for spdk_zmalloc */
+ MOCK_SET(spdk_zmalloc, NULL);
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ CU_ASSERT(req == NULL);
+ free(buffer);
+ MOCK_CLEAR(spdk_zmalloc);
+}
+
+static void
+test_nvme_ctrlr_probe(void)
+{
+ int rc = 0;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair qpair = {};
+ const struct spdk_nvme_transport_id trid = {};
+ struct spdk_nvme_probe_ctx probe_ctx = {};
+ void *devhandle = NULL;
+ void *cb_ctx = NULL;
+ struct spdk_nvme_ctrlr *dummy = NULL;
+
+ ctrlr.adminq = &qpair;
+
+ TAILQ_INIT(&probe_ctx.init_ctrlrs);
+ nvme_driver_init();
+
+ /* test when probe_cb returns false */
+
+ MOCK_SET(dummy_probe_cb, false);
+ nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
+ rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
+ CU_ASSERT(rc == 1);
+
+ /* probe_cb returns true but we can't construct a ctrl */
+ MOCK_SET(dummy_probe_cb, true);
+ MOCK_SET(nvme_transport_ctrlr_construct, NULL);
+ nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
+ rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
+ CU_ASSERT(rc == -1);
+
+ /* happy path */
+ MOCK_SET(dummy_probe_cb, true);
+ MOCK_SET(nvme_transport_ctrlr_construct, &ctrlr);
+ nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
+ rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
+ CU_ASSERT(rc == 0);
+ dummy = TAILQ_FIRST(&probe_ctx.init_ctrlrs);
+ SPDK_CU_ASSERT_FATAL(dummy != NULL);
+ CU_ASSERT(dummy == ut_nvme_transport_ctrlr_construct);
+ TAILQ_REMOVE(&probe_ctx.init_ctrlrs, dummy, tailq);
+ MOCK_CLEAR_P(nvme_transport_ctrlr_construct);
+
+ free(g_spdk_nvme_driver);
+}
+
+static void
+test_nvme_robust_mutex_init_shared(void)
+{
+ pthread_mutex_t mtx;
+ int rc = 0;
+
+ /* test where both pthread calls succeed */
+ MOCK_SET(pthread_mutexattr_init, 0);
+ MOCK_SET(pthread_mutex_init, 0);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ CU_ASSERT(rc == 0);
+
+ /* test where we can't init attr's but init mutex works */
+ MOCK_SET(pthread_mutexattr_init, -1);
+ MOCK_SET(pthread_mutex_init, 0);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ /* for FreeBSD the only possible return value is 0 */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+
+ /* test where we can init attr's but the mutex init fails */
+ MOCK_SET(pthread_mutexattr_init, 0);
+ MOCK_SET(pthread_mutex_init, -1);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ /* for FreeBSD the only possible return value is 0 */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+}
+
+static void
+test_opc_data_transfer(void)
+{
+ enum spdk_nvme_data_transfer xfer;
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_FLUSH);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_NONE);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_READ);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+}
+
+static void
+test_trid_parse_and_compare(void)
+{
+ struct spdk_nvme_transport_id trid1, trid2;
+ int ret;
+
+ /* exercise spdk_nvme_transport_id_parse() with invalid and then valid inputs */
+ ret = spdk_nvme_transport_id_parse(NULL, "trtype:PCIe traddr:0000:04:00.0");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, NULL);
+ CU_ASSERT(ret == -EINVAL);
+ ret = spdk_nvme_transport_id_parse(NULL, NULL);
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0-:");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, " \t\n:");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1,
+ "trtype:rdma\n"
+ "adrfam:ipv4\n"
+ "traddr:192.168.100.8\n"
+ "trsvcid:4420\n"
+ "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0);
+ CU_ASSERT(trid1.trtype == SPDK_NVME_TRANSPORT_RDMA);
+ CU_ASSERT(trid1.adrfam == SPDK_NVMF_ADRFAM_IPV4);
+ CU_ASSERT(strcmp(trid1.traddr, "192.168.100.8") == 0);
+ CU_ASSERT(strcmp(trid1.trsvcid, "4420") == 0);
+ CU_ASSERT(strcmp(trid1.subnqn, "nqn.2014-08.org.nvmexpress.discovery") == 0);
+
+ memset(&trid2, 0, sizeof(trid2));
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(trid2.trtype == SPDK_NVME_TRANSPORT_PCIE);
+ CU_ASSERT(strcmp(trid2.traddr, "0000:04:00.0") == 0);
+
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) != 0);
+
+ /* populate trid1/trid2 fields and test spdk_nvme_transport_id_compare() */
+ memset_trid(&trid1, &trid2);
+ trid1.adrfam = SPDK_NVMF_ADRFAM_IPV6;
+ trid2.adrfam = SPDK_NVMF_ADRFAM_IPV4;
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret > 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.traddr, sizeof(trid1.traddr), "192.168.100.8");
+ snprintf(trid2.traddr, sizeof(trid2.traddr), "192.168.100.9");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.trsvcid, sizeof(trid1.trsvcid), "4420");
+ snprintf(trid2.trsvcid, sizeof(trid2.trsvcid), "4421");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2017-08.org.nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret == 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.Nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret > 0);
+
+ memset_trid(&trid1, &trid2);
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret == 0);
+
+ /* Compare PCI addresses via spdk_pci_addr_compare (rather than as strings) */
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) == 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) > 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype=PCIe traddr=0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype=PCIe traddr=05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);
+
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1,
+ "trtype:tcp\n"
+ "adrfam:ipv4\n"
+ "traddr:192.168.100.8\n"
+ "trsvcid:4420\n"
+ "priority:2\n"
+ "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0);
+ CU_ASSERT(trid1.priority == 2);
+}
+
+static void
+test_spdk_nvme_transport_id_parse_trtype(void)
+{
+ enum spdk_nvme_transport_type *trtype;
+ enum spdk_nvme_transport_type sct;
+ char *str;
+
+ trtype = NULL;
+ str = "unit_test";
+
+ /* test the return value when trtype is NULL but str is not */
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));
+
+ /* test the return value when str is NULL but trtype is not */
+ trtype = &sct;
+ str = NULL;
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));
+
+ /* test the return value when str and trtype are both non-NULL, but str is not
+ * a recognized transport name; unknown strings map to the custom transport */
+ str = "unit_test";
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == 0);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_CUSTOM);
+
+ /* "PCIe" is matched with strcasecmp(), so the comparison is case-insensitive */
+ str = "PCIe";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);
+
+ str = "pciE";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);
+
+ /* "RDMA" is likewise matched case-insensitively */
+ str = "RDMA";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);
+
+ str = "rdma";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);
+
+ /* "FC" is likewise matched case-insensitively */
+ str = "FC";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_FC);
+
+ str = "fc";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_FC);
+
+ /* "TCP" is likewise matched case-insensitively */
+ str = "TCP";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_TCP);
+
+ str = "tcp";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_TCP);
+}
+
+static void
+test_spdk_nvme_transport_id_parse_adrfam(void)
+{
+ enum spdk_nvmf_adrfam *adrfam;
+ enum spdk_nvmf_adrfam sct;
+ char *str;
+
+ adrfam = NULL;
+ str = "unit_test";
+
+ /* test the return value when adrfam is NULL but str is not */
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));
+
+ /* test the return value when str is NULL but adrfam is not */
+ adrfam = &sct;
+ str = NULL;
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));
+
+ /* test the return value when str and adrfam are both non-NULL, but str is not
+ * one of "IPv4", "IPv6", "IB", or "FC" */
+ str = "unit_test";
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-ENOENT));
+
+ /* "IPv4" is matched with strcasecmp(), so the comparison is case-insensitive */
+ str = "IPv4";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);
+
+ str = "ipV4";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);
+
+ /* "IPv6" is likewise matched case-insensitively */
+ str = "IPv6";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);
+
+ str = "ipV6";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);
+
+ /* "IB" is likewise matched case-insensitively */
+ str = "IB";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);
+
+ str = "ib";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);
+
+ /* "FC" is likewise matched case-insensitively */
+ str = "FC";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);
+
+ str = "fc";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);
+}
+
+static void
+test_trid_trtype_str(void)
+{
+ const char *s;
+
+ s = spdk_nvme_transport_id_trtype_str(-5);
+ CU_ASSERT(s == NULL);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_PCIE);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "PCIe") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "RDMA") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_FC);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "FC") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_TCP);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "TCP") == 0);
+}
+
+static void
+test_trid_adrfam_str(void)
+{
+ const char *s;
+
+ s = spdk_nvme_transport_id_adrfam_str(-5);
+ CU_ASSERT(s == NULL);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV4);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IPv4") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV6);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IPv6") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IB);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IB") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_FC);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "FC") == 0);
+}
+
+/* stub callback used by test_nvme_request_check_timeout */
+static bool ut_timeout_cb_call = false;
+static void
+dummy_timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair, uint16_t cid)
+{
+ ut_timeout_cb_call = true;
+}
+
+static void
+test_nvme_request_check_timeout(void)
+{
+ int rc;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request req;
+ struct spdk_nvme_ctrlr_process active_proc;
+ uint16_t cid = 0;
+ uint64_t now_tick = 0;
+
+ memset(&qpair, 0x0, sizeof(qpair));
+ memset(&req, 0x0, sizeof(req));
+ memset(&active_proc, 0x0, sizeof(active_proc));
+ req.qpair = &qpair;
+ active_proc.timeout_cb_fn = dummy_timeout_cb;
+
+ /* if timeout_cb_fn has already been called, return immediately */
+ req.timed_out = true;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ /* if the timeout isn't tracked (submit_tick == 0), return immediately */
+ req.timed_out = false;
+ req.submit_tick = 0;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ /* if req->pid doesn't match the current process, return immediately */
+ req.submit_tick = 1;
+ req.pid = g_spdk_nvme_pid + 1;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ /* AER command has no timeout */
+ req.pid = g_spdk_nvme_pid;
+ req.cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ /* the request hasn't timed out yet */
+ qpair.id = 1;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ now_tick = 2;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(req.timed_out == true);
+ CU_ASSERT(ut_timeout_cb_call == true);
+ CU_ASSERT(rc == 0);
+}
+
+struct nvme_completion_poll_status g_status;
+uint64_t completion_delay, timeout_in_secs;
+int g_process_comp_result;
+
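+/* Mock completion processing used by test_nvme_wait_for_completion: spin the
+ * test env's simulated clock forward by completion_delay seconds' worth of
+ * ticks, then report the command done only if it finished within
+ * timeout_in_secs and no error was injected via g_process_comp_result. */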
+int
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ spdk_delay_us(completion_delay * spdk_get_ticks_hz());
+
+ g_status.done = completion_delay < timeout_in_secs && g_process_comp_result == 0;
+
+ return g_process_comp_result;
+}
+
+static void
+test_nvme_wait_for_completion(void)
+{
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+
+ memset(&qpair, 0, sizeof(qpair));
+
+ /* completion timeout */
+ memset(&g_status, 0, sizeof(g_status));
+ completion_delay = 2;
+ timeout_in_secs = 1;
+ rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
+ CU_ASSERT(g_status.timed_out == true);
+ CU_ASSERT(g_status.done == false);
+ CU_ASSERT(rc == -ECANCELED);
+
+ /* spdk_nvme_qpair_process_completions returns error */
+ memset(&g_status, 0, sizeof(g_status));
+ g_process_comp_result = -1;
+ completion_delay = 1;
+ timeout_in_secs = 2;
+ rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
+ CU_ASSERT(rc == -ECANCELED);
+ CU_ASSERT(g_status.timed_out == true);
+ CU_ASSERT(g_status.done == false);
+ CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
+
+ g_process_comp_result = 0;
+
+ /* complete in time */
+ memset(&g_status, 0, sizeof(g_status));
+ completion_delay = 1;
+ timeout_in_secs = 2;
+ rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
+ CU_ASSERT(g_status.timed_out == false);
+ CU_ASSERT(g_status.done == true);
+ CU_ASSERT(rc == 0);
+
+ /* nvme_wait_for_completion */
+ /* spdk_nvme_qpair_process_completions returns error */
+ memset(&g_status, 0, sizeof(g_status));
+ g_process_comp_result = -1;
+ rc = nvme_wait_for_completion(&qpair, &g_status);
+ CU_ASSERT(rc == -ECANCELED);
+ CU_ASSERT(g_status.timed_out == true);
+ CU_ASSERT(g_status.done == false);
+ CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
+
+ /* successful completion */
+ memset(&g_status, 0, sizeof(g_status));
+ g_process_comp_result = 0;
+ rc = nvme_wait_for_completion(&qpair, &g_status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_status.timed_out == false);
+ CU_ASSERT(g_status.done == true);
+}
+
+static void
+test_nvme_ctrlr_probe_internal(void)
+{
+ struct spdk_nvme_probe_ctx *probe_ctx;
+ struct spdk_nvme_transport_id trid = {};
+ struct nvme_driver dummy;
+ int rc;
+
+ probe_ctx = calloc(1, sizeof(*probe_ctx));
+ CU_ASSERT(probe_ctx != NULL);
+
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ ut_test_probe_internal = true;
+ MOCK_SET(dummy_probe_cb, true);
+ trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ nvme_probe_ctx_init(probe_ctx, &trid, NULL, dummy_probe_cb, NULL, NULL);
+ rc = nvme_probe_internal(probe_ctx, false);
+ CU_ASSERT(rc < 0);
+ CU_ASSERT(TAILQ_EMPTY(&probe_ctx->init_ctrlrs));
+
+ free(probe_ctx);
+ ut_test_probe_internal = false;
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_opc_data_transfer);
+ CU_ADD_TEST(suite, test_spdk_nvme_transport_id_parse_trtype);
+ CU_ADD_TEST(suite, test_spdk_nvme_transport_id_parse_adrfam);
+ CU_ADD_TEST(suite, test_trid_parse_and_compare);
+ CU_ADD_TEST(suite, test_trid_trtype_str);
+ CU_ADD_TEST(suite, test_trid_adrfam_str);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_probe);
+ CU_ADD_TEST(suite, test_spdk_nvme_probe);
+ CU_ADD_TEST(suite, test_spdk_nvme_connect);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_probe_internal);
+ CU_ADD_TEST(suite, test_nvme_init_controllers);
+ CU_ADD_TEST(suite, test_nvme_driver_init);
+ CU_ADD_TEST(suite, test_spdk_nvme_detach);
+ CU_ADD_TEST(suite, test_nvme_completion_poll_cb);
+ CU_ADD_TEST(suite, test_nvme_user_copy_cmd_complete);
+ CU_ADD_TEST(suite, test_nvme_allocate_request_null);
+ CU_ADD_TEST(suite, test_nvme_allocate_request);
+ CU_ADD_TEST(suite, test_nvme_free_request);
+ CU_ADD_TEST(suite, test_nvme_allocate_request_user_copy);
+ CU_ADD_TEST(suite, test_nvme_robust_mutex_init_shared);
+ CU_ADD_TEST(suite, test_nvme_request_check_timeout);
+ CU_ADD_TEST(suite, test_nvme_wait_for_completion);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore
new file mode 100644
index 000000000..97a75bee8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile
new file mode 100644
index 000000000..3ce33dc4e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
new file mode 100644
index 000000000..f5b374639
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
@@ -0,0 +1,2150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/log.h"
+
+#include "common/lib/test_env.c"
+
+struct spdk_log_flag SPDK_LOG_NVME = {
+ .name = "nvme",
+ .enabled = false,
+};
+
+#include "nvme/nvme_ctrlr.c"
+#include "nvme/nvme_quirks.c"
+
+pid_t g_spdk_nvme_pid;
+
+struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;
+
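+/* Fake register file: the transport register shims below read and write this
+ * struct, letting the tests drive CC/CSTS state transitions directly. */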
+struct spdk_nvme_registers g_ut_nvme_regs = {};
+
+__thread int nvme_thread_ioq_index = -1;
+
+uint32_t set_size = 1;
+
+int set_status_cpl = -1;
+
+DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
+ (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
+DEFINE_STUB_V(nvme_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
+DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
+ struct spdk_nvme_qpair *qpair), 0);
+DEFINE_STUB_V(nvme_io_msg_ctrlr_update, (struct spdk_nvme_ctrlr *ctrlr));
+
+struct spdk_nvme_ctrlr *
+nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle)
+{
+ return NULL;
+}
+
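+/* Destruct shim: there is no real transport to tear down, so just finish the
+ * controller destruction. */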
+int
+nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ nvme_ctrlr_destruct_finish(ctrlr);
+
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
+ *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
+ *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
+ *value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
+ *value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
+ return 0;
+}
+
+uint32_t
+nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return UINT32_MAX;
+}
+
+uint16_t
+nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 1;
+}
+
+void *
+nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
+{
+ return NULL;
+}
+
+int
+nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
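+/* Allocate a bare qpair on the heap; the delete shim below frees it. */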
+struct spdk_nvme_qpair *
+nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
+ const struct spdk_nvme_io_qpair_opts *opts)
+{
+ struct spdk_nvme_qpair *qpair;
+
+ qpair = calloc(1, sizeof(*qpair));
+ SPDK_CU_ASSERT_FATAL(qpair != NULL);
+
+ qpair->ctrlr = ctrlr;
+ qpair->id = qid;
+ qpair->qprio = opts->qprio;
+
+ return qpair;
+}
+
+int
+nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+ free(qpair);
+ return 0;
+}
+
+void
+nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+}
+
+int
+nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+void
+nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
+{
+}
+
+void
+nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
+{
+}
+
+int
+nvme_driver_init(void)
+{
+ return 0;
+}
+
+int
+nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
+ struct spdk_nvme_ctrlr *ctrlr,
+ enum spdk_nvme_qprio qprio,
+ uint32_t num_requests)
+{
+ qpair->id = id;
+ qpair->qprio = qprio;
+ qpair->ctrlr = ctrlr;
+
+ return 0;
+}
+
+static struct spdk_nvme_cpl fake_cpl = {};
+static enum spdk_nvme_generic_command_status_code set_status_code = SPDK_NVME_SC_SUCCESS;
+
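+/* Complete an admin command inline with a fabricated completion whose status
+ * code comes from set_status_code (SPDK_NVME_SC_SUCCESS by default). */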
+static void
+fake_cpl_sc(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl.status.sc = set_status_code;
+ cb_fn(cb_arg, &fake_cpl);
+}
+
+int
+spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
+ uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT(0);
+ return -1;
+}
+
+int
+spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
+ uint32_t cdw11, void *payload, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
+ uint32_t nsid, void *payload, uint32_t payload_size,
+ uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);
+
+ /*
+ * For the purposes of this unit test, we don't need to bother emulating request submission.
+ */
+
+ return 0;
+}
+
+static int32_t g_wait_for_completion_return_val;
+
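+/* Completion processing is a no-op here; tests steer the polled return code
+ * through g_wait_for_completion_return_val. */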
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ return g_wait_for_completion_return_val;
+}
+
+void
+nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
+{
+}
+
+void
+nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct nvme_completion_poll_status *status = arg;
+ /* This should not happen in the test env, since this callback always runs
+ * before wait_for_completion_*, and timed_out is only ever set to true
+ * inside the wait_for_completion_* functions. */
+ CU_ASSERT(status->timed_out == false);
+
+ status->cpl = *cpl;
+ status->done = true;
+}
+
+static struct nvme_completion_poll_status *g_failed_status;
+
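+/* Simplified wait-for-completion: poll once, treat a negative return as a
+ * failure recorded in g_failed_status, and let set_status_cpl inject an error
+ * status into an otherwise successful completion. */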
+int
+nvme_wait_for_completion_robust_lock(
+ struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status,
+ pthread_mutex_t *robust_mutex)
+{
+ if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
+ g_failed_status = status;
+ status->timed_out = true;
+ return -1;
+ }
+
+ status->done = true;
+ if (set_status_cpl == 1) {
+ status->cpl.status.sc = 1;
+ }
+ return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
+}
+
+int
+nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status)
+{
+ return nvme_wait_for_completion_robust_lock(qpair, status, NULL);
+}
+
+int
+nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status,
+ uint64_t timeout_in_secs)
+{
+ return nvme_wait_for_completion_robust_lock(qpair, status, NULL);
+}
+
+int
+nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
+ union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
+ void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
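+/* For an ACTIVE_NS_LIST identify, fill the payload with the namespace IDs
+ * greater than nsid (up to the list's capacity), then complete inline. */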
+int
+nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
+ void *payload, size_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
+ uint32_t count = 0;
+ uint32_t i = 0;
+ struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;
+
+ for (i = 1; i <= ctrlr->num_ns; i++) {
+ if (i <= nsid) {
+ continue;
+ }
+
+ ns_list->ns_list[count++] = i;
+ if (count == SPDK_COUNTOF(ns_list->ns_list)) {
+ break;
+ }
+ }
+
+ }
+
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
+ uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT(0);
+ return -1;
+}
+
+int
+nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
+ struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
+ struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
+ void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
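+/* Firmware commit stub: expects a REPLACE_IMG action, fails for firmware slot
+ * 0, and arms set_status_cpl so the next polled completion reports an error
+ * unless the controller is resetting. */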
+int
+nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
+ if (fw_commit->fs == 0) {
+ return -1;
+ }
+ set_status_cpl = 1;
+ if (ctrlr->is_resetting == true) {
+ set_status_cpl = 0;
+ }
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
+ uint32_t size, uint32_t offset, void *payload,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
+ return -1;
+ }
+ CU_ASSERT(offset == 0);
+ return 0;
+}
+
+void
+nvme_ns_destruct(struct spdk_nvme_ns *ns)
+{
+}
+
+int
+nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
+ struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_ns_update(struct spdk_nvme_ns *ns)
+{
+ return 0;
+}
+
+void
+spdk_pci_device_detach(struct spdk_pci_device *device)
+{
+}
+
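+/* Declare a zeroed controller backed by a stub admin queue whose free list
+ * holds a single request; the init state-machine tests below drive this
+ * controller through nvme_ctrlr_process_init(). */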
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
+static void
+test_nvme_ctrlr_init_en_1_rdy_0(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 1, CSTS.RDY = 0
+ */
+ g_ut_nvme_regs.cc.bits.en = 1;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ * init() should set CC.EN = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Transition to CSTS.RDY = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ /*
+ * Transition to CC.EN = 1
+ */
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_1_rdy_1(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 1, CSTS.RDY = 1
+ * init() should set CC.EN = 0.
+ */
+ g_ut_nvme_regs.cc.bits.en = 1;
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Transition to CSTS.RDY = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ /*
+ * Transition to CC.EN = 1
+ */
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Default round robin enabled
+ */
+ g_ut_nvme_regs.cap.bits.ams = 0x0;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to default round robin arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Weighted round robin enabled
+ */
+ g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to weighted round robin arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Vendor specific arbitration mechanism supported
+ */
+ g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to vendor specific arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_1(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 1
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ /*
+ * Transition to CSTS.RDY = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ /*
+ * Transition to CC.EN = 1
+ */
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
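+/* Construct a controller and populate free_io_qids with qids 1..num_io_queues
+ * so the alloc_io_qpair tests have a pool of free I/O queue IDs to draw from. */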
+static void
+setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
+{
+ uint32_t i;
+
+ CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);
+
+ ctrlr->page_size = 0x1000;
+ ctrlr->opts.num_io_queues = num_io_queues;
+ ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
+ SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);
+
+ spdk_bit_array_clear(ctrlr->free_io_qids, 0);
+ for (i = 1; i <= num_io_queues; i++) {
+ spdk_bit_array_set(ctrlr->free_io_qids, i);
+ }
+}
+
+static void
+cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
+{
+ nvme_ctrlr_destruct(ctrlr);
+}
+
+static void
+test_alloc_io_qpair_rr_1(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0;
+
+ setup_qpairs(&ctrlr, 1);
+
+ /*
+ * Fake to simulate the controller with default round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+ /* Only 1 I/O qpair was allocated, so this should fail */
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Now that the qpair has been returned to the free list,
+ * we should be able to allocate it again.
+ */
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /* Only qprio 0 is acceptable with the default round robin arbitration mechanism */
+ opts.qprio = 1;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ opts.qprio = 2;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ opts.qprio = 3;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ /* Only qprio values 0 through 3 are acceptable */
+ opts.qprio = 4;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+static void
+test_alloc_io_qpair_wrr_1(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0, *q1;
+
+ setup_qpairs(&ctrlr, 2);
+
+ /*
+ * Fake the register state to simulate a controller using the weighted
+ * round robin arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ /*
+ * Allocate 2 qpairs and free them
+ */
+ opts.qprio = 0;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Allocate 2 qpairs and free them in the reverse order
+ */
+ opts.qprio = 2;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 2);
+
+ opts.qprio = 3;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+
+ /* Only qprio values 0 through 3 are acceptable */
+ opts.qprio = 4;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+static void
+test_alloc_io_qpair_wrr_2(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0, *q1, *q2, *q3;
+
+ setup_qpairs(&ctrlr, 4);
+
+ /*
+ * Fake the register state to simulate a controller using the weighted
+ * round robin arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ opts.qprio = 0;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+
+ opts.qprio = 2;
+ q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q2 != NULL);
+ SPDK_CU_ASSERT_FATAL(q2->qprio == 2);
+
+ opts.qprio = 3;
+ q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q3 != NULL);
+ SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
+
+ /* Only 4 I/O qpairs were allocated, so this should fail */
+ opts.qprio = 0;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Now that the qpairs have been returned to the free list,
+ * we should be able to allocate them again.
+ *
+ * Allocate 4 I/O qpairs, two at each of two qprio values.
+ */
+ opts.qprio = 1;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+
+ opts.qprio = 3;
+ q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q2 != NULL);
+ SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
+
+ opts.qprio = 3;
+ q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q3 != NULL);
+ SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
+
+ /*
+ * Free all I/O qpairs in reverse order
+ */
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
+
+ cleanup_qpairs(&ctrlr);
+}
+
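+/*
+ * Stub of the transport connect hook: record the call and return a configurable
+ * code so the reconnect tests below can observe whether the ctrlr layer calls
+ * down into the transport.
+ */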
+bool g_connect_qpair_called = false;
+int g_connect_qpair_return_code = 0;
+int
+nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+ g_connect_qpair_called = true;
+ return g_connect_qpair_return_code;
+}
+
+static void
+test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair qpair = {};
+ int rc;
+
+ /* Various states of controller disconnect. */
+ qpair.id = 1;
+ qpair.ctrlr = &ctrlr;
+ ctrlr.is_removed = 1;
+ ctrlr.is_failed = 0;
+ ctrlr.is_resetting = 0;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENODEV);
+
+ ctrlr.is_removed = 0;
+ ctrlr.is_failed = 1;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENXIO);
+
+ ctrlr.is_failed = 0;
+ ctrlr.is_resetting = 1;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -EAGAIN);
+
+ /* Confirm precedence for controller states: removed > resetting > failed */
+ ctrlr.is_removed = 1;
+ ctrlr.is_failed = 1;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENODEV);
+
+ ctrlr.is_removed = 0;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -EAGAIN);
+
+ ctrlr.is_resetting = 0;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENXIO);
+
+ /* Qpair is connected, so make sure we don't call down to the transport */
+ ctrlr.is_failed = 0;
+ qpair.state = NVME_QPAIR_CONNECTED;
+ g_connect_qpair_called = false;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(g_connect_qpair_called == false);
+ CU_ASSERT(rc == 0);
+
+ /* Transport qpair is disconnected, so make sure we call down to the transport */
+ qpair.state = NVME_QPAIR_DISCONNECTED;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(g_connect_qpair_called == true);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+test_nvme_ctrlr_fail(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ ctrlr.opts.num_io_queues = 0;
+ nvme_ctrlr_fail(&ctrlr, false);
+
+ CU_ASSERT(ctrlr.is_failed == true);
+}
+
+static void
+test_nvme_ctrlr_construct_intel_support_log_page_list(void)
+{
+ bool res;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_intel_log_page_directory payload = {};
+ struct spdk_pci_id pci_id = {};
+
+ /* Get quirks for a device with all 0 vendor/device id */
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT(ctrlr.quirks == 0);
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == false);
+
+ /* Set the vendor to Intel, but provide no device id */
+ pci_id.class_id = SPDK_PCI_CLASS_NVME;
+ ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ payload.temperature_statistics_log_len = 1;
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
+ CU_ASSERT(res == false);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
+ CU_ASSERT(res == false);
+
+ /* set valid vendor id, device id and sub device id */
+ ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
+ payload.temperature_statistics_log_len = 0;
+ pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.device_id = 0x0953;
+ pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.subdevice_id = 0x3702;
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == false);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
+ CU_ASSERT(res == false);
+}
+
+static void
+test_nvme_ctrlr_set_supported_features(void)
+{
+ bool res;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ /* Set an invalid vendor ID */
+ ctrlr.cdata.vid = 0xFFFF;
+ nvme_ctrlr_set_supported_features(&ctrlr);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
+ CU_ASSERT(res == false);
+
+ ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
+ nvme_ctrlr_set_supported_features(&ctrlr);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
+ CU_ASSERT(res == true);
+}
+
+static void
+test_ctrlr_get_default_ctrlr_opts(void)
+{
+ struct spdk_nvme_ctrlr_opts opts = {};
+
+ CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id,
+ "e53e9258-c93b-48b5-be1a-f025af6d232a") == 0);
+
+ memset(&opts, 0, sizeof(opts));
+
+ /* set a smaller opts_size */
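+ /* Only the fields that fit within the caller-provided opts_size are filled
+ * in, which keeps ABI compatibility with callers built against an older,
+ * smaller options struct. */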
+ CU_ASSERT(sizeof(opts) > 8);
+ spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
+ CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ CU_ASSERT_TRUE(opts.use_cmb_sqs);
+ /* Check that the fields below were not filled in with default values */
+ CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
+ CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
+ CU_ASSERT_EQUAL(opts.io_queue_size, 0);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
+ for (int i = 0; i < 8; i++) {
+ CU_ASSERT(opts.host_id[i] == 0);
+ }
+ for (int i = 0; i < 16; i++) {
+ CU_ASSERT(opts.extended_host_id[i] == 0);
+ }
+ CU_ASSERT(strlen(opts.hostnqn) == 0);
+ CU_ASSERT(strlen(opts.src_addr) == 0);
+ CU_ASSERT(strlen(opts.src_svcid) == 0);
+ CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);
+
+ /* set a consistent opts_size */
+ spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
+ CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ CU_ASSERT_TRUE(opts.use_cmb_sqs);
+ CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
+ for (int i = 0; i < 8; i++) {
+ CU_ASSERT(opts.host_id[i] == 0);
+ }
+ CU_ASSERT_STRING_EQUAL(opts.hostnqn,
+ "2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
+ CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
+ sizeof(opts.extended_host_id)) == 0);
+ CU_ASSERT(strlen(opts.src_addr) == 0);
+ CU_ASSERT(strlen(opts.src_svcid) == 0);
+ CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
+}
+
+static void
+test_ctrlr_get_default_io_qpair_opts(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_io_qpair_opts opts = {};
+
+ memset(&opts, 0, sizeof(opts));
+
+ /* set a smaller opts_size */
+ ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
+ CU_ASSERT(sizeof(opts) > 8);
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
+ CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ /* Check that the field below was not filled in with a default value */
+ CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
+
+ /* set a consistent opts_size */
+ ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
+ ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+ CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
+}
+
+#if 0 /* TODO: move to PCIe-specific unit test */
+static void
+test_nvme_ctrlr_alloc_cmb(void)
+{
+ int rc;
+ uint64_t offset;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ ctrlr.cmb_size = 0x1000000;
+ ctrlr.cmb_current_offset = 0x100;
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x1000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x2000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x100000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
+ CU_ASSERT(rc == -1);
+}
+#endif
+
+static void
+test_spdk_nvme_ctrlr_update_firmware(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ void *payload = NULL;
+ int point_payload = 1;
+ int slot = 0;
+ int ret = 0;
+ struct spdk_nvme_status status;
+ enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;
+
+ /* set_size is not dword-aligned, so the size check fails */
+ set_size = 5;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Payload is NULL and set_size < min_page_size */
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Payload is non-NULL but min_page_size is 0 */
+ set_size = 4;
+ ctrlr.min_page_size = 0;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Check firmware image download with a valid payload and min_page_size while status.cpl is 1 */
+ set_status_cpl = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+
+ /* Check firmware image download with status.cpl value 0 */
+ set_status_cpl = 0;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Check firmware commit */
+ ctrlr.is_resetting = false;
+ set_status_cpl = 0;
+ slot = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+
+ /* Firmware download and commit succeed */
+ ctrlr.is_resetting = true;
+ set_status_cpl = 0;
+ slot = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == 0);
+
+ /* nvme_wait_for_completion returns an error */
+ g_wait_for_completion_return_val = -1;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+ CU_ASSERT(g_failed_status != NULL);
+ CU_ASSERT(g_failed_status->timed_out == true);
+ /* The status would normally be freed by the callback, which is not triggered
+ * in the test env. Store the status in a global variable and free it manually.
+ * If spdk_nvme_ctrlr_update_firmware changes its behavior and frees the status
+ * itself, we'll get a double free here. */
+ free(g_failed_status);
+ g_failed_status = NULL;
+ g_wait_for_completion_return_val = 0;
+
+ set_status_cpl = 0;
+}
+
+int
+nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+static void
+test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ int ret = -1;
+
+ ctrlr.cdata.oacs.doorbell_buffer_config = 1;
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ ctrlr.page_size = 0x1000;
+ MOCK_CLEAR(spdk_malloc);
+ MOCK_CLEAR(spdk_zmalloc);
+ ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
+ CU_ASSERT(ret == 0);
+ nvme_ctrlr_free_doorbell_buffer(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_test_active_ns(void)
+{
+ uint32_t nsid, minor;
+ size_t ns_id_count;
+ struct spdk_nvme_ctrlr ctrlr = {.state = NVME_CTRLR_STATE_READY};
+
+ ctrlr.page_size = 0x1000;
+
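+ /*
+ * Exercise active namespace discovery for NVMe versions 1.0, 1.1 and 1.2;
+ * only 1.1 and later support the Identify Active Namespace ID list (CNS 02h).
+ */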
+ for (minor = 0; minor <= 2; minor++) {
+ ctrlr.vs.bits.mjr = 1;
+ ctrlr.vs.bits.mnr = minor;
+ ctrlr.vs.bits.ter = 0;
+ ctrlr.num_ns = 1531;
+ nvme_ctrlr_identify_active_ns(&ctrlr);
+
+ for (nsid = 1; nsid <= ctrlr.num_ns; nsid++) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+ }
+ ctrlr.num_ns = 1559;
+ for (; nsid <= ctrlr.num_ns; nsid++) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
+ }
+ ctrlr.num_ns = 1531;
+ for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+ ctrlr.active_ns_list[nsid] = 0;
+ }
+ CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
+
+ ctrlr.active_ns_list[0] = 1;
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+ nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+ CU_ASSERT(nsid == 1);
+
+ ctrlr.active_ns_list[1] = 3;
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
+ nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+ CU_ASSERT(nsid == 3);
+ nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+ CU_ASSERT(nsid == 0);
+
+ memset(ctrlr.active_ns_list, 0, ctrlr.num_ns * sizeof(uint32_t));
+ for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+ ctrlr.active_ns_list[nsid] = nsid + 1;
+ }
+
+ ns_id_count = 0;
+ for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+ nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+ ns_id_count++;
+ }
+ CU_ASSERT(ns_id_count == ctrlr.num_ns);
+
+ nvme_ctrlr_destruct(&ctrlr);
+ }
+}
+
+static void
+test_nvme_ctrlr_test_active_ns_error_case(void)
+{
+ int rc;
+ struct spdk_nvme_ctrlr ctrlr = {.state = NVME_CTRLR_STATE_READY};
+
+ ctrlr.page_size = 0x1000;
+ ctrlr.vs.bits.mjr = 1;
+ ctrlr.vs.bits.mnr = 2;
+ ctrlr.vs.bits.ter = 0;
+ ctrlr.num_ns = 2;
+
+ set_status_code = SPDK_NVME_SC_INVALID_FIELD;
+ rc = nvme_ctrlr_identify_active_ns(&ctrlr);
+ CU_ASSERT(rc == -ENXIO);
+ set_status_code = SPDK_NVME_SC_SUCCESS;
+}
+
+static void
+test_nvme_ctrlr_init_delay(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ /* Test that the initialization delay works correctly. We only
+ * do the initialization delay on SSDs that require it, so
+ * set that quirk here.
+ */
+ ctrlr.quirks = NVME_QUIRK_DELAY_BEFORE_INIT;
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.state = NVME_CTRLR_STATE_INIT_DELAY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
+
+ /* Delay 1s; init just returns because the sleep timeout hasn't elapsed yet */
+ spdk_delay_us(1 * spdk_get_ticks_hz());
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
+
+ /* The sleep timeout has elapsed, so initialization starts */
+ spdk_delay_us(2 * spdk_get_ticks_hz());
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_spdk_nvme_ctrlr_set_trid(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvme_transport_id new_trid = {{0}};
+
+ ctrlr.is_failed = false;
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ snprintf(ctrlr.trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
+ snprintf(ctrlr.trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
+ snprintf(ctrlr.trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EPERM);
+
+ ctrlr.is_failed = true;
+ new_trid.trtype = SPDK_NVME_TRANSPORT_TCP;
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
+ CU_ASSERT(ctrlr.trid.trtype == SPDK_NVME_TRANSPORT_RDMA);
+
+ new_trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode2");
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
+ CU_ASSERT(strncmp(ctrlr.trid.subnqn, "nqn.2016-06.io.spdk:cnode1", SPDK_NVMF_NQN_MAX_LEN) == 0);
+
+
+ snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
+ snprintf(new_trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
+ snprintf(new_trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4421");
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == 0);
+ CU_ASSERT(strncmp(ctrlr.trid.traddr, "192.168.100.9", SPDK_NVMF_TRADDR_MAX_LEN) == 0);
+ CU_ASSERT(strncmp(ctrlr.trid.trsvcid, "4421", SPDK_NVMF_TRSVCID_MAX_LEN) == 0);
+}
+
+static void
+test_nvme_ctrlr_init_set_nvmf_ioccsz(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ /* ioccsz is in 16-byte units: 260 * 16 = 4160 bytes, i.e. 4096 bytes after subtracting the 64-byte SQE */
+ ctrlr.cdata.nvmf_specific.ioccsz = 260;
+ ctrlr.cdata.nvmf_specific.icdoff = 1;
+
+ /* Check PCIe trtype; ioccsz and icdoff only apply to fabrics transports. */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 0);
+ CU_ASSERT(ctrlr.icdoff == 0);
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check RDMA trtype. */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+ CU_ASSERT(ctrlr.icdoff == 1);
+ ctrlr.ioccsz_bytes = 0;
+ ctrlr.icdoff = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check TCP trtype. */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+ CU_ASSERT(ctrlr.icdoff == 1);
+ ctrlr.ioccsz_bytes = 0;
+ ctrlr.icdoff = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check FC trtype. */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_FC;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+ CU_ASSERT(ctrlr.icdoff == 1);
+ ctrlr.ioccsz_bytes = 0;
+ ctrlr.icdoff = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check CUSTOM trtype. */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 0);
+ CU_ASSERT(ctrlr.icdoff == 0);
+
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_set_num_queues(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_NUM_QUEUES */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+
+ ctrlr.opts.num_io_queues = 64;
+ /* The number of queues is zero-based, so use 31 to get 32 queues */
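+ /* The completion's cdw0 reports NSQA in bits 15:0 and NCQA in bits 31:16,
+ * both zero-based. */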
+ fake_cpl.cdw0 = 31 + (31 << 16);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> CONSTRUCT_NS */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+ CU_ASSERT(ctrlr.opts.num_io_queues == 32);
+ fake_cpl.cdw0 = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_set_keep_alive_timeout(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ ctrlr.opts.keep_alive_timeout_ms = 60000;
+ ctrlr.cdata.kas = 1;
+ ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
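+ /* Get Features (Keep Alive Timer) reports the current timer value, in
+ * milliseconds, in cdw0. */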
+ fake_cpl.cdw0 = 120000;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_HOST_ID */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_HOST_ID);
+ CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 120000);
+ fake_cpl.cdw0 = 0;
+
+ /* Target does not support Get Feature "Keep Alive Timer" */
+ ctrlr.opts.keep_alive_timeout_ms = 60000;
+ ctrlr.cdata.kas = 1;
+ ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
+ set_status_code = SPDK_NVME_SC_INVALID_FIELD;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_HOST_ID */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_HOST_ID);
+ CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 60000);
+ set_status_code = SPDK_NVME_SC_SUCCESS;
+
+ /* Target fails Get Feature "Keep Alive Timer" for another reason */
+ ctrlr.opts.keep_alive_timeout_ms = 60000;
+ ctrlr.cdata.kas = 1;
+ ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
+ set_status_code = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> ERROR */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
+ set_status_code = SPDK_NVME_SC_SUCCESS;
+
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ctrlr", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_0);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_1);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_1);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_rr);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_vs);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_delay);
+ CU_ADD_TEST(suite, test_alloc_io_qpair_rr_1);
+ CU_ADD_TEST(suite, test_ctrlr_get_default_ctrlr_opts);
+ CU_ADD_TEST(suite, test_ctrlr_get_default_io_qpair_opts);
+ CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_1);
+ CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_2);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_update_firmware);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_fail);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_construct_intel_support_log_page_list);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_features);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_doorbell_buffer_config);
+#if 0 /* TODO: move to PCIe-specific unit test */
+ CU_ADD_TEST(suite, test_nvme_ctrlr_alloc_cmb);
+#endif
+ CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns_error_case);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_reconnect_io_qpair);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_set_trid);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_nvmf_ioccsz);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_num_queues);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_keep_alive_timeout);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore
new file mode 100644
index 000000000..1568b4763
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile
new file mode 100644
index 000000000..5c647dd31
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c
new file mode 100644
index 000000000..581d6134c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c
@@ -0,0 +1,751 @@
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ctrlr_cmd.c"
+
+#define CTRLR_CDATA_ELPE 5
+
+pid_t g_spdk_nvme_pid;
+
+struct nvme_request g_req;
+
+uint32_t error_num_entries;
+uint32_t health_log_nsid = 1;
+uint8_t feature = 1;
+uint32_t feature_cdw11 = 1;
+uint32_t feature_cdw12 = 1;
+uint8_t get_feature = 1;
+uint32_t get_feature_cdw11 = 1;
+uint32_t fw_img_size = 1024;
+uint32_t fw_img_offset = 0;
+uint16_t abort_cid = 1;
+uint16_t abort_sqid = 1;
+uint32_t namespace_management_nsid = 1;
+uint64_t PRP_ENTRY_1 = 4096;
+uint64_t PRP_ENTRY_2 = 4096;
+uint32_t format_nvme_nsid = 1;
+uint32_t sanitize_nvme_nsid = 1;
+uint32_t expected_host_id_size = 0xFF;
+
+uint32_t expected_feature_ns = 2;
+uint32_t expected_feature_cdw10 = SPDK_NVME_FEAT_LBA_RANGE_TYPE;
+uint32_t expected_feature_cdw11 = 1;
+uint32_t expected_feature_cdw12 = 1;
+
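+/*
+ * Each test installs a verifier here; the stubbed submit functions below invoke
+ * it on the built request instead of issuing real I/O.
+ */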
+typedef void (*verify_request_fn_t)(struct nvme_request *req);
+verify_request_fn_t verify_fn;
+
+static void verify_firmware_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG);
+
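+ /* Get Log Page cdw10 packs the zero-based dword count in bits 31:16 and the
+ * log page identifier in bits 7:0. */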
+ temp_cdw10 = ((sizeof(struct spdk_nvme_firmware_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_LOG_FIRMWARE_SLOT;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_health_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == health_log_nsid);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_health_information_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_LOG_HEALTH_INFORMATION;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_error_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG);
+
+ temp_cdw10 = (((sizeof(struct spdk_nvme_error_information_entry) * error_num_entries) /
+ sizeof(uint32_t) - 1) << 16) | SPDK_NVME_LOG_ERROR;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_set_feature_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == feature);
+ CU_ASSERT(req->cmd.cdw11 == feature_cdw11);
+ CU_ASSERT(req->cmd.cdw12 == feature_cdw12);
+}
+
+static void verify_set_feature_ns_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == expected_feature_cdw10);
+ CU_ASSERT(req->cmd.cdw11 == expected_feature_cdw11);
+ CU_ASSERT(req->cmd.cdw12 == expected_feature_cdw12);
+ CU_ASSERT(req->cmd.nsid == expected_feature_ns);
+}
+
+static void verify_get_feature_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == get_feature);
+ CU_ASSERT(req->cmd.cdw11 == get_feature_cdw11);
+}
+
+static void verify_get_feature_ns_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == expected_feature_cdw10);
+ CU_ASSERT(req->cmd.cdw11 == expected_feature_cdw11);
+ CU_ASSERT(req->cmd.nsid == expected_feature_ns);
+}
+
+static void verify_abort_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ABORT);
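+ /* Abort cdw10 carries the command ID in bits 31:16 and the SQ ID in bits 15:0. */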
+ CU_ASSERT(req->cmd.cdw10 == (((uint32_t)abort_cid << 16) | abort_sqid));
+}
+
+static void verify_io_cmd_raw_no_payload_build(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+ struct nvme_payload payload = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+ CU_ASSERT(memcmp(&req->payload, &payload, sizeof(req->payload)) == 0);
+}
+
+static void verify_io_raw_cmd(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+}
+
+static void verify_io_raw_cmd_with_md(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+}
+
+static void verify_set_host_id_cmd(struct nvme_request *req)
+{
+ switch (expected_host_id_size) {
+ case 8:
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_FEAT_HOST_IDENTIFIER);
+ CU_ASSERT(req->cmd.cdw11 == 0);
+ CU_ASSERT(req->cmd.cdw12 == 0);
+ break;
+ case 16:
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_FEAT_HOST_IDENTIFIER);
+ CU_ASSERT(req->cmd.cdw11 == 1);
+ CU_ASSERT(req->cmd.cdw12 == 0);
+ break;
+ default:
+ CU_ASSERT(0);
+ }
+}
+
+static void verify_intel_smart_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == health_log_nsid);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_smart_information_page) /
+ sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_SMART;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_temperature_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_temperature_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_TEMPERATURE;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_read_latency_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_write_latency_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_get_log_page_directory(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_log_page_directory) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_marketing_description_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_marketing_description_page) / sizeof(
+ uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_MARKETING_DESCRIPTION;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_namespace_attach(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_ATTACHMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_CTRLR_ATTACH);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_namespace_detach(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_ATTACHMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_CTRLR_DETACH);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_namespace_create(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_MANAGEMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_MANAGEMENT_CREATE);
+ CU_ASSERT(req->cmd.nsid == 0);
+}
+
+static void verify_namespace_delete(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_MANAGEMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_MANAGEMENT_DELETE);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_doorbell_buffer_config(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG);
+ CU_ASSERT(req->cmd.dptr.prp.prp1 == PRP_ENTRY_1);
+ CU_ASSERT(req->cmd.dptr.prp.prp2 == PRP_ENTRY_2);
+}
+
+static void verify_format_nvme(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FORMAT_NVM);
+ CU_ASSERT(req->cmd.cdw10 == 0);
+ CU_ASSERT(req->cmd.nsid == format_nvme_nsid);
+}
+
+static void verify_fw_commit(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FIRMWARE_COMMIT);
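+ /* Firmware Commit cdw10 packs the commit action in bits 5:3 and the firmware
+ * slot in bits 2:0; CA = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG (1) with
+ * FS = 1 yields 0x09. */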
+ CU_ASSERT(req->cmd.cdw10 == 0x09);
+}
+
+static void verify_fw_image_download(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD);
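+ /* Firmware Image Download takes the zero-based transfer length in dwords in
+ * cdw10 and the offset in dwords in cdw11, hence the >> 2 byte conversions. */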
+ CU_ASSERT(req->cmd.cdw10 == (fw_img_size >> 2) - 1);
+ CU_ASSERT(req->cmd.cdw11 == fw_img_offset >> 2);
+}
+
+static void verify_nvme_sanitize(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SANITIZE);
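+ /* Sanitize cdw10: SANACT in bits 2:0, AUSE at bit 3, OIPBP at bit 8 and NDAS
+ * at bit 9; setting each of them to 1 yields 0x309. */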
+ CU_ASSERT(req->cmd.cdw10 == 0x309);
+ CU_ASSERT(req->cmd.cdw11 == 0);
+ CU_ASSERT(req->cmd.nsid == sanitize_nvme_nsid);
+}
+
+struct nvme_request *
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
+{
+ /* For the unit test, we don't actually need to copy the buffer */
+ return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ verify_fn(req);
+ /* stop analyzer from thinking stack variable addresses are stored in a global */
+ memset(req, 0, sizeof(*req));
+
+ return 0;
+}
+
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
+{
+ verify_fn(req);
+ /* stop analyzer from thinking stack variable addresses are stored in a global */
+ memset(req, 0, sizeof(*req));
+
+ return 0;
+}
+
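+/*
+ * Construct a minimal controller whose admin queue is seeded with a single free
+ * request, so the command helpers under test can allocate one without any
+ * transport plumbing.
+ */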
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
+static void
+test_firmware_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_firmware_page payload = {};
+
+ verify_fn = verify_firmware_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_FIRMWARE_SLOT, SPDK_NVME_GLOBAL_NS_TAG,
+ &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void
+test_health_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_health_information_page payload = {};
+
+ verify_fn = verify_health_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, health_log_nsid,
+ &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void
+test_error_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_error_information_entry payload = {};
+
+ ctrlr.cdata.elpe = CTRLR_CDATA_ELPE;
+
+ verify_fn = verify_error_log_page;
+
+ /* valid page */
+ error_num_entries = 1;
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_ERROR, SPDK_NVME_GLOBAL_NS_TAG, &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_smart_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_smart_information_page payload = {};
+
+ verify_fn = verify_intel_smart_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_SMART, health_log_nsid, &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_temperature_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_temperature_page payload = {};
+
+ verify_fn = verify_intel_temperature_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_read_latency_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_rw_latency_page payload = {};
+
+ verify_fn = verify_intel_read_latency_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_write_latency_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_rw_latency_page payload = {};
+
+ verify_fn = verify_intel_write_latency_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_get_log_page_directory(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_log_page_directory payload = {};
+
+ verify_fn = verify_intel_get_log_page_directory;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_marketing_description_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_marketing_description_page payload = {};
+
+ verify_fn = verify_intel_marketing_description_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_MARKETING_DESCRIPTION,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_generic_get_log_pages(void)
+{
+ test_error_get_log_page();
+ test_health_get_log_page();
+ test_firmware_get_log_page();
+}
+
+static void test_intel_get_log_pages(void)
+{
+ test_intel_get_log_page_directory();
+ test_intel_smart_get_log_page();
+ test_intel_temperature_get_log_page();
+ test_intel_read_latency_get_log_page();
+ test_intel_write_latency_get_log_page();
+ test_intel_marketing_description_get_log_page();
+}
+
+static void
+test_set_feature_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_set_feature_cmd;
+
+ spdk_nvme_ctrlr_cmd_set_feature(&ctrlr, feature, feature_cdw11, feature_cdw12, NULL, 0, NULL, NULL);
+}
+
+static void
+test_get_feature_ns_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_get_feature_ns_cmd;
+
+ spdk_nvme_ctrlr_cmd_get_feature_ns(&ctrlr, expected_feature_cdw10,
+ expected_feature_cdw11, NULL, 0,
+ NULL, NULL, expected_feature_ns);
+}
+
+static void
+test_set_feature_ns_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_set_feature_ns_cmd;
+
+ spdk_nvme_ctrlr_cmd_set_feature_ns(&ctrlr, expected_feature_cdw10,
+ expected_feature_cdw11, expected_feature_cdw12,
+ NULL, 0, NULL, NULL, expected_feature_ns);
+}
+
+static void
+test_get_feature_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_get_feature_cmd;
+
+ spdk_nvme_ctrlr_cmd_get_feature(&ctrlr, get_feature, get_feature_cdw11, NULL, 0, NULL, NULL);
+}
+
+static void
+test_abort_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+
+ STAILQ_INIT(&ctrlr.queued_aborts);
+
+ verify_fn = verify_abort_cmd;
+
+ qpair.id = abort_sqid;
+ spdk_nvme_ctrlr_cmd_abort(&ctrlr, &qpair, abort_cid, NULL, NULL);
+}
+
+static void
+test_io_cmd_raw_no_payload_build(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_cmd_raw_no_payload_build;
+
+ spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(&ctrlr, &qpair, &cmd, NULL, NULL);
+}
+
+static void
+test_io_raw_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_raw_cmd;
+
+ spdk_nvme_ctrlr_cmd_io_raw(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL);
+}
+
+static void
+test_io_raw_cmd_with_md(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_raw_cmd_with_md;
+
+ spdk_nvme_ctrlr_cmd_io_raw_with_md(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL, NULL);
+}
+
+static int
+test_set_host_id_by_case(uint32_t host_id_size)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ int rc = 0;
+
+ expected_host_id_size = host_id_size;
+ verify_fn = verify_set_host_id_cmd;
+
+ rc = nvme_ctrlr_cmd_set_host_id(&ctrlr, NULL, expected_host_id_size, NULL, NULL);
+
+ return rc;
+}
+
+static void
+test_set_host_id_cmds(void)
+{
+ int rc = 0;
+
+ rc = test_set_host_id_by_case(8);
+ CU_ASSERT(rc == 0);
+ rc = test_set_host_id_by_case(16);
+ CU_ASSERT(rc == 0);
+ rc = test_set_host_id_by_case(1024);
+ CU_ASSERT(rc == -EINVAL);
+}
+
+static void
+test_get_log_pages(void)
+{
+ test_generic_get_log_pages();
+ test_intel_get_log_pages();
+}
+
+static void
+test_namespace_attach(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ctrlr_list payload = {};
+
+ verify_fn = verify_namespace_attach;
+
+ nvme_ctrlr_cmd_attach_ns(&ctrlr, namespace_management_nsid, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_detach(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ctrlr_list payload = {};
+
+ verify_fn = verify_namespace_detach;
+
+ nvme_ctrlr_cmd_detach_ns(&ctrlr, namespace_management_nsid, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_create(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ns_data payload = {};
+
+ verify_fn = verify_namespace_create;
+ nvme_ctrlr_cmd_create_ns(&ctrlr, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_delete(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_namespace_delete;
+ nvme_ctrlr_cmd_delete_ns(&ctrlr, namespace_management_nsid, NULL, NULL);
+}
+
+static void
+test_doorbell_buffer_config(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_doorbell_buffer_config;
+
+ nvme_ctrlr_cmd_doorbell_buffer_config(&ctrlr, PRP_ENTRY_1, PRP_ENTRY_2, NULL, NULL);
+}
+
+static void
+test_format_nvme(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_format format = {};
+
+ verify_fn = verify_format_nvme;
+
+ nvme_ctrlr_cmd_format(&ctrlr, format_nvme_nsid, &format, NULL, NULL);
+}
+
+static void
+test_fw_commit(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_fw_commit fw_commit = {};
+
+ fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG;
+ fw_commit.fs = 1;
+
+ verify_fn = verify_fw_commit;
+
+ nvme_ctrlr_cmd_fw_commit(&ctrlr, &fw_commit, NULL, NULL);
+}
+
+static void
+test_fw_image_download(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_fw_image_download;
+
+ nvme_ctrlr_cmd_fw_image_download(&ctrlr, fw_img_size, fw_img_offset, NULL,
+ NULL, NULL);
+}
+
+static void
+test_sanitize(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_sanitize sanitize = {};
+
+ sanitize.sanact = 1;
+ sanitize.ause = 1;
+ sanitize.oipbp = 1;
+ sanitize.ndas = 1;
+
+ verify_fn = verify_nvme_sanitize;
+
+ nvme_ctrlr_cmd_sanitize(&ctrlr, sanitize_nvme_nsid, &sanitize, 0, NULL, NULL);
+
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ctrlr_cmd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_get_log_pages);
+ CU_ADD_TEST(suite, test_set_feature_cmd);
+ CU_ADD_TEST(suite, test_set_feature_ns_cmd);
+ CU_ADD_TEST(suite, test_get_feature_cmd);
+ CU_ADD_TEST(suite, test_get_feature_ns_cmd);
+ CU_ADD_TEST(suite, test_abort_cmd);
+ CU_ADD_TEST(suite, test_set_host_id_cmds);
+ CU_ADD_TEST(suite, test_io_cmd_raw_no_payload_build);
+ CU_ADD_TEST(suite, test_io_raw_cmd);
+ CU_ADD_TEST(suite, test_io_raw_cmd_with_md);
+ CU_ADD_TEST(suite, test_namespace_attach);
+ CU_ADD_TEST(suite, test_namespace_detach);
+ CU_ADD_TEST(suite, test_namespace_create);
+ CU_ADD_TEST(suite, test_namespace_delete);
+ CU_ADD_TEST(suite, test_doorbell_buffer_config);
+ CU_ADD_TEST(suite, test_format_nvme);
+ CU_ADD_TEST(suite, test_fw_commit);
+ CU_ADD_TEST(suite, test_fw_image_download);
+ CU_ADD_TEST(suite, test_sanitize);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore
new file mode 100644
index 000000000..2813105d4
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_ocssd_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile
new file mode 100644
index 000000000..9446b8d53
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_ocssd_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c
new file mode 100644
index 000000000..69de8c5b0
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c
@@ -0,0 +1,106 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ctrlr_ocssd_cmd.c"
+
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
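+/*
+ * The macro above gives each test a freshly constructed controller whose
+ * admin queue holds one preallocated request, so the command-building code
+ * under test can pop a request from free_req without a real transport.
+ */
+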
+pid_t g_spdk_nvme_pid;
+struct nvme_request g_req;
+typedef void (*verify_request_fn_t)(struct nvme_request *req);
+verify_request_fn_t verify_fn;
+
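+/*
+ * Each test points verify_fn at a checker; the stubbed admin-submit path
+ * below hands the checker the request that was just built, instead of
+ * sending it to a device.
+ */
+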
+static const uint32_t expected_geometry_ns = 1;
+
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
+{
+ verify_fn(req);
+ memset(req, 0, sizeof(*req));
+ return 0;
+}
+
+struct nvme_request *
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
+{
+ /* For the unit test, we don't actually need to copy the buffer */
+ return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
+}
+
+static void
+verify_geometry_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_OCSSD_OPC_GEOMETRY);
+ CU_ASSERT(req->cmd.nsid == expected_geometry_ns);
+}
+
+static void
+test_geometry_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ struct spdk_ocssd_geometry_data geo;
+
+ verify_fn = verify_geometry_cmd;
+
+ spdk_nvme_ocssd_ctrlr_cmd_geometry(&ctrlr, expected_geometry_ns, &geo,
+ sizeof(geo), NULL, NULL);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+	suite = CU_add_suite("nvme_ctrlr_ocssd_cmd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_geometry_cmd);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore
new file mode 100644
index 000000000..ada0ec86d
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile
new file mode 100644
index 000000000..add85ee9f
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c
new file mode 100644
index 000000000..22c59e06c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c
@@ -0,0 +1,153 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "nvme/nvme_ns.c"
+
+#include "common/lib/test_env.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+DEFINE_STUB(nvme_wait_for_completion_robust_lock, int,
+ (struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status,
+ pthread_mutex_t *robust_mutex), 0);
+
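+/*
+ * DEFINE_STUB(name, return type, args, value) comes from the shared test
+ * environment and generates a mock that simply returns the given value by
+ * default - here a successful (0) wait for completion - so the namespace
+ * code paths can run without real I/O.
+ */
+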
+int
+nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
+ void *payload, size_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return -1;
+}
+
+void
+nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ return -1;
+}
+
+static void
+test_nvme_ns_construct(void)
+{
+ struct spdk_nvme_ns ns = {};
+ uint32_t id = 1;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ nvme_ns_construct(&ns, id, &ctrlr);
+ CU_ASSERT(ns.id == 1);
+}
+
+static void
+test_nvme_ns_uuid(void)
+{
+ struct spdk_nvme_ns ns = {};
+ const struct spdk_uuid *uuid;
+ struct spdk_uuid expected_uuid;
+
+ memset(&expected_uuid, 0xA5, sizeof(expected_uuid));
+
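+	/*
+	 * Layout of each entry in id_desc_list: byte 0 is NIDT (descriptor
+	 * type), byte 1 is NIDL (identifier length), bytes 2-3 are reserved,
+	 * and the identifier itself starts at byte 4.  With 16-byte (0x10)
+	 * identifiers, the next descriptor therefore begins at offset
+	 * 4 + 16 = 20 - hence the offsets used below.
+	 */
+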
+ /* Empty list - no UUID should be found */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ CU_ASSERT(uuid == NULL);
+
+ /* NGUID only (no UUID in list) */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[4], 0xCC, 0x10);
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ CU_ASSERT(uuid == NULL);
+
+ /* Just UUID in the list */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x03; /* NIDT == UUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[4], &expected_uuid, sizeof(expected_uuid));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+
+ /* UUID followed by NGUID */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x03; /* NIDT == UUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[4], &expected_uuid, sizeof(expected_uuid));
+ ns.id_desc_list[20] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[21] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[24], 0xCC, 0x10);
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+
+ /* NGUID followed by UUID */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[4], 0xCC, 0x10);
+	ns.id_desc_list[20] = 0x03; /* NIDT == UUID */
+ ns.id_desc_list[21] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[24], &expected_uuid, sizeof(expected_uuid));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_ns_construct);
+ CU_ADD_TEST(suite, test_nvme_ns_uuid);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore
new file mode 100644
index 000000000..5583ec23e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile
new file mode 100644
index 000000000..ff451d72a
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c
new file mode 100644
index 000000000..fe0014f56
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c
@@ -0,0 +1,1739 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ns_cmd.c"
+#include "nvme/nvme.c"
+
+#include "common/lib/test_env.c"
+
+static struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+static struct nvme_request *g_request = NULL;
+
+int
+spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ return -1;
+}
+
+static void
+nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
+{
+}
+
+static int
+nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+	uint32_t *lba_count = cb_arg;
+
+	/*
+	 * We need to set address to something here, since the SGL splitting code
+	 * will use it to determine PRP compatibility.  Use an arbitrary but
+	 * page-aligned address for now - these tests never actually read from or
+	 * write to it.
+	 */
+	*address = (void *)(uintptr_t)0x10000000;
+	*length = *lba_count;
+	return 0;
+}
+
+bool
+spdk_nvme_transport_available_by_name(const char *transport_name)
+{
+ return true;
+}
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle)
+{
+ return NULL;
+}
+
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+}
+
+int
+nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+}
+
+struct spdk_pci_addr
+spdk_pci_device_get_addr(struct spdk_pci_device *pci_dev)
+{
+ struct spdk_pci_addr pci_addr;
+
+ memset(&pci_addr, 0, sizeof(pci_addr));
+ return pci_addr;
+}
+
+struct spdk_pci_id
+spdk_pci_device_get_id(struct spdk_pci_device *pci_dev)
+{
+ struct spdk_pci_id pci_id;
+
+ memset(&pci_id, 0xFF, sizeof(pci_id));
+
+ return pci_id;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, sizeof(*opts));
+}
+
+uint32_t
+spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
+{
+ return ns->sector_size;
+}
+
+uint32_t
+spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
+{
+ return ns->ctrlr->max_xfer_size;
+}
+
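+/*
+ * Capture stub: instead of submitting to a transport, remember the request
+ * that the command-building code produced so each test can inspect it
+ * through g_request.
+ */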
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ g_request = req;
+
+ return 0;
+}
+
+void
+nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+void
+nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+int
+nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect)
+{
+ return 0;
+}
+
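+/*
+ * Build a fake namespace/controller/qpair for one test case.  For example,
+ * prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false)
+ * models 512-byte sectors, no metadata, a 128 KB max transfer size
+ * (256 sectors per I/O) and no driver-assisted striping.
+ */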
+static void
+prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair,
+ uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
+ uint32_t stripe_size, bool extended_lba)
+{
+ uint32_t num_requests = 32;
+ uint32_t i;
+
+ ctrlr->max_xfer_size = max_xfer_size;
+ /*
+ * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
+ * so that we test the SGL splitting path.
+ */
+ ctrlr->flags = 0;
+ ctrlr->min_page_size = 4096;
+ ctrlr->page_size = 4096;
+ memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
+ memset(ns, 0, sizeof(*ns));
+ ns->ctrlr = ctrlr;
+ ns->sector_size = sector_size;
+ ns->extended_lba_size = sector_size;
+ if (extended_lba) {
+ ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
+ ns->extended_lba_size += md_size;
+ }
+ ns->md_size = md_size;
+ ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
+ ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;
+
+ memset(qpair, 0, sizeof(*qpair));
+ qpair->ctrlr = ctrlr;
+ qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
+ SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
+
+ for (i = 0; i < num_requests; i++) {
+ struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);
+
+ req->qpair = qpair;
+ STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
+ }
+
+ g_request = NULL;
+}
+
+static void
+cleanup_after_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
+static void
+nvme_cmd_interpret_rw(const struct spdk_nvme_cmd *cmd,
+ uint64_t *lba, uint32_t *num_blocks)
+{
+	/* CDW10/CDW11 carry the 64-bit starting LBA. */
+	*lba = *(const uint64_t *)&cmd->cdw10;
+	/* The low 16 bits of CDW12 carry the zero-based block count. */
+	*num_blocks = (cmd->cdw12 & 0xFFFFu) + 1;
+}
+
+static void
+split_test(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_ctrlr ctrlr;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
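+	/* A single 512-byte read fits within the 128 KB max transfer, so no split. */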
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(512);
+ lba = 0;
+ lba_count = 1;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(cmd_lba == lba);
+ CU_ASSERT(cmd_lba_count == lba_count);
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test2(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks).
+ * Submit an I/O of 256 KB starting at LBA 0, which should be split
+ * on the max I/O boundary into two I/Os of 128 KB.
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(256 * 1024);
+ lba = 0;
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ CU_ASSERT(g_request->num_children == 2);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 0);
+ CU_ASSERT(cmd_lba_count == 256); /* 256 * 512 byte blocks = 128 KB */
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 256);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test3(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks).
+ * Submit an I/O of 256 KB starting at LBA 10, which should be split
+ * into two I/Os:
+ * 1) LBA = 10, count = 256 blocks
+ * 2) LBA = 266, count = 256 blocks
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(256 * 1024);
+ lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 10);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 266);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test4(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks) and a stripe size of 128 KB.
+ * (Same as split_test3 except with driver-assisted striping enabled.)
+ * Submit an I/O of 256 KB starting at LBA 10, which should be split
+ * into three I/Os:
+	 * 1) LBA = 10, count = 246 blocks (10 + 246 = 256: shortened so I/O 2 starts on a stripe boundary)
+ * 2) LBA = 256, count = 256 blocks (aligned to stripe size and max I/O size)
+ * 3) LBA = 512, count = 10 blocks (finish off the remaining I/O size)
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
+ payload = malloc(256 * 1024);
+ lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 3);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == (256 - 10) * 512);
+ CU_ASSERT(child->payload_offset == 0);
+ CU_ASSERT(cmd_lba == 10);
+ CU_ASSERT(cmd_lba_count == 256 - 10);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(child->payload_offset == (256 - 10) * 512);
+ CU_ASSERT(cmd_lba == 256);
+ CU_ASSERT(cmd_lba_count == 256);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 10 * 512);
+ CU_ASSERT(child->payload_offset == (512 - 10) * 512);
+ CU_ASSERT(cmd_lba == 512);
+ CU_ASSERT(cmd_lba_count == 10);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_cmd_child_request(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ struct nvme_request *child, *tmp;
+ void *payload;
+ uint64_t lba = 0x1000;
+ uint32_t i = 0;
+ uint32_t offset = 0;
+ uint32_t sector_size = 512;
+ uint32_t max_io_size = 128 * 1024;
+ uint32_t sectors_per_max_io = max_io_size / sector_size;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_io_size, 0, false);
+
+ payload = malloc(128 * 1024);
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->payload_offset == 0);
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io - 1, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->payload_offset == 0);
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io * 4, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->num_children == 4);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, (DEFAULT_IO_QUEUE_REQUESTS + 1) * sector_size,
+ NULL,
+ NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == -EINVAL);
+
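+	/*
+	 * The failed call above never reached nvme_qpair_submit_request(), so
+	 * g_request still points at the four-way split from the previous read;
+	 * walk and verify its children here.
+	 */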
+ TAILQ_FOREACH_SAFE(child, &g_request->children, child_tailq, tmp) {
+ nvme_request_remove_child(g_request, child);
+ CU_ASSERT(child->payload_offset == offset);
+ CU_ASSERT(child->cmd.opc == SPDK_NVME_OPC_READ);
+ CU_ASSERT(child->cmd.nsid == ns.id);
+ CU_ASSERT(child->cmd.cdw10 == (lba + sectors_per_max_io * i));
+		CU_ASSERT(child->cmd.cdw12 == ((sectors_per_max_io - 1) | 0));	/* NLB is zero-based */
+ offset += max_io_size;
+ nvme_free_request(child);
+ i++;
+ }
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_flush(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_flush(&ns, &qpair, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_zeroes(void)
+{
+ struct spdk_nvme_ns ns = { 0 };
+ struct spdk_nvme_ctrlr ctrlr = { 0 };
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ uint64_t cmd_lba;
+ uint32_t cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_write_zeroes(&ns, &qpair, 0, 2, cb_fn, cb_arg, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, 0);
+ CU_ASSERT_EQUAL(cmd_lba_count, 2);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_uncorrectable(void)
+{
+ struct spdk_nvme_ns ns = { 0 };
+ struct spdk_nvme_ctrlr ctrlr = { 0 };
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ uint64_t cmd_lba;
+ uint32_t cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_write_uncorrectable(&ns, &qpair, 0, 2, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_UNCORRECTABLE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, 0);
+ CU_ASSERT_EQUAL(cmd_lba_count, 2);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_dataset_management(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ struct spdk_nvme_dsm_range ranges[256];
+ uint16_t i;
+ int rc = 0;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ for (i = 0; i < 256; i++) {
+ ranges[i].starting_lba = i;
+ ranges[i].length = 1;
+ ranges[i].attributes.raw = 0;
+ }
+
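+	/*
+	 * The DSM helper copies the ranges into a driver-allocated buffer kept
+	 * in payload.contig_or_cb_arg, which is why each case below releases it
+	 * with spdk_free() before freeing the request.
+	 */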
+ /* TRIM one LBA */
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ ranges, 1, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == 0);	/* NR is zero-based: one range */
+ CU_ASSERT(g_request->cmd.cdw11_bits.dsm.ad == 1);
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+
+ /* TRIM 256 LBAs */
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ ranges, 256, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == 255u);	/* NR is zero-based: 256 ranges */
+ CU_ASSERT(g_request->cmd.cdw11_bits.dsm.ad == 1);
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ NULL, 0, cb_fn, cb_arg);
+ CU_ASSERT(rc != 0);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_readv(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512);
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
+ NULL);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_writev(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512);
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
+ NULL, nvme_request_next_sge);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_comparev(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512);
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
+ nvme_request_reset_sgl, NULL);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_comparev_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+ struct nvme_request *child0, *child1;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc((block_size + md_size) * 384);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 384);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * (512 + 128) bytes per block = two I/Os:
+	 * child 0: 204 blocks (floor(131072 / 640) = 204) - 204 * (512 + 128) = 127.5 KB
+ * child 1: 52 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+ CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * No protection information
+ *
+ * 256 blocks * (512 + 8) bytes per block = two I/Os:
+ * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+ CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+ * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+ * However, the splitting code does not account for PRACT when calculating
+ * max sectors per transfer, so we actually get two I/Os:
+ * child 0: 252 blocks
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length,
+ SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * 512);
+ CU_ASSERT(child1->payload_size == 4 * 512);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length,
+ SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * 384 blocks * 512 bytes = two I/Os:
+ * child 0: 256 blocks
+ * child 1: 128 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 384, NULL, &sge_length,
+ SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 256 * 512);
+ CU_ASSERT(child0->md_offset == 0);
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload_offset == 256 * 512);
+ CU_ASSERT(child1->payload_size == 128 * 512);
+ CU_ASSERT(child1->md_offset == 256 * 8);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ns_cmd_compare_and_write(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ uint64_t lba = 0x1000;
+ uint32_t lba_count = 256;
+ uint64_t cmd_lba;
+ uint32_t cmd_lba_count;
+ uint32_t sector_size = 512;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_compare(&ns, &qpair, NULL, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FUSE_FIRST);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
+ CU_ASSERT(g_request->cmd.fuse == SPDK_NVME_CMD_FUSE_FIRST);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, lba);
+ CU_ASSERT_EQUAL(cmd_lba_count, lba_count);
+
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_write(&ns, &qpair, NULL, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FUSE_SECOND);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(g_request->cmd.fuse == SPDK_NVME_CMD_FUSE_SECOND);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, lba);
+ CU_ASSERT_EQUAL(cmd_lba_count, lba_count);
+
+ nvme_free_request(g_request);
+
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_io_flags(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ void *payload;
+ uint64_t lba;
+ uint32_t lba_count;
+ uint64_t cmd_lba;
+ uint32_t cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
+ payload = malloc(256 * 1024);
+ lba = 0;
+ lba_count = (4 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) != 0);
+ nvme_free_request(g_request);
+
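+	/*
+	 * Setting every valid flag at once: the CDW12-resident subset must all
+	 * land in the command; the inverted (invalid) mask below must be
+	 * rejected with -EINVAL.
+	 */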
+ rc = spdk_nvme_ns_cmd_write(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_VALID_MASK);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba_count, lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, lba);
+ CU_ASSERT_EQUAL(g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_CDW12_MASK,
+ SPDK_NVME_IO_FLAGS_CDW12_MASK);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_write(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ ~SPDK_NVME_IO_FLAGS_VALID_MASK);
+ CU_ASSERT(rc == -EINVAL);
+
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_register(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_register_data *payload;
+ bool ignore_key = 1;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_register_data));
+
+ rc = spdk_nvme_ns_cmd_reservation_register(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_REGISTER_KEY,
+ SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REGISTER);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	tmp_cdw10 = SPDK_NVME_RESERVE_REGISTER_KEY;			/* RREGA: bits 2:0 */
+	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;				/* IEKEY: bit 3 */
+	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_PTPL_NO_CHANGES << 30;	/* CPTPL: bits 31:30 */
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_release(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_key_data *payload;
+ bool ignore_key = 1;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_key_data));
+
+ rc = spdk_nvme_ns_cmd_reservation_release(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_RELEASE,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_RELEASE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	tmp_cdw10 = SPDK_NVME_RESERVE_RELEASE;				/* RRELA: bits 2:0 */
+	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;				/* IEKEY: bit 3 */
+	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;	/* RTYPE: bits 15:8 */
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_acquire(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_acquire_data *payload;
+ bool ignore_key = 1;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data));
+
+ rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_ACQUIRE,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	tmp_cdw10 = SPDK_NVME_RESERVE_ACQUIRE;				/* RACQA: bits 2:0 */
+	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;				/* IEKEY: bit 3 */
+	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;	/* RTYPE: bits 15:8 */
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_report(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_status_data *payload;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t size = sizeof(struct spdk_nvme_reservation_status_data);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ payload = calloc(1, size);
+ SPDK_CU_ASSERT_FATAL(payload != NULL);
+
+ rc = spdk_nvme_ns_cmd_reservation_report(&ns, &qpair, payload, size, cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REPORT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	CU_ASSERT(g_request->cmd.cdw10 == (size / 4));	/* payload size in dwords */
+
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+ struct nvme_request *child0, *child1;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc((block_size + md_size) * 384);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 384);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->md_size == 256 * 128);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * (512 + 128) bytes per block = two I/Os:
+	 * child 0: 204 blocks (floor(131072 / 640) = 204) - 204 * (512 + 128) = 127.5 KB
+ * child 1: 52 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+ CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * No protection information
+ *
+ * 256 blocks * (512 + 8) bytes per block = two I/Os:
+ * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+ CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+ * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+ * However, the splitting code does not account for PRACT when calculating
+ * max sectors per transfer, so we actually get two I/Os:
+ * child 0: 252 blocks
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * 512);
+ CU_ASSERT(child1->payload_size == 4 * 512);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->md_size == 256 * 8);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * 384 blocks * 512 bytes = two I/Os:
+ * child 0: 256 blocks
+ * child 1: 128 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 256 * 512);
+ CU_ASSERT(child0->md_offset == 0);
+ CU_ASSERT(child0->md_size == 256 * 8);
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload_offset == 256 * 512);
+ CU_ASSERT(child1->payload_size == 128 * 512);
+ CU_ASSERT(child1->md_offset == 256 * 8);
+ CU_ASSERT(child1->md_size == 128 * 8);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ns_cmd_read_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc(block_size * 256);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 256);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_read_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->md_size == 256 * md_size);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ns_cmd_compare_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+ struct nvme_request *child0, *child1;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc((block_size + md_size) * 384);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 384);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * (512 + 128) bytes per block = two I/Os:
+ * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+ * child 1: 52 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+ CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * No protection information
+ *
+ * 256 blocks * (512 + 8) bytes per block = two I/Os:
+ * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+ CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+ * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+ * However, the splitting code does not account for PRACT when calculating
+ * max sectors per transfer, so we actually get two I/Os:
+ * child 0: 252 blocks
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * 512);
+ CU_ASSERT(child1->payload_size == 4 * 512);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * Protection information enabled + PRACT
+	 *
+	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+	 */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * 384 blocks * 512 bytes = two I/Os:
+ * child 0: 256 blocks
+ * child 1: 128 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 256 * 512);
+ CU_ASSERT(child0->md_offset == 0);
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload_offset == 256 * 512);
+ CU_ASSERT(child1->payload_size == 128 * 512);
+ CU_ASSERT(child1->md_offset == 256 * 8);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+
+ CU_ADD_TEST(suite, split_test);
+ CU_ADD_TEST(suite, split_test2);
+ CU_ADD_TEST(suite, split_test3);
+ CU_ADD_TEST(suite, split_test4);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_flush);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_dataset_management);
+ CU_ADD_TEST(suite, test_io_flags);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_write_zeroes);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_write_uncorrectable);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_register);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_release);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_acquire);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_report);
+ CU_ADD_TEST(suite, test_cmd_child_request);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_readv);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_read_with_md);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_writev);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_write_with_md);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_comparev);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_compare_and_write);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_compare_with_md);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_comparev_with_md);
+
+ g_spdk_nvme_driver = &_g_nvme_driver;
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore
new file mode 100644
index 000000000..8f4f47a17
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_ocssd_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile
new file mode 100644
index 000000000..35fdb83a0
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_ocssd_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c
new file mode 100644
index 000000000..fa25a4640
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c
@@ -0,0 +1,650 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ns_ocssd_cmd.c"
+#include "nvme/nvme_ns_cmd.c"
+#include "nvme/nvme.c"
+
+#include "common/lib/test_env.c"
+
+#define OCSSD_SECTOR_SIZE 0x1000
+
+static struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+static struct nvme_request *g_request = NULL;
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ g_request = req;
+
+ return 0;
+}
+
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+}
+
+void
+nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+int
+nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+void
+nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, sizeof(*opts));
+}
+
+bool
+spdk_nvme_transport_available_by_name(const char *transport_name)
+{
+ return true;
+}
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle)
+{
+ return NULL;
+}
+
+int
+nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect)
+{
+ return 0;
+}
+
+uint32_t
+spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
+{
+ return ns->ctrlr->max_xfer_size;
+}
+
+static void
+prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair,
+ uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
+ uint32_t stripe_size, bool extended_lba)
+{
+ uint32_t num_requests = 32;
+ uint32_t i;
+
+ ctrlr->max_xfer_size = max_xfer_size;
+ /*
+ * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
+ * so that we test the SGL splitting path.
+ */
+ ctrlr->flags = 0;
+ ctrlr->min_page_size = 4096;
+ ctrlr->page_size = 4096;
+ memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
+ memset(ns, 0, sizeof(*ns));
+ ns->ctrlr = ctrlr;
+ ns->sector_size = sector_size;
+ ns->extended_lba_size = sector_size;
+ if (extended_lba) {
+ ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
+ ns->extended_lba_size += md_size;
+ }
+ ns->md_size = md_size;
+ ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
+ ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;
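+	/* With the defaults used by the tests below (max_xfer 0x10000, 0x1000-byte
+	 * sectors, no metadata folded into the LBA), this works out to 16 sectors
+	 * per max I/O. */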
+
+ memset(qpair, 0, sizeof(*qpair));
+ qpair->ctrlr = ctrlr;
+ qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
+ SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
+
+ for (i = 0; i < num_requests; i++) {
+ struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);
+
+ req->qpair = qpair;
+ STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
+ }
+
+ g_request = NULL;
+}
+
+static void
+cleanup_after_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_reset_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ uint64_t lba_list = 0x12345678;
+	rc = spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, &lba_list, 1,
+			NULL, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
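+	/* Single-entry case: the LBA value itself rides inline in cdw10/11 instead
+	 * of a pointer to an LBA list, hence cdw10 is compared against the LBA
+	 * value directly below. */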
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_reset(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ uint64_t lba_list[vector_size];
+	rc = spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, lba_list, vector_size,
+			NULL, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
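+	/* The entry count is 0-based on the wire, hence vector_size - 1. */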
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t md_size = 0x80;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ char *metadata = malloc(md_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, buffer, metadata,
+ &lba_list, 1, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read_with_md(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t md_size = 0x80;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ char *metadata = malloc(md_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, buffer, metadata,
+ lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, buffer, &lba_list, 1,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, buffer, lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t md_size = 0x80;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ char *metadata = malloc(md_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
+			&lba_list, 1, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write_with_md(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t md_size = 0x80;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ char *metadata = malloc(md_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
+			lba_list, vector_size,
+			NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
+			&lba_list, 1, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
+			lba_list, vector_size,
+			NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_copy_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ uint64_t src_lba_list = 0x12345678;
+ uint64_t dst_lba_list = 0x87654321;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair, &dst_lba_list, &src_lba_list, 1,
+			NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == src_lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+ CU_ASSERT(g_request->cmd.cdw14 == dst_lba_list);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_copy(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ uint64_t src_lba_list[vector_size];
+ uint64_t dst_lba_list[vector_size];
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair,
+			dst_lba_list, src_lba_list, vector_size,
+			NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_reset);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_reset_single_entry);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read_with_md);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read_single_entry);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write_with_md);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write_single_entry);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_copy);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_copy_single_entry);
+
+ g_spdk_nvme_driver = &_g_nvme_driver;
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore
new file mode 100644
index 000000000..8fc291095
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore
@@ -0,0 +1 @@
+nvme_pcie_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile
new file mode 100644
index 000000000..09032a935
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_pcie_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
new file mode 100644
index 000000000..ccc59b4da
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
@@ -0,0 +1,498 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#define UNIT_TEST_NO_VTOPHYS
+
+#include "nvme/nvme_pcie.c"
+#include "common/lib/nvme/common_stubs.h"
+
+pid_t g_spdk_nvme_pid;
+DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
+DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
+
+DEFINE_STUB(nvme_get_quirks, uint64_t, (const struct spdk_pci_id *id), 0);
+
+DEFINE_STUB(nvme_wait_for_completion, int,
+ (struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status), 0);
+DEFINE_STUB_V(nvme_completion_poll_cb, (void *arg, const struct spdk_nvme_cpl *cpl));
+
+DEFINE_STUB(nvme_ctrlr_submit_admin_request, int, (struct spdk_nvme_ctrlr *ctrlr,
+ struct nvme_request *req), 0);
+DEFINE_STUB_V(nvme_ctrlr_free_processes, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB(nvme_ctrlr_proc_get_devhandle, struct spdk_pci_device *,
+ (struct spdk_nvme_ctrlr *ctrlr), NULL);
+
+DEFINE_STUB(spdk_pci_device_map_bar, int, (struct spdk_pci_device *dev, uint32_t bar,
+ void **mapped_addr, uint64_t *phys_addr, uint64_t *size), 0);
+DEFINE_STUB(spdk_pci_device_unmap_bar, int, (struct spdk_pci_device *dev, uint32_t bar, void *addr),
+ 0);
+DEFINE_STUB(spdk_pci_device_attach, int, (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb,
+ void *enum_ctx, struct spdk_pci_addr *pci_address), 0);
+DEFINE_STUB(spdk_pci_device_claim, int, (struct spdk_pci_device *dev), 0);
+DEFINE_STUB_V(spdk_pci_device_unclaim, (struct spdk_pci_device *dev));
+DEFINE_STUB_V(spdk_pci_device_detach, (struct spdk_pci_device *device));
+DEFINE_STUB(spdk_pci_device_cfg_write16, int, (struct spdk_pci_device *dev, uint16_t value,
+ uint32_t offset), 0);
+DEFINE_STUB(spdk_pci_device_cfg_read16, int, (struct spdk_pci_device *dev, uint16_t *value,
+ uint32_t offset), 0);
+DEFINE_STUB(spdk_pci_device_get_id, struct spdk_pci_id, (struct spdk_pci_device *dev), {0});
+
+DEFINE_STUB(nvme_uevent_connect, int, (void), 0);
+
+struct spdk_log_flag SPDK_LOG_NVME = {
+ .name = "nvme",
+ .enabled = false,
+};
+
+struct nvme_driver *g_spdk_nvme_driver = NULL;
+
+bool g_device_is_enumerated = false;
+
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+ CU_ASSERT(ctrlr != NULL);
+ if (hot_remove) {
+ ctrlr->is_removed = true;
+ }
+
+ ctrlr->is_failed = true;
+}
+
+struct spdk_uevent_entry {
+ struct spdk_uevent uevent;
+ STAILQ_ENTRY(spdk_uevent_entry) link;
+};
+
+static STAILQ_HEAD(, spdk_uevent_entry) g_uevents = STAILQ_HEAD_INITIALIZER(g_uevents);
+
+int
+nvme_get_uevent(int fd, struct spdk_uevent *uevent)
+{
+ struct spdk_uevent_entry *entry;
+
+ if (STAILQ_EMPTY(&g_uevents)) {
+ return 0;
+ }
+
+ entry = STAILQ_FIRST(&g_uevents);
+ STAILQ_REMOVE_HEAD(&g_uevents, link);
+
+ *uevent = entry->uevent;
+
+ return 1;
+}
+
+int
+spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ g_device_is_enumerated = true;
+
+ return 0;
+}
+
+static uint64_t g_vtophys_size = 0;
+
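+/* Identity-map mock: unless overridden with MOCK_SET, a buffer's "physical"
+ * address is simply its virtual address, and g_vtophys_size caps how much of
+ * the buffer a single translation covers - this keeps the PRP/SGL assertions
+ * below legible. */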
+DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
+uint64_t
+spdk_vtophys(void *buf, uint64_t *size)
+{
+ if (size) {
+ *size = g_vtophys_size;
+ }
+
+ HANDLE_RETURN_MOCK(spdk_vtophys);
+
+ return (uintptr_t)buf;
+}
+
+DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr, (struct spdk_pci_device *dev), {});
+DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle), 0);
+DEFINE_STUB(spdk_pci_device_is_removed, bool, (struct spdk_pci_device *dev), false);
+DEFINE_STUB(nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
+ (const struct spdk_nvme_transport_id *trid), NULL);
+DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts, union spdk_nvme_csts_register,
+ (struct spdk_nvme_ctrlr *ctrlr), {});
+DEFINE_STUB(nvme_ctrlr_get_process, struct spdk_nvme_ctrlr_process *,
+ (struct spdk_nvme_ctrlr *ctrlr, pid_t pid), NULL);
+DEFINE_STUB(nvme_completion_is_retry, bool, (const struct spdk_nvme_cpl *cpl), false);
+DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_cpl *cpl));
+
+static void
+prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
+{
+ memset(req, 0, sizeof(*req));
+ memset(tr, 0, sizeof(*tr));
+ tr->req = req;
+ tr->prp_sgl_bus_addr = 0xDEADBEEF;
+ *prp_index = 0;
+}
+
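+/*
+ * PRP ground rules exercised below (per the NVMe base spec): PRP1 may carry
+ * any dword-aligned offset, but every later entry must be page-aligned, so a
+ * buffer that does not start on a page boundary burns one extra PRP entry on
+ * its head fragment.
+ */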
+static void
+test_prp_list_append(void)
+{
+ struct nvme_request req;
+ struct nvme_tracker tr;
+ uint32_t prp_index;
+
+ /* Non-DWORD-aligned buffer (invalid) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100001, 0x1000, 0x1000) == -EFAULT);
+
+ /* 512-byte buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+
+ /* 512-byte buffer, non-4K-aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);
+
+ /* 4K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+
+ /* 4K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
+
+ /* 8K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x2000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
+
+ /* 8K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x2000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+
+ /* 12K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x3000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+
+ /* 12K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x3000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 4);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+ CU_ASSERT(tr.u.prp[2] == 0x103000);
+
+ /* Two 4K buffers, both 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);
+
+ /* Two 4K buffers, first non-4K aligned, second 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x900000);
+
+ /* Two 4K buffers, both non-4K aligned (invalid) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900800, 0x1000, 0x1000) == -EFAULT);
+ CU_ASSERT(prp_index == 2);
+
+ /* 4K buffer, 4K aligned, but vtophys fails */
+ MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == -EFAULT);
+ MOCK_CLEAR(spdk_vtophys);
+
+ /* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+ (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
+
+ /* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+ NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
+
+ /* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+ (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EFAULT);
+
+ /* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+ (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EFAULT);
+}
+
+static void
+test_nvme_pcie_hotplug_monitor(void)
+{
+ struct nvme_pcie_ctrlr pctrlr = {};
+ struct spdk_uevent_entry entry = {};
+ struct nvme_driver driver;
+ pthread_mutexattr_t attr;
+ struct spdk_nvme_probe_ctx test_nvme_probe_ctx = {};
+
+	/* Initialize driver state and ctrlr */
+ driver.initialized = true;
+ driver.hotplug_fd = 123;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&driver.lock, &attr) == 0);
+ TAILQ_INIT(&driver.shared_attached_ctrlrs);
+ g_spdk_nvme_driver = &driver;
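+	/* Cases 1-4 below each queue one fake uevent on g_uevents and expect a
+	 * single pass of _nvme_pcie_hotplug_monitor() to consume it; the queue is
+	 * asserted empty before queuing and after the monitor runs. */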
+
+	/* Case 1: SPDK_NVME_UEVENT_ADD / NVME_VFIO */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_VFIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_ADD;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(g_device_is_enumerated == true);
+ g_device_is_enumerated = false;
+
+	/* Case 2: SPDK_NVME_UEVENT_ADD / NVME_UIO */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_UIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_ADD;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(g_device_is_enumerated == true);
+ g_device_is_enumerated = false;
+
+	/* Case 3: SPDK_NVME_UEVENT_REMOVE / NVME_UIO */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_UIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_REMOVE;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+
+ MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(pctrlr.ctrlr.is_failed == true);
+ pctrlr.ctrlr.is_failed = false;
+ MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);
+
+	/* Case 4: SPDK_NVME_UEVENT_REMOVE / NVME_VFIO */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_VFIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_REMOVE;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+ MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(pctrlr.ctrlr.is_failed == true);
+ pctrlr.ctrlr.is_failed = false;
+ MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);
+
+ /* Case 5: Removed device detected in another process */
+ pctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ snprintf(pctrlr.ctrlr.trid.traddr, sizeof(pctrlr.ctrlr.trid.traddr), "0000:02:00.0");
+ pctrlr.ctrlr.remove_cb = NULL;
+ pctrlr.ctrlr.is_failed = false;
+ pctrlr.ctrlr.is_removed = false;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &pctrlr.ctrlr, tailq);
+
+ MOCK_SET(spdk_pci_device_is_removed, false);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(pctrlr.ctrlr.is_failed == false);
+
+ MOCK_SET(spdk_pci_device_is_removed, true);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(pctrlr.ctrlr.is_failed == true);
+
+ pthread_mutex_destroy(&driver.lock);
+ pthread_mutexattr_destroy(&attr);
+ g_spdk_nvme_driver = NULL;
+}
+
+static void
+test_shadow_doorbell_update(void)
+{
+ bool ret;
+
+ /* nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) */
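+	/* Following the virtio-style event-index convention, an event is needed
+	 * exactly when event_idx lies in [old, new_idx), computed with 16-bit
+	 * wraparound: 10 is outside [14, 15), while 14 is inside it. */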
+ ret = nvme_pcie_qpair_need_event(10, 15, 14);
+ CU_ASSERT(ret == false);
+
+ ret = nvme_pcie_qpair_need_event(14, 15, 14);
+ CU_ASSERT(ret == true);
+}
+
+static void
+test_build_contig_hw_sgl_request(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request req = {};
+ struct nvme_tracker tr = {};
+ int rc;
+
+ /* Test 1: Payload covered by a single mapping */
+ req.payload_size = 100;
+ req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+ g_vtophys_size = 100;
+ MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+ rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);
+
+ MOCK_CLEAR(spdk_vtophys);
+ g_vtophys_size = 0;
+ memset(&qpair, 0, sizeof(qpair));
+ memset(&req, 0, sizeof(req));
+ memset(&tr, 0, sizeof(tr));
+
+ /* Test 2: Payload covered by a single mapping, but request is at an offset */
+ req.payload_size = 100;
+ req.payload_offset = 50;
+ req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+ g_vtophys_size = 1000;
+ MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+ rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);
+
+ MOCK_CLEAR(spdk_vtophys);
+ g_vtophys_size = 0;
+ memset(&qpair, 0, sizeof(qpair));
+ memset(&req, 0, sizeof(req));
+ memset(&tr, 0, sizeof(tr));
+
+ /* Test 3: Payload spans two mappings */
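+	/* g_vtophys_size caps each translation at 60 bytes, so the 100-byte payload
+	 * must be described by a two-entry hardware SGL in the tracker, with sgl1
+	 * turned into a last-segment descriptor that points at it. */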
+ req.payload_size = 100;
+ req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+ g_vtophys_size = 60;
+ tr.prp_sgl_bus_addr = 0xFF0FF;
+ MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+ rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == tr.prp_sgl_bus_addr);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 2 * sizeof(struct spdk_nvme_sgl_descriptor));
+ CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(tr.u.sgl[0].unkeyed.length == 60);
+ CU_ASSERT(tr.u.sgl[0].address == 0xDEADBEEF);
+ CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(tr.u.sgl[1].unkeyed.length == 40);
+ CU_ASSERT(tr.u.sgl[1].address == 0xDEADBEEF);
+
+ MOCK_CLEAR(spdk_vtophys);
+ g_vtophys_size = 0;
+ memset(&qpair, 0, sizeof(qpair));
+ memset(&req, 0, sizeof(req));
+ memset(&tr, 0, sizeof(tr));
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_pcie", NULL, NULL);
+ CU_ADD_TEST(suite, test_prp_list_append);
+ CU_ADD_TEST(suite, test_nvme_pcie_hotplug_monitor);
+ CU_ADD_TEST(suite, test_shadow_doorbell_update);
+ CU_ADD_TEST(suite, test_build_contig_hw_sgl_request);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore
new file mode 100644
index 000000000..e4223e112
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore
@@ -0,0 +1 @@
+nvme_poll_group_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile
new file mode 100644
index 000000000..4715b5449
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_poll_group_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c
new file mode 100644
index 000000000..1503a49c5
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c
@@ -0,0 +1,484 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
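+/* Include the implementation under test directly so its static functions and state are visible. */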
+#include "nvme/nvme_poll_group.c"
+#include "common/lib/test_env.c"
+
+struct spdk_nvme_transport {
+ const char name[32];
+ TAILQ_ENTRY(spdk_nvme_transport) link;
+};
+
+struct spdk_nvme_transport t1 = {
+ .name = "transport1",
+};
+
+struct spdk_nvme_transport t2 = {
+ .name = "transport2",
+};
+
+struct spdk_nvme_transport t3 = {
+ .name = "transport3",
+};
+
+struct spdk_nvme_transport t4 = {
+ .name = "transport4",
+};
+
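+/* Return-value knobs for the transport stubs below; tests set these to drive specific paths. */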
+int64_t g_process_completions_return_value = 0;
+int g_destroy_return_value = 0;
+
+TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
+ TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);
+
+static void
+unit_test_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
+{
+
+}
+
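+/* Minimal transport-layer stand-ins: they walk the global transport list and shuffle qpairs between the connected and disconnected queues. */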
+const struct spdk_nvme_transport *
+nvme_get_first_transport(void)
+{
+ return TAILQ_FIRST(&g_spdk_nvme_transports);
+}
+
+const struct spdk_nvme_transport *
+nvme_get_next_transport(const struct spdk_nvme_transport *transport)
+{
+ return TAILQ_NEXT(transport, link);
+}
+
+int
+nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
+{
+ struct spdk_nvme_transport_poll_group *tgroup;
+ struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
+
+ tgroup = qpair->poll_group;
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->connected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ STAILQ_FOREACH(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq) {
+ if (qpair == iter_qp) {
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int
+nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
+{
+ struct spdk_nvme_transport_poll_group *tgroup;
+ struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
+
+ tgroup = qpair->poll_group;
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ STAILQ_FOREACH(iter_qp, &tgroup->connected_qpairs, poll_group_stailq) {
+ if (qpair == iter_qp) {
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+struct spdk_nvme_transport_poll_group *
+nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
+{
+ struct spdk_nvme_transport_poll_group *group = NULL;
+
+ /* TODO: separate this transport function table from the transport specific one. */
+ group = calloc(1, sizeof(*group));
+ if (group) {
+ group->transport = transport;
+ STAILQ_INIT(&group->connected_qpairs);
+ STAILQ_INIT(&group->disconnected_qpairs);
+ }
+
+ return group;
+}
+
+int
+nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
+{
+ return g_destroy_return_value;
+}
+
+int
+nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
+ struct spdk_nvme_qpair *qpair)
+{
+ STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
+ qpair->poll_group = tgroup;
+
+ return 0;
+}
+
+int
+nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
+ struct spdk_nvme_qpair *qpair)
+{
+ struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->connected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+int64_t
+nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *group,
+ uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
+{
+ return g_process_completions_return_value;
+}
+
+static void
+test_spdk_nvme_poll_group_create(void)
+{
+ struct spdk_nvme_poll_group *group;
+
+ /* basic case - create a poll group with no internal transport poll groups. */
+ group = spdk_nvme_poll_group_create(NULL);
+
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ CU_ASSERT(STAILQ_EMPTY(&group->tgroups));
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+
+	/* Advanced case - create a poll group while three transports are registered; transport poll groups are created lazily on qpair add, so the list is still empty. */
+ group = spdk_nvme_poll_group_create(NULL);
+ CU_ASSERT(STAILQ_EMPTY(&group->tgroups));
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ /* Failing case - failed to allocate a poll group. */
+ MOCK_SET(calloc, NULL);
+ group = spdk_nvme_poll_group_create(NULL);
+ CU_ASSERT(group == NULL);
+ MOCK_CLEAR(calloc);
+
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t3, link);
+}
+
+static void
+test_spdk_nvme_poll_group_add_remove(void)
+{
+ struct spdk_nvme_poll_group *group;
+	struct spdk_nvme_transport_poll_group *tgroup = NULL, *tmp_tgroup;
+	struct spdk_nvme_transport_poll_group *tgroup_1 = NULL, *tgroup_2 = NULL, *tgroup_4 = NULL;
+ struct spdk_nvme_qpair *qpair;
+ struct spdk_nvme_qpair qpair1_1 = {0};
+ struct spdk_nvme_qpair qpair1_2 = {0};
+ struct spdk_nvme_qpair qpair2_1 = {0};
+ struct spdk_nvme_qpair qpair2_2 = {0};
+ struct spdk_nvme_qpair qpair4_1 = {0};
+ struct spdk_nvme_qpair qpair4_2 = {0};
+ int i = 0;
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ CU_ASSERT(STAILQ_EMPTY(&group->tgroups));
+
+ /* Add qpairs to a single transport. */
+ qpair1_1.transport = &t1;
+ qpair1_1.state = NVME_QPAIR_DISCONNECTED;
+ qpair1_2.transport = &t1;
+ qpair1_2.state = NVME_QPAIR_ENABLED;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
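+	/* Only disconnected qpairs may be added, so the ENABLED qpair1_2 is rejected. */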
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_2) == -EINVAL);
+ STAILQ_FOREACH(tmp_tgroup, &group->tgroups, link) {
+ if (tmp_tgroup->transport == &t1) {
+ tgroup = tmp_tgroup;
+ } else {
+ CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
+ }
+ i++;
+ }
+ CU_ASSERT(i == 1);
+ SPDK_CU_ASSERT_FATAL(tgroup != NULL);
+ qpair = STAILQ_FIRST(&tgroup->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+
+ /* Add qpairs to a second transport. */
+ qpair2_1.transport = &t2;
+ qpair2_2.transport = &t2;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair2_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair2_2) == 0);
+ qpair4_1.transport = &t4;
+ qpair4_2.transport = &t4;
+ /* Add qpairs for a transport that doesn't exist. */
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_1) == -ENODEV);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_2) == -ENODEV);
+ i = 0;
+ STAILQ_FOREACH(tmp_tgroup, &group->tgroups, link) {
+ if (tmp_tgroup->transport == &t1) {
+ tgroup_1 = tmp_tgroup;
+ } else if (tmp_tgroup->transport == &t2) {
+ tgroup_2 = tmp_tgroup;
+ } else {
+ CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
+ }
+ i++;
+ }
+ CU_ASSERT(i == 2);
+ SPDK_CU_ASSERT_FATAL(tgroup_1 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_1->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+ SPDK_CU_ASSERT_FATAL(tgroup_2 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_2->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_2);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+
+ /* Try removing a qpair that belongs to a transport not in our poll group. */
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair4_1) == -ENODEV);
+
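+	/* Register t4; the same adds now succeed and create a transport group for it. */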
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t4, link);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_2) == 0);
+ STAILQ_FOREACH(tmp_tgroup, &group->tgroups, link) {
+ if (tmp_tgroup->transport == &t1) {
+ tgroup_1 = tmp_tgroup;
+ } else if (tmp_tgroup->transport == &t2) {
+ tgroup_2 = tmp_tgroup;
+ } else if (tmp_tgroup->transport == &t4) {
+ tgroup_4 = tmp_tgroup;
+ } else {
+ CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(tgroup_1 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_1->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+ SPDK_CU_ASSERT_FATAL(tgroup_2 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_2->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_2);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+ SPDK_CU_ASSERT_FATAL(tgroup_4 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_4->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair4_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair4_2);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+
+ /* remove all qpairs */
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair1_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair2_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair2_2) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair4_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair4_2) == 0);
+	/* Three transport groups should exist: t1, t2, and the newly registered t4 (t3 never had a qpair added). */
+ i = 0;
+ STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
+ CU_ASSERT(STAILQ_EMPTY(&tgroup->connected_qpairs));
+ STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
+ free(tgroup);
+ i++;
+ }
+ CU_ASSERT(i == 3);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t3, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t4, link);
+}
+
+static void
+test_spdk_nvme_poll_group_process_completions(void)
+{
+ struct spdk_nvme_poll_group *group;
+ struct spdk_nvme_transport_poll_group *tgroup, *tmp_tgroup;
+ struct spdk_nvme_qpair qpair1_1 = {0};
+
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ /* If we don't have any transport poll groups, we shouldn't get any completions. */
+ g_process_completions_return_value = 32;
+ CU_ASSERT(spdk_nvme_poll_group_process_completions(group, 128,
+ unit_test_disconnected_qpair_cb) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+
+ /* try it with three transport poll groups. */
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ qpair1_1.state = NVME_QPAIR_DISCONNECTED;
+ qpair1_1.transport = &t1;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
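+	/* A qpair must be disconnected to be added; enable and connect it before polling. */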
+ qpair1_1.state = NVME_QPAIR_ENABLED;
+ CU_ASSERT(nvme_poll_group_connect_qpair(&qpair1_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_process_completions(group, 128,
+ unit_test_disconnected_qpair_cb) == 32);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair1_1) == 0);
+ STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
+ CU_ASSERT(STAILQ_EMPTY(&tgroup->connected_qpairs));
+ STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
+ free(tgroup);
+ }
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t3, link);
+}
+
+static void
+test_spdk_nvme_poll_group_destroy(void)
+{
+ struct spdk_nvme_poll_group *group;
+ struct spdk_nvme_transport_poll_group *tgroup, *tgroup_1, *tgroup_2;
+ struct spdk_nvme_qpair qpair1_1 = {0};
+ int num_tgroups = 0;
+
+ /* Simple destruction of empty poll group. */
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ qpair1_1.transport = &t1;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
+
+ /* Don't remove busy poll groups. */
+ g_destroy_return_value = -EBUSY;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == -EBUSY);
+ STAILQ_FOREACH(tgroup, &group->tgroups, link) {
+ num_tgroups++;
+ }
+ CU_ASSERT(num_tgroups == 1);
+
+ /* destroy poll group with internal poll groups. */
+ g_destroy_return_value = 0;
+ tgroup_1 = STAILQ_FIRST(&group->tgroups);
+ tgroup_2 = STAILQ_NEXT(tgroup_1, link);
+	CU_ASSERT(tgroup_2 == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
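+	/* The destroy stub does not free the transport group, so release it manually. */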
+ free(tgroup_1);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+	suite = CU_add_suite("nvme_poll_group", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "nvme_poll_group_create_test", test_spdk_nvme_poll_group_create) == NULL ||
+ CU_add_test(suite, "nvme_poll_group_add_remove_test",
+ test_spdk_nvme_poll_group_add_remove) == NULL ||
+ CU_add_test(suite, "nvme_poll_group_process_completions",
+ test_spdk_nvme_poll_group_process_completions) == NULL ||
+ CU_add_test(suite, "nvme_poll_group_destroy_test", test_spdk_nvme_poll_group_destroy) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore
new file mode 100644
index 000000000..1bb18e997
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore
@@ -0,0 +1 @@
+nvme_qpair_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile
new file mode 100644
index 000000000..d7762a384
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_qpair_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
new file mode 100644
index 000000000..e34c70413
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
@@ -0,0 +1,629 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
+pid_t g_spdk_nvme_pid;
+
+bool trace_flag = false;
+#define SPDK_LOG_NVME trace_flag
+
+#include "nvme/nvme_qpair.c"
+
+struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+DEFINE_STUB_V(nvme_transport_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
+DEFINE_STUB(nvme_transport_qpair_submit_request, int,
+ (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
+DEFINE_STUB(spdk_nvme_ctrlr_free_io_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
+DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair));
+DEFINE_STUB_V(nvme_ctrlr_disconnect_qpair, (struct spdk_nvme_qpair *qpair));
+
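+/* Local replacement for nvme_ctrlr_fail so tests can observe is_failed and is_removed. */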
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+ if (hot_remove) {
+ ctrlr->is_removed = true;
+ }
+ ctrlr->is_failed = true;
+}
+
+static bool g_called_transport_process_completions = false;
+static int32_t g_transport_process_completions_rc = 0;
+int32_t
+nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ g_called_transport_process_completions = true;
+ return g_transport_process_completions_rc;
+}
+
+static void
+prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_ctrlr *ctrlr)
+{
+ memset(ctrlr, 0, sizeof(*ctrlr));
+ ctrlr->free_io_qids = NULL;
+ TAILQ_INIT(&ctrlr->active_io_qpairs);
+ TAILQ_INIT(&ctrlr->active_procs);
+ MOCK_CLEAR(spdk_zmalloc);
+ nvme_qpair_init(qpair, 1, ctrlr, 0, 32);
+}
+
+static void
+cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
+static void
+expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
+}
+
+static void
+expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
+}
+
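+/* Submitting a null request on a healthy controller and qpair should succeed. */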
+static void
+test3(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
+
+ nvme_free_request(req);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_ctrlr_failed(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ char payload[4096];
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ /* Set the controller to failed.
+ * Set the controller to resetting so that the qpair won't get re-enabled.
+ */
+ ctrlr.is_failed = true;
+ ctrlr.is_resetting = true;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void struct_packing(void)
+{
+ /* ctrlr is the first field in nvme_qpair after the fields
+ * that are used in the I/O path. Make sure the I/O path fields
+ * all fit into two cache lines.
+ */
+ CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
+}
+
+static int g_num_cb_failed = 0;
+static int g_num_cb_passed = 0;
+
+static void
+dummy_cb_fn(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ if (cpl->status.sc == SPDK_NVME_SC_SUCCESS) {
+ g_num_cb_passed++;
+ } else {
+ g_num_cb_failed++;
+ }
+}
+
+static void test_nvme_qpair_process_completions(void)
+{
+ struct spdk_nvme_qpair admin_qp = {0};
+ struct spdk_nvme_qpair qpair = {0};
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct nvme_request dummy_1 = {{0}};
+ struct nvme_request dummy_2 = {{0}};
+ int rc;
+
+ dummy_1.cb_fn = dummy_cb_fn;
+ dummy_2.cb_fn = dummy_cb_fn;
+ dummy_1.qpair = &qpair;
+ dummy_2.qpair = &qpair;
+
+ TAILQ_INIT(&ctrlr.active_io_qpairs);
+ TAILQ_INIT(&ctrlr.active_procs);
+ nvme_qpair_init(&qpair, 1, &ctrlr, 0, 32);
+ nvme_qpair_init(&admin_qp, 0, &ctrlr, 0, 32);
+
+ ctrlr.adminq = &admin_qp;
+
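+	/* Queue two requests; they should be aborted only when the controller is removed. */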
+ STAILQ_INIT(&qpair.queued_req);
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
+
+ /* If the controller is failed, return -ENXIO */
+ ctrlr.is_failed = true;
+ ctrlr.is_removed = false;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* Same if the qpair is failed at the transport layer. */
+ ctrlr.is_failed = false;
+ ctrlr.is_removed = false;
+ qpair.state = NVME_QPAIR_DISCONNECTED;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* If the controller is removed, make sure we abort the requests. */
+ ctrlr.is_failed = true;
+ ctrlr.is_removed = true;
+ qpair.state = NVME_QPAIR_CONNECTED;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 2);
+
+ /* If we are resetting, make sure that we don't call into the transport. */
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
+ dummy_1.queued = true;
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
+ dummy_2.queued = true;
+ g_num_cb_failed = 0;
+ ctrlr.is_failed = false;
+ ctrlr.is_removed = false;
+ ctrlr.is_resetting = true;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == false);
+ /* We also need to make sure we didn't abort the requests. */
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* The case where we aren't resetting, but are enabling the qpair is the same as above. */
+ ctrlr.is_resetting = false;
+ qpair.state = NVME_QPAIR_ENABLING;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == false);
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* For other qpair states, we want to enable the qpair. */
+ qpair.state = NVME_QPAIR_CONNECTED;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ /* These should have been submitted to the lower layer. */
+ CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+ CU_ASSERT(nvme_qpair_get_state(&qpair) == NVME_QPAIR_ENABLED);
+
+ g_called_transport_process_completions = false;
+ g_transport_process_completions_rc = -ENXIO;
+
+ /* Fail the controller if we get an error from the transport on admin qpair. */
+ admin_qp.state = NVME_QPAIR_ENABLED;
+ rc = spdk_nvme_qpair_process_completions(&admin_qp, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ CU_ASSERT(ctrlr.is_failed == true);
+
+ /* Don't fail the controller for regular qpairs. */
+ ctrlr.is_failed = false;
+ g_called_transport_process_completions = false;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ CU_ASSERT(ctrlr.is_failed == false);
+
+ /* Make sure we don't modify the return value from the transport. */
+ ctrlr.is_failed = false;
+ g_called_transport_process_completions = false;
+ g_transport_process_completions_rc = 23;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == 23);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ CU_ASSERT(ctrlr.is_failed == false);
+
+ free(qpair.req_buf);
+ free(admin_qp.req_buf);
+}
+
+static void test_nvme_completion_is_retry(void)
+{
+ struct spdk_nvme_cpl cpl = {};
+
+ cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+ cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
+ cpl.status.dnr = 1;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = 0x70;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_PATH;
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_PATH;
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
+ cpl.status.dnr = 1;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = 0x4;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+}
+
+#ifdef DEBUG
+static void
+test_get_status_string(void)
+{
+ const char *status_string;
+ struct spdk_nvme_status status;
+
+ status.sct = SPDK_NVME_SCT_GENERIC;
+ status.sc = SPDK_NVME_SC_SUCCESS;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);
+
+ status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ status.sc = SPDK_NVME_SC_COMPLETION_QUEUE_INVALID;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);
+
+ status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ status.sc = SPDK_NVME_SC_UNRECOVERED_READ_ERROR;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);
+
+ status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ status.sc = 0;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);
+
+ status.sct = 0x4;
+ status.sc = 0;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
+}
+#endif
+
+static void
+test_nvme_qpair_add_cmd_error_injection(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ int rc;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ ctrlr.adminq = &qpair;
+
+ /* Admin error injection at submission path */
+ MOCK_CLEAR(spdk_zmalloc);
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
+ SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* IO error injection at completion path */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+	/* Add the same opc again and verify that no new entry is allocated. */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
+ CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_COMPARE, true, 0, 5,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_nvme_qpair_submit_request(void)
+{
+ int rc;
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_request *req, *req1, *req2, *req3, *req2_1, *req2_2, *req2_3;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ /*
+ * Build a request chain like the following:
+ * req
+ * |
+ * ---------------
+ * | | |
+ * req1 req2 req3
+ * |
+ * ---------------
+ * | | |
+ * req2_1 req2_2 req2_3
+ */
+ req = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req != NULL);
+ TAILQ_INIT(&req->children);
+
+ req1 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req1 != NULL);
+ req->num_children++;
+ TAILQ_INSERT_TAIL(&req->children, req1, child_tailq);
+ req1->parent = req;
+
+ req2 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2 != NULL);
+ TAILQ_INIT(&req2->children);
+ req->num_children++;
+ TAILQ_INSERT_TAIL(&req->children, req2, child_tailq);
+ req2->parent = req;
+
+ req3 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req3 != NULL);
+ req->num_children++;
+ TAILQ_INSERT_TAIL(&req->children, req3, child_tailq);
+ req3->parent = req;
+
+ req2_1 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2_1 != NULL);
+ req2->num_children++;
+ TAILQ_INSERT_TAIL(&req2->children, req2_1, child_tailq);
+ req2_1->parent = req2;
+
+ req2_2 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2_2 != NULL);
+ req2->num_children++;
+ TAILQ_INSERT_TAIL(&req2->children, req2_2, child_tailq);
+ req2_2->parent = req2;
+
+ req2_3 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2_3 != NULL);
+ req2->num_children++;
+ TAILQ_INSERT_TAIL(&req2->children, req2_3, child_tailq);
+ req2_3->parent = req2;
+
+ ctrlr.is_failed = true;
+ rc = nvme_qpair_submit_request(&qpair, req);
+ SPDK_CU_ASSERT_FATAL(rc == -ENXIO);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_nvme_qpair_resubmit_request_with_transport_failed(void)
+{
+ int rc;
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_request *req;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_null(&qpair, dummy_cb_fn, NULL);
+ CU_ASSERT(req != NULL);
+ TAILQ_INIT(&req->children);
+
+ STAILQ_INSERT_TAIL(&qpair.queued_req, req, stailq);
+ req->queued = true;
+
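+	/* The transport reports one completion, but resubmitting the queued request fails, so its callback must see an error. */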
+ g_transport_process_completions_rc = 1;
+ qpair.state = NVME_QPAIR_ENABLED;
+ g_num_cb_failed = 0;
+ MOCK_SET(nvme_transport_qpair_submit_request, -EINVAL);
+ rc = spdk_nvme_qpair_process_completions(&qpair, g_transport_process_completions_rc);
+ MOCK_CLEAR(nvme_transport_qpair_submit_request);
+ CU_ASSERT(rc == g_transport_process_completions_rc);
+ CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_failed == 1);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_qpair", NULL, NULL);
+
+ CU_ADD_TEST(suite, test3);
+ CU_ADD_TEST(suite, test_ctrlr_failed);
+ CU_ADD_TEST(suite, struct_packing);
+ CU_ADD_TEST(suite, test_nvme_qpair_process_completions);
+ CU_ADD_TEST(suite, test_nvme_completion_is_retry);
+#ifdef DEBUG
+ CU_ADD_TEST(suite, test_get_status_string);
+#endif
+ CU_ADD_TEST(suite, test_nvme_qpair_add_cmd_error_injection);
+ CU_ADD_TEST(suite, test_nvme_qpair_submit_request);
+ CU_ADD_TEST(suite, test_nvme_qpair_resubmit_request_with_transport_failed);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore
new file mode 100644
index 000000000..eca86651b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore
@@ -0,0 +1 @@
+nvme_quirks_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile
new file mode 100644
index 000000000..d86887f0e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_quirks_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c
new file mode 100644
index 000000000..c3e799251
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c
@@ -0,0 +1,92 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_quirks.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+static void
+test_nvme_quirks_striping(void)
+{
+ struct spdk_pci_id pci_id = {};
+ uint64_t quirks = 0;
+
+ /* Non-Intel device should not have striping enabled */
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) == 0);
+
+ /* Set the vendor id to Intel, but no device id. No striping. */
+ pci_id.class_id = SPDK_PCI_CLASS_NVME;
+ pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) == 0);
+
+ /* Device ID 0x0953 should have striping enabled */
+ pci_id.device_id = 0x0953;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+
+ /* Even if specific subvendor/subdevice ids are set,
+ * striping should be enabled.
+ */
+ pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.subdevice_id = 0x3704;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+
+ pci_id.subvendor_id = 1234;
+ pci_id.subdevice_id = 42;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_quirks", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_quirks_striping);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore
new file mode 100644
index 000000000..66265b955
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore
@@ -0,0 +1 @@
+nvme_rdma_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile
new file mode 100644
index 000000000..7ea42632b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_rdma_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c
new file mode 100644
index 000000000..8342e84d3
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c
@@ -0,0 +1,408 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "nvme/nvme_rdma.c"
+#include "common/lib/nvme/common_stubs.h"
+#include "common/lib/test_rdma.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size, uint64_t translation), 0);
+DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size), 0);
+
+DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
+ const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
+DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
+
+DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
+
+DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
+DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
+		    uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
+
+/* Used to mock out splitting an SGL across a memory region boundary. */
+uint64_t g_mr_size;
+struct ibv_mr g_nvme_rdma_mr;
+
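+/* Mocked translation: every vaddr resolves to the single fake MR, and a nonzero g_mr_size overrides the reported region size to force SGL splitting. */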
+uint64_t
+spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
+{
+ if (g_mr_size != 0) {
+		*size = g_mr_size;
+ }
+
+ return (uint64_t)&g_nvme_rdma_mr;
+}
+
+struct nvme_rdma_ut_bdev_io {
+ struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
+ int iovpos;
+};
+
+/* Essentially simplified versions of bdev_nvme_reset_sgl and bdev_nvme_next_sge. */
+static void nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
+{
+ struct nvme_rdma_ut_bdev_io *bio = cb_arg;
+ struct iovec *iov;
+
+ for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
+ iov = &bio->iovs[bio->iovpos];
+ /* Only provide offsets at the beginning of an iov */
+ if (offset == 0) {
+ break;
+ }
+
+ offset -= iov->iov_len;
+ }
+
+ SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
+}
+
+static int nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+ struct nvme_rdma_ut_bdev_io *bio = cb_arg;
+ struct iovec *iov;
+
+ SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
+
+ iov = &bio->iovs[bio->iovpos];
+
+ *address = iov->iov_base;
+ *length = iov->iov_len;
+ bio->iovpos++;
+
+ return 0;
+}
+
+static void
+test_nvme_rdma_build_sgl_request(void)
+{
+ struct nvme_rdma_qpair rqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvmf_cmd cmd = {{0}};
+ struct spdk_nvme_rdma_req rdma_req = {0};
+ struct nvme_request req = {{0}};
+ struct nvme_rdma_ut_bdev_io bio;
+ struct spdk_nvme_rdma_mr_map rmap = {0};
+ struct spdk_mem_map *map = NULL;
+ uint64_t i;
+ int rc;
+
+ rmap.map = map;
+
+ ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+ ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+ rqpair.mr_map = &rmap;
+ rqpair.qpair.ctrlr = &ctrlr;
+ rqpair.cmds = &cmd;
+ cmd.sgl[0].address = 0x1111;
+ rdma_req.id = 0;
+ rdma_req.req = &req;
+
+ req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
+ req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
+ req.payload.contig_or_cb_arg = &bio;
+ req.qpair = &rqpair.qpair;
+
+ g_nvme_rdma_mr.rkey = 1;
+
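+	/* Seed the iovs with distinct base addresses and zero lengths; each case sets the lengths it needs. */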
+ for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
+ bio.iovs[i].iov_base = (void *)i;
+ bio.iovs[i].iov_len = 0;
+ }
+
+ /* Test case 1: single SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ bio.iovs[0].iov_len = 0x1000;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 1);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == g_nvme_rdma_mr.rkey);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+
+ /* Test case 2: multiple SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x4000;
+ for (i = 0; i < 4; i++) {
+ bio.iovs[i].iov_len = 0x1000;
+ }
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 4);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
+ CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
+	CU_ASSERT(rdma_req.send_sgl[0].length ==
+		  4 * sizeof(struct spdk_nvme_sgl_descriptor) + sizeof(struct spdk_nvme_cmd));
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+ CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+ CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
+ CU_ASSERT(cmd.sgl[i].keyed.key == g_nvme_rdma_mr.rkey);
+ CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
+ }
+
+	/* Test case 3: multiple SGLs where each 0x1000 iov spans twice the mocked 0x800 MR, which would require splitting. Expected: FAIL */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ g_mr_size = 0x800;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+ CU_ASSERT(bio.iovpos == 1);
+
+ /* Test case 4: Multiple SGL, SGL size smaller than I/O size. Expected: FAIL */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x6000;
+ g_mr_size = 0x0;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+ CU_ASSERT(bio.iovpos == NVME_RDMA_MAX_SGL_DESCRIPTORS);
+
+	/* Test case 5: an iov length that does not fit in the 24-bit keyed SGL length field. Expected: FAIL */
+ req.payload_size = 0x1000 + (1 << 24);
+ bio.iovs[0].iov_len = 0x1000;
+ bio.iovs[1].iov_len = 1 << 24;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+}
+
+static void
+test_nvme_rdma_build_sgl_inline_request(void)
+{
+ struct nvme_rdma_qpair rqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvmf_cmd cmd = {{0}};
+ struct spdk_nvme_rdma_req rdma_req = {0};
+ struct nvme_request req = {{0}};
+ struct nvme_rdma_ut_bdev_io bio;
+ struct spdk_nvme_rdma_mr_map rmap = {0};
+ struct spdk_mem_map *map = NULL;
+ int rc;
+
+ rmap.map = map;
+
+ ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+ ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+ rqpair.mr_map = &rmap;
+ rqpair.qpair.ctrlr = &ctrlr;
+ rqpair.cmds = &cmd;
+ cmd.sgl[0].address = 0x1111;
+ rdma_req.id = 0;
+ rdma_req.req = &req;
+
+ req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
+ req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
+ req.payload.contig_or_cb_arg = &bio;
+ req.qpair = &rqpair.qpair;
+
+ g_nvme_rdma_mr.lkey = 2;
+
+ /* Test case 1: single inline SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ bio.iovs[0].iov_base = (void *)0xdeadbeef;
+ bio.iovs[0].iov_len = 0x1000;
+ rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 1);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+ CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+ CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
+ CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+
+	/* Test case 2: payload length exceeds the 24-bit keyed-SGL limit, but inline SGLs use a 32-bit unkeyed length field. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 1 << 24;
+ bio.iovs[0].iov_len = 1 << 24;
+ rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 1);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+ CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+ CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
+ CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+}
+
+static void
+test_nvme_rdma_build_contig_request(void)
+{
+ struct nvme_rdma_qpair rqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvmf_cmd cmd = {{0}};
+ struct spdk_nvme_rdma_req rdma_req = {0};
+ struct nvme_request req = {{0}};
+ struct spdk_nvme_rdma_mr_map rmap = {0};
+ struct spdk_mem_map *map = NULL;
+ int rc;
+
+ rmap.map = map;
+
+ ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+ ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+ rqpair.mr_map = &rmap;
+ rqpair.qpair.ctrlr = &ctrlr;
+ rqpair.cmds = &cmd;
+ cmd.sgl[0].address = 0x1111;
+ rdma_req.id = 0;
+ rdma_req.req = &req;
+
+ req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
+ req.qpair = &rqpair.qpair;
+
+ g_nvme_rdma_mr.rkey = 2;
+
+ /* Test case 1: contig request. Expected: PASS */
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == g_nvme_rdma_mr.rkey);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+
+ /* Test case 2: payload of 1 << 24 bytes overflows the 3-byte length field of the keyed SGL descriptor. Expected: FAIL */
+ req.payload_offset = 0;
+ req.payload_size = 1 << 24;
+ rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+}
+
+static void
+test_nvme_rdma_build_contig_inline_request(void)
+{
+ struct nvme_rdma_qpair rqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvmf_cmd cmd = {{0}};
+ struct spdk_nvme_rdma_req rdma_req = {0};
+ struct nvme_request req = {{0}};
+ struct spdk_nvme_rdma_mr_map rmap = {0};
+ struct spdk_mem_map *map = NULL;
+ int rc;
+
+ rmap.map = map;
+
+ ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+ ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+ rqpair.mr_map = &rmap;
+ rqpair.qpair.ctrlr = &ctrlr;
+ rqpair.cmds = &cmd;
+ cmd.sgl[0].address = 0x1111;
+ rdma_req.id = 0;
+ rdma_req.req = &req;
+
+ req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
+ req.qpair = &rqpair.qpair;
+
+ g_nvme_rdma_mr.rkey = 2;
+
+ /* Test case 1: single inline SGL. Expected: PASS */
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+ CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+ CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
+ CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+
+ /* Test case 2: payload of 1 << 24 bytes still fits the 4-byte length field of the unkeyed inline descriptor. Expected: PASS */
+ req.payload_offset = 0;
+ req.payload_size = 1 << 24;
+ rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+ CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+ CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
+ CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_rdma", NULL, NULL);
+ CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_request);
+ CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_inline_request);
+ CU_ADD_TEST(suite, test_nvme_rdma_build_contig_request);
+ CU_ADD_TEST(suite, test_nvme_rdma_build_contig_inline_request);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore
new file mode 100644
index 000000000..c0cf6e92c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore
@@ -0,0 +1 @@
+nvme_tcp_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile
new file mode 100644
index 000000000..612f2b793
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_tcp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c
new file mode 100644
index 000000000..ed817fe2d
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c
@@ -0,0 +1,459 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_sock.c"
+
+#include "nvme/nvme_tcp.c"
+#include "common/lib/nvme/common_stubs.h"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME);
+
+DEFINE_STUB(nvme_qpair_submit_request,
+ int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
+
+DEFINE_STUB(spdk_sock_set_priority,
+ int, (struct spdk_sock *sock, int priority), 0);
+
+DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
+ struct spdk_nvme_qpair *qpair), 0);
+
+static void
+test_nvme_tcp_pdu_set_data_buf(void)
+{
+ struct nvme_tcp_pdu pdu = {};
+ struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {};
+ uint32_t data_len;
+ uint64_t i;
+
+ /* 1st case: input is a single SGL entry. */
+ iov[0].iov_base = (void *)0xDEADBEEF;
+ iov[0].iov_len = 4096;
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512);
+
+ CU_ASSERT(pdu.data_iovcnt == 1);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 512);
+
+ /* 2nd case: simulate split on multiple SGL entries. */
+ iov[0].iov_base = (void *)0xDEADBEEF;
+ iov[0].iov_len = 4096;
+ iov[1].iov_base = (void *)0xFEEDBEEF;
+ iov[1].iov_len = 512 * 7;
+ iov[2].iov_base = (void *)0xF00DF00D;
+ iov[2].iov_len = 4096 * 2;
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048);
+
+ CU_ASSERT(pdu.data_iovcnt == 1);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
+
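+ /* Second slice: offset 2048, length 2048 + 512 * 3 = 3584 bytes. It should consume the remaining 2048 bytes of iov[0] (4096 total) plus the first 512 * 3 bytes of iov[1]. */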
+ nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3);
+
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
+ CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
+ CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3);
+
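+ /* Third slice: offset 4096 + 512 * 3 = 5632 lands 1536 bytes into iov[1], leaving 512 * 4 bytes there, followed by all 4096 * 2 bytes of iov[2]. */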
+ nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2);
+
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4);
+ CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D);
+ CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2);
+
+ /* 3rd case: Number of input SGL entries is equal to the number of PDU SGL
+ * entries.
+ */
+ data_len = 0;
+ for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+ iov[i].iov_base = (void *)(0xDEADBEEF + i);
+ iov[i].iov_len = 512 * (i + 1);
+ data_len += 512 * (i + 1);
+ }
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len);
+
+ CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS);
+ for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+ CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i);
+ CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1));
+ }
+}
+
+static void
+test_nvme_tcp_build_iovs(void)
+{
+ const uintptr_t pdu_iov_len = 4096;
+ struct nvme_tcp_pdu pdu = {};
+ struct iovec iovs[5] = {};
+ uint32_t mapped_length = 0;
+ int rc;
+
+ pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
+ pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
+ pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
+ SPDK_NVME_TCP_DIGEST_LEN;
+ pdu.data_len = pdu_iov_len * 2;
+ pdu.padding_len = 0;
+
+ pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
+ pdu.data_iov[0].iov_len = pdu_iov_len;
+ pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
+ pdu.data_iov[1].iov_len = pdu_iov_len;
+ pdu.data_iovcnt = 2;
+
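+ /* Both digest flags are enabled below. The header digest is contiguous with the header in pdu.hdr.raw, so header plus digest map to a single iov; two data iovs and the trailing data digest iov give an expected count of 4. */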
+ rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
+ CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
+ CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
+ CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
+ CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
+ CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
+ pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);
+
+ /* Add a new data_iov entry, update pdu iov count and data length */
+ pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
+ pdu.data_iov[2].iov_len = 123;
+ pdu.data_iovcnt = 3;
+ pdu.data_len += 123;
+ pdu.hdr.common.plen += 123;
+
+ rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
+ CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
+ CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
+ CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
+ CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
+ CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
+ CU_ASSERT(iovs[3].iov_len == 123);
+ CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
+ pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
+}
+
+struct nvme_tcp_ut_bdev_io {
+ struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];
+ int iovpos;
+};
+
+/* Essentially a simplification of bdev_nvme_next_sge() and bdev_nvme_reset_sgl(). */
+static void
+nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset)
+{
+ struct nvme_tcp_ut_bdev_io *bio = cb_arg;
+ struct iovec *iov;
+
+ for (bio->iovpos = 0; bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
+ iov = &bio->iovs[bio->iovpos];
+ /* The offset must land exactly on an SGL entry boundary */
+ if (offset == 0) {
+ break;
+ }
+
+ SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
+ offset -= iov->iov_len;
+ }
+
+ SPDK_CU_ASSERT_FATAL(offset == 0);
+ SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
+}
+
+static int
+nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+ struct nvme_tcp_ut_bdev_io *bio = cb_arg;
+ struct iovec *iov;
+
+ SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
+
+ iov = &bio->iovs[bio->iovpos];
+
+ *address = iov->iov_base;
+ *length = iov->iov_len;
+ bio->iovpos++;
+
+ return 0;
+}
+
+static void
+test_nvme_tcp_build_sgl_request(void)
+{
+ struct nvme_tcp_qpair tqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct nvme_tcp_req tcp_req = {0};
+ struct nvme_request req = {{0}};
+ struct nvme_tcp_ut_bdev_io bio;
+ uint64_t i;
+ int rc;
+
+ ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
+ tqpair.qpair.ctrlr = &ctrlr;
+ tcp_req.req = &req;
+
+ req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
+ req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
+ req.payload.contig_or_cb_arg = &bio;
+ req.qpair = &tqpair.qpair;
+
+ for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+ bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000);
+ bio.iovs[i].iov_len = 0;
+ }
+
+ /* Test case 1: Single SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ bio.iovs[0].iov_len = 0x1000;
+ rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 1);
+ CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base);
+ CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len);
+ CU_ASSERT(tcp_req.iovcnt == 1);
+
+ /* Test case 2: Multiple SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x4000;
+ for (i = 0; i < 4; i++) {
+ bio.iovs[i].iov_len = 0x1000;
+ }
+ rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 4);
+ CU_ASSERT(tcp_req.iovcnt == 4);
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
+ CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
+ }
+
+ /* Test case 3: Payload is bigger than SGL. Expected: FAIL */
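+ /* 0x17000 bytes would need 23 page-sized iovecs, more than NVME_TCP_MAX_SGL_DESCRIPTORS (16 at the time of writing), so the build must fail after exhausting the SGL. */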
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x17000;
+ for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+ bio.iovs[i].iov_len = 0x1000;
+ }
+ rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+ CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS);
+ for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+ CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
+ CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
+ }
+}
+
+static void
+test_nvme_tcp_pdu_set_data_buf_with_md(void)
+{
+ struct nvme_tcp_pdu pdu = {};
+ struct iovec iovs[7] = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ int rc;
+
+ pdu.dif_ctx = &dif_ctx;
+
+ rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
+ 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ /* Single iovec case */
+ iovs[0].iov_base = (void *)0xDEADBEEF;
+ iovs[0].iov_len = 2080;
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);
+
+ CU_ASSERT(dif_ctx.data_offset == 0);
+ CU_ASSERT(pdu.data_len == 500);
+ CU_ASSERT(pdu.data_iovcnt == 1);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 500);
+
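+ /* Extended LBA layout: 520-byte blocks = 512 data + 8 metadata. This 1000-byte slice starts at data offset 500 and crosses two block boundaries, so the mapped buffer spans 1000 + 2 * 8 = 1016 bytes. */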
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);
+
+ CU_ASSERT(dif_ctx.data_offset == 500);
+ CU_ASSERT(pdu.data_len == 1000);
+ CU_ASSERT(pdu.data_iovcnt == 1);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
+ CU_ASSERT(pdu.data_iov[0].iov_len == 1016);
+
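+ /* The last 548 data bytes (offsets 1500..2047) straddle the block 2/3 boundary and end flush with block 3's data area, so two 8-byte metadata regions are covered: 548 + 16 = 564 mapped bytes. */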
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);
+
+ CU_ASSERT(dif_ctx.data_offset == 1500);
+ CU_ASSERT(pdu.data_len == 548);
+ CU_ASSERT(pdu.data_iovcnt == 1);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
+ CU_ASSERT(pdu.data_iov[0].iov_len == 564);
+
+ /* Multiple iovecs case */
+ iovs[0].iov_base = (void *)0xDEADBEEF;
+ iovs[0].iov_len = 256;
+ iovs[1].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x1000));
+ iovs[1].iov_len = 256 + 1;
+ iovs[2].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x2000));
+ iovs[2].iov_len = 4;
+ iovs[3].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x3000));
+ iovs[3].iov_len = 3 + 123;
+ iovs[4].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x4000));
+ iovs[4].iov_len = 389 + 6;
+ iovs[5].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x5000));
+ iovs[5].iov_len = 2 + 512 + 8 + 432;
+ iovs[6].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x6000));
+ iovs[6].iov_len = 80 + 8;
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);
+
+ CU_ASSERT(dif_ctx.data_offset == 0);
+ CU_ASSERT(pdu.data_len == 500);
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 256);
+ CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
+ CU_ASSERT(pdu.data_iov[1].iov_len == 244);
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);
+
+ CU_ASSERT(dif_ctx.data_offset == 500);
+ CU_ASSERT(pdu.data_len == 1000);
+ CU_ASSERT(pdu.data_iovcnt == 5);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
+ CU_ASSERT(pdu.data_iov[0].iov_len == 13);
+ CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
+ CU_ASSERT(pdu.data_iov[1].iov_len == 4);
+ CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
+ CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
+ CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
+ CU_ASSERT(pdu.data_iov[3].iov_len == 395);
+ CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
+ CU_ASSERT(pdu.data_iov[4].iov_len == 478);
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);
+
+ CU_ASSERT(dif_ctx.data_offset == 1500);
+ CU_ASSERT(pdu.data_len == 548);
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
+ CU_ASSERT(pdu.data_iov[0].iov_len == 476);
+ CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
+ CU_ASSERT(pdu.data_iov[1].iov_len == 88);
+}
+
+static void
+test_nvme_tcp_build_iovs_with_md(void)
+{
+ struct nvme_tcp_pdu pdu = {};
+ struct iovec iovs[11] = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ uint32_t mapped_length = 0;
+ int rc;
+
+ rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
+ 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ pdu.dif_ctx = &dif_ctx;
+
+ pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
+ pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
+ pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
+ SPDK_NVME_TCP_DIGEST_LEN;
+ pdu.data_len = 512 * 8;
+ pdu.padding_len = 0;
+
+ pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
+ pdu.data_iov[0].iov_len = (512 + 8) * 8;
+ pdu.data_iovcnt = 1;
+
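+ /* A single contiguous extended-LBA buffer (8 x 520 bytes) should be split into eight 512-byte data iovs, skipping the 8 metadata bytes after each block; with the header iov and the data digest iov, 10 iovs in total. */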
+ rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
+ CU_ASSERT(rc == 10);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
+ CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
+ CU_ASSERT(iovs[1].iov_len == 512);
+ CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
+ CU_ASSERT(iovs[2].iov_len == 512);
+ CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
+ CU_ASSERT(iovs[3].iov_len == 512);
+ CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
+ CU_ASSERT(iovs[4].iov_len == 512);
+ CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
+ CU_ASSERT(iovs[5].iov_len == 512);
+ CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
+ CU_ASSERT(iovs[6].iov_len == 512);
+ CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
+ CU_ASSERT(iovs[7].iov_len == 512);
+ CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
+ CU_ASSERT(iovs[8].iov_len == 512);
+ CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
+ 512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_tcp", NULL, NULL);
+ CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf);
+ CU_ADD_TEST(suite, test_nvme_tcp_build_iovs);
+ CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request);
+ CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md);
+ CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore
new file mode 100644
index 000000000..1cb0d98ad
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore
@@ -0,0 +1 @@
+nvme_uevent_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile
new file mode 100644
index 000000000..98687efb8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_uevent_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c
new file mode 100644
index 000000000..a9775c983
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c
@@ -0,0 +1,165 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "common/lib/test_env.c"
+
+#include "nvme/nvme_uevent.c"
+
+#ifdef __linux__
+
+enum uevent_parse_event_return_type {
+ uevent_abnormal_exit = -1,
+ uevent_normal_exit = 0,
+ uevent_expected_continue = 1
+};
+
+#define SPDK_NVME_UEVENT_SUBSYSTEM_NULL 0xFF
+
+static void
+test_nvme_uevent_parse_event(void)
+{
+ char *commands;
+ struct spdk_uevent uevent = {};
+ int rc = uevent_normal_exit;
+
+ /* Simulate uevent payloads and check the expected parse results. */
+ /* The Linux kernel terminates each KEY=value string in a uevent with a NUL character. */
+
+ /* Case 1: add event whose subsystem is neither uio nor vfio-pci: /devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0 */
+ commands =
+ "ACTION=add\0DEVPATH=/devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0\0SUBSYSTEM= \0DRIVER= \0PCI_SLOT_NAME= \0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+ uevent.action = 0;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_abnormal_exit);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_NULL);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 2: Add uio /devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0 */
+ commands =
+ "ACTION=add \0DEVPATH=/devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0\0SUBSYSTEM=uio\0DRIVER=\0PCI_SLOT_NAME= \0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+ uevent.action = 0;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_UIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 3: Remove uio /devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0 */
+ commands =
+ "ACTION=remove\0DEVPATH=/devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0\0SUBSYSTEM=uio\0DRIVER=\0PCI_SLOT_NAME= \0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_UIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_REMOVE);
+
+ /* Case 4: Add vfio-pci 0000:81:00.0 */
+ commands = "ACTION=bind\0DEVPATH=\0SUBSYSTEM= \0DRIVER=vfio-pci\0PCI_SLOT_NAME=0000:81:00.0\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_VFIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 5: Remove vfio-pci 0000:81:00.0 */
+ commands = "ACTION=remove\0DEVPATH= \0SUBSYSTEM= \0DRIVER=vfio-pci \0PCI_SLOT_NAME=0000:81:00.0\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_VFIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_REMOVE);
+
+ /* Case 6: add vfio-pci event with a malformed PCI address (000000) */
+ commands = "ACTION=bind\0DEVPATH= \0SUBSYSTEM= \0DRIVER=vfio-pci \0PCI_SLOT_NAME=000000\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_abnormal_exit);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_VFIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 7: add event with unrecognized driver "vfio" (not vfio-pci) for 0000:81:00.0 */
+ commands = "ACTION=bind\0DEVPATH= \0SUBSYSTEM= \0DRIVER=vfio \0PCI_SLOT_NAME=0000:81:00.0\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+ uevent.action = 0;
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_abnormal_exit);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_NULL);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+}
+
+#else
+
+static void
+test_nvme_uevent_parse_event(void)
+{
+ CU_ASSERT(1);
+}
+
+#endif
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_uevent", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_uevent_parse_event);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/Makefile b/src/spdk/test/unit/lib/nvmf/Makefile
new file mode 100644
index 000000000..94d5dde63
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/Makefile
@@ -0,0 +1,48 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = tcp.c ctrlr.c subsystem.c ctrlr_discovery.c ctrlr_bdev.c
+
+DIRS-$(CONFIG_RDMA) += rdma.c
+
+DIRS-$(CONFIG_FC) += fc.c fc_ls.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore
new file mode 100644
index 000000000..65e849431
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile
new file mode 100644
index 000000000..c68c589ab
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ctrlr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c
new file mode 100644
index 000000000..1da8f9d54
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c
@@ -0,0 +1,1711 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+#include "spdk_internal/thread.h"
+
+#include "common/lib/ut_multithread.c"
+#include "nvmf/ctrlr.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF);
+
+struct spdk_bdev {
+ int ut_mock;
+ uint64_t blockcnt;
+};
+
+const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
+const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";
+
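+/* DEFINE_STUB(name, return type, parameters, default return) comes from spdk_internal/mock.h; individual tests can override the canned return value with MOCK_SET(). */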
+DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
+ struct spdk_nvmf_subsystem *,
+ (struct spdk_nvmf_tgt *tgt, const char *subnqn),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_poll_group_create,
+ struct spdk_nvmf_poll_group *,
+ (struct spdk_nvmf_tgt *tgt),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
+ const char *,
+ (const struct spdk_nvmf_subsystem *subsystem),
+ subsystem_default_sn);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
+ const char *,
+ (const struct spdk_nvmf_subsystem *subsystem),
+ subsystem_default_mn);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
+ true);
+
+DEFINE_STUB(nvmf_subsystem_add_ctrlr,
+ int,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
+ 0);
+
+DEFINE_STUB(nvmf_subsystem_get_ctrlr,
+ struct spdk_nvmf_ctrlr *,
+ (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
+ NULL);
+
+DEFINE_STUB(nvmf_ctrlr_dsm_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB_V(nvmf_get_discovery_log_page,
+ (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
+ uint32_t iovcnt, uint64_t offset, uint32_t length));
+
+DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
+ int,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
+ 0);
+
+DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
+ true);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_transport_req_complete,
+ int,
+ (struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
+
+DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
+ (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
+ struct spdk_dif_ctx *dif_ctx),
+ true);
+
+DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
+
+DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
+
+int
+spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
+{
+ return 0;
+}
+
+void
+nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
+ bool dif_insert_or_strip)
+{
+ uint64_t num_blocks;
+
+ SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
+ num_blocks = ns->bdev->blockcnt;
+ nsdata->nsze = num_blocks;
+ nsdata->ncap = num_blocks;
+ nsdata->nuse = num_blocks;
+ nsdata->nlbaf = 0;
+ nsdata->flbas.format = 0;
+ nsdata->lbaf[0].lbads = spdk_u32log2(512);
+}
+
+static void
+test_get_log_page(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_request req = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ char data[4096];
+
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ ctrlr.subsys = &subsystem;
+
+ qpair.ctrlr = &ctrlr;
+
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.data = &data;
+ req.length = sizeof(data);
+
+ /* Get Log Page - all valid */
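+ /* numdl holds the low bits of the zero-based dword count, hence req.length / 4 - 1. */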
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+
+ /* Get Log Page with invalid log ID */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10 = 0;
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Get Log Page with invalid offset (not dword aligned) */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
+ cmd.nvme_cmd.cdw12 = 2;
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Get Log Page without data buffer */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ req.data = NULL;
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+ req.data = data;
+}
+
+static void
+test_process_fabrics_cmd(void)
+{
+ struct spdk_nvmf_request req = {};
+ int ret;
+ struct spdk_nvmf_qpair req_qpair = {};
+ union nvmf_h2c_msg req_cmd = {};
+ union nvmf_c2h_msg req_rsp = {};
+
+ req.qpair = &req_qpair;
+ req.cmd = &req_cmd;
+ req.rsp = &req_rsp;
+ req.qpair->ctrlr = NULL;
+
+ /* Any fabrics command other than CONNECT must fail with COMMAND SEQUENCE ERROR while no controller is established */
+ req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
+ ret = nvmf_ctrlr_process_fabrics_cmd(&req);
+ CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
+ CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+}
+
+static bool
+nvme_status_success(const struct spdk_nvme_status *status)
+{
+ return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
+}
+
+static void
+test_connect(void)
+{
+ struct spdk_nvmf_fabric_connect_data connect_data;
+ struct spdk_nvmf_poll_group group;
+ struct spdk_nvmf_subsystem_poll_group *sgroups;
+ struct spdk_nvmf_transport transport;
+ struct spdk_nvmf_transport_ops tops = {};
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_qpair admin_qpair;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_qpair qpair2;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_tgt tgt;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ const uint8_t hostid[16] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ };
+ const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
+ const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
+ int rc;
+
+ memset(&group, 0, sizeof(group));
+ group.thread = spdk_get_thread();
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.qpair_mask = spdk_bit_array_create(3);
+ SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
+ ctrlr.vcprop.cc.bits.en = 1;
+ ctrlr.vcprop.cc.bits.iosqes = 6;
+ ctrlr.vcprop.cc.bits.iocqes = 4;
+
+ memset(&admin_qpair, 0, sizeof(admin_qpair));
+ admin_qpair.group = &group;
+
+ memset(&tgt, 0, sizeof(tgt));
+ memset(&transport, 0, sizeof(transport));
+ transport.ops = &tops;
+ transport.opts.max_aq_depth = 32;
+ transport.opts.max_queue_depth = 64;
+ transport.opts.max_qpairs_per_ctrlr = 3;
+ transport.tgt = &tgt;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.transport = &transport;
+ qpair.group = &group;
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ TAILQ_INIT(&qpair.outstanding);
+
+ memset(&connect_data, 0, sizeof(connect_data));
+ memcpy(connect_data.hostid, hostid, sizeof(hostid));
+ connect_data.cntlid = 0xFFFF;
+ snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
+ snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ subsystem.thread = spdk_get_thread();
+ subsystem.id = 1;
+ TAILQ_INIT(&subsystem.ctrlrs);
+ subsystem.tgt = &tgt;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
+
+ sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
+ group.sgroups = sgroups;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
+ cmd.connect_cmd.cid = 1;
+ cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
+ cmd.connect_cmd.recfmt = 0;
+ cmd.connect_cmd.qid = 0;
+ cmd.connect_cmd.sqsize = 31;
+ cmd.connect_cmd.cattr = 0;
+ cmd.connect_cmd.kato = 120000;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.length = sizeof(connect_data);
+ req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
+ req.data = &connect_data;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
+ MOCK_SET(spdk_nvmf_poll_group_create, &group);
+
+ /* Valid admin connect command */
+ memset(&rsp, 0, sizeof(rsp));
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+
+ /* Valid admin connect command with kato = 0 */
+ cmd.connect_cmd.kato = 0;
+ memset(&rsp, 0, sizeof(rsp));
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+ cmd.connect_cmd.kato = 120000;
+
+ /* Invalid data length */
+ memset(&rsp, 0, sizeof(rsp));
+ req.length = sizeof(connect_data) - 1;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ req.length = sizeof(connect_data);
+
+ /* Invalid recfmt */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.recfmt = 1234;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.recfmt = 0;
+
+ /* Subsystem not found */
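+ /* For CONNECT failures, iattr/ipo locate the bad field: iattr == 1 gives a byte offset into the connect data (cntlid at 16, subnqn at 256, hostnqn at 512); iattr == 0 gives an offset into the connect command itself (qid at 42, sqsize at 44). */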
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
+
+ /* Unterminated hostnqn */
+ memset(&rsp, 0, sizeof(rsp));
+ memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
+
+ /* Host not allowed */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);
+
+ /* Invalid sqsize == 0 */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.sqsize = 0;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid admin sqsize > max_aq_depth */
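+ /* sqsize is a zero-based value, so 32 asks for 33 entries, one more than max_aq_depth (32). */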
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.sqsize = 32;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid I/O sqsize > max_queue_depth */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.qid = 1;
+ cmd.connect_cmd.sqsize = 64;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.qid = 0;
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid cntlid for admin queue */
+ memset(&rsp, 0, sizeof(rsp));
+ connect_data.cntlid = 0x1234;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ connect_data.cntlid = 0xFFFF;
+
+ ctrlr.admin_qpair = &admin_qpair;
+ ctrlr.subsys = &subsystem;
+
+ /* Valid I/O queue connect command */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
+ cmd.connect_cmd.qid = 1;
+ cmd.connect_cmd.sqsize = 63;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr == &ctrlr);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ qpair.ctrlr = NULL;
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Non-existent controller */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
+
+ /* I/O connect to discovery controller */
+ memset(&rsp, 0, sizeof(rsp));
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+
+ /* Connect to the discovery controller (qid = 0) with keep-alive-timeout != 0 */
+ cmd.connect_cmd.qid = 0;
+ cmd.connect_cmd.kato = 120000;
+ memset(&rsp, 0, sizeof(rsp));
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL);
+ CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+
+ /* Admin queue connect to discovery controller with keep-alive-timeout == 0.
+  * The target then applies a default keep-alive timeout in its place.
+  */
+ cmd.connect_cmd.kato = 0;
+ memset(&rsp, 0, sizeof(rsp));
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL);
+ CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+ cmd.connect_cmd.qid = 1;
+ cmd.connect_cmd.kato = 120000;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ /* I/O connect to disabled controller */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.en = 0;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ /* I/O connect with invalid IOSQES */
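+ /* CC.IOSQES/IOCQES hold log2 of the queue entry sizes; the valid values here are 6 (64-byte SQE) and 4 (16-byte CQE). */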
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.iosqes = 3;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ ctrlr.vcprop.cc.bits.iosqes = 6;
+
+ /* I/O connect with invalid IOCQES */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.iocqes = 3;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ ctrlr.vcprop.cc.bits.iocqes = 4;
+
+ /* I/O connect with too many existing qpairs */
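+ /* Fill the qpair mask so that no free queue ID remains for the new connection. */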
+ memset(&rsp, 0, sizeof(rsp));
+ spdk_bit_array_set(ctrlr.qpair_mask, 0);
+ spdk_bit_array_set(ctrlr.qpair_mask, 1);
+ spdk_bit_array_set(ctrlr.qpair_mask, 2);
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 0);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 1);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 2);
+
+ /* I/O connect with duplicate queue ID */
+ memset(&rsp, 0, sizeof(rsp));
+ memset(&qpair2, 0, sizeof(qpair2));
+ qpair2.group = &group;
+ qpair2.qid = 1;
+ spdk_bit_array_set(ctrlr.qpair_mask, 1);
+ cmd.connect_cmd.qid = 1;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+
+ /* Clean up globals */
+ MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
+ MOCK_CLEAR(spdk_nvmf_poll_group_create);
+
+ spdk_bit_array_free(&ctrlr.qpair_mask);
+ free(sgroups);
+}
+
+static void
+test_get_ns_id_desc_list(void)
+{
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_ns *ns_ptrs[1];
+ struct spdk_nvmf_ns ns;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ struct spdk_bdev bdev;
+ uint8_t buf[4096];
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ ns_ptrs[0] = &ns;
+ subsystem.ns = ns_ptrs;
+ subsystem.max_nsid = 1;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ memset(&ns, 0, sizeof(ns));
+ ns.opts.nsid = 1;
+ ns.bdev = &bdev;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.ctrlr = &ctrlr;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+ req.data = buf;
+ req.length = sizeof(buf);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
+ cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
+
+ /* Invalid NSID */
+ cmd.nvme_cmd.nsid = 0;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+
+ /* Valid NSID, but ns has no IDs defined */
+ cmd.nvme_cmd.nsid = 1;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));
+
+ /* Valid NSID, only EUI64 defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[13] == 0);
+
+ /* Valid NSID, only NGUID defined */
+ memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[1] == 16);
+ CU_ASSERT(buf[4] == 0x22);
+ CU_ASSERT(buf[19] == 0xEE);
+ CU_ASSERT(buf[21] == 0);
+
+ /* Valid NSID, both EUI64 and NGUID defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[13] == 16);
+ CU_ASSERT(buf[16] == 0x22);
+ CU_ASSERT(buf[31] == 0xEE);
+ CU_ASSERT(buf[33] == 0);
+
+ /* Valid NSID, EUI64, NGUID, and UUID defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ ns.opts.uuid.u.raw[0] = 0x33;
+ ns.opts.uuid.u.raw[15] = 0xDD;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[13] == 16);
+ CU_ASSERT(buf[16] == 0x22);
+ CU_ASSERT(buf[31] == 0xEE);
+ CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
+ CU_ASSERT(buf[33] == 16);
+ CU_ASSERT(buf[36] == 0x33);
+ CU_ASSERT(buf[51] == 0xDD);
+ CU_ASSERT(buf[53] == 0);
+}
+
+static void
+test_identify_ns(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_transport transport = {};
+ struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
+ struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
+ struct spdk_nvme_cmd cmd = {};
+ struct spdk_nvme_cpl rsp = {};
+ struct spdk_nvme_ns_data nsdata = {};
+ struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
+ struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
+ struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
+
+ subsystem.ns = ns_arr;
+ subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
+
+ /* Invalid NSID 0 */
+ cmd.nsid = 0;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Valid NSID 1 */
+ cmd.nsid = 1;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(nsdata.nsze == 1234);
+
+ /* Valid but inactive NSID 2 */
+ cmd.nsid = 2;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Valid NSID 3 */
+ cmd.nsid = 3;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(nsdata.nsze == 5678);
+
+ /* Invalid NSID 4 */
+ cmd.nsid = 4;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Invalid NSID 0xFFFFFFFF (NS management not supported) */
+ cmd.nsid = 0xFFFFFFFF;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+}
+
+static void
+test_set_get_features(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_qpair admin_qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ struct spdk_nvmf_ns ns[3];
+ struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
+ struct spdk_nvmf_request req;
+ int rc;
+
+ subsystem.ns = ns_arr;
+ subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
+ admin_qpair.ctrlr = &ctrlr;
+ req.qpair = &admin_qpair;
+ cmd.nvme_cmd.nsid = 1;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
+ ns[0].ptpl_file = "testcfg";
+ rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
+ CU_ASSERT(ns[0].ptpl_activated == true);
+
+ /* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
+ rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
+
+
+ /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
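+ /* TMPSEL (CDW11 bits 19:16) selects the temperature sensor the threshold applies to. */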
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_get_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+
+ /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_get_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+
+ /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+
+ /* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
+
+ rc = nvmf_ctrlr_get_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+
+ /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+}
+
+/*
+ * Reservation Unit Test Configuration
+ * -------- -------- --------
+ * | Host A | | Host B | | Host C |
+ * -------- -------- --------
+ * / \ | |
+ * -------- -------- ------- -------
+ * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C|
+ * -------- -------- ------- -------
+ * \ \ / /
+ * \ \ / /
+ * \ \ / /
+ * --------------------------------------
+ * | NAMESPACE 1 |
+ * --------------------------------------
+ */
+
+static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
+struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
+
+static void
+ut_reservation_init(enum spdk_nvme_reservation_type rtype)
+{
+ /* Host A has two controllers */
+ spdk_uuid_generate(&g_ctrlr1_A.hostid);
+ spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
+
+ /* Host B has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_B.hostid);
+
+ /* Host C has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_C.hostid);
+
+ memset(&g_ns_info, 0, sizeof(g_ns_info));
+ g_ns_info.rtype = rtype;
+ g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
+ g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
+ g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
+}
+
+static void
+test_reservation_write_exclusive(void)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
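+ /* Write Exclusive: only the reservation holder may write, while any host may read. */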
+ ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Read command from Host A and Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Test Case: Issue a DSM Write command from Host A and Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* Test Case: Issue a Write command from Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* Test Case: Issue a Read command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Unregister Host C */
+ memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
+
+ /* Test Case: Read and Write commands from non-registrant Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+}
+
+static void
+test_reservation_exclusive_access(void)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
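+ /* Exclusive Access: only the reservation holder may read or write. */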
+ ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Read command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* Test Case: Issue a Reservation Release command from a valid Registrant */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+}
+
+static void
+_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
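+ /* In the registrants-only and all-registrants write-exclusive variants, every registrant retains write access. */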
+ ut_reservation_init(rtype);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Read command from Host A and Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Test Case: Issue a DSM Write command from Host A and Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Unregister Host C */
+ memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
+
+ /* Test Case: Read and Write commands from non-registrant Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+}
+
+static void
+test_reservation_write_exclusive_regs_only_and_all_regs(void)
+{
+ _test_reservation_write_exclusive_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+ _test_reservation_write_exclusive_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+}
+
+static void
+_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
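+ /* In the registrants-only and all-registrants exclusive-access variants, registrants keep full access; unregistered hosts get none. */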
+ ut_reservation_init(rtype);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Write command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Unregister Host B */
+ memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));
+
+ /* Test Case: Issue a Read command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+}
+
+static void
+test_reservation_exclusive_access_regs_only_and_all_regs(void)
+{
+ _test_reservation_exclusive_access_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
+ _test_reservation_exclusive_access_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
+}
+
+static void
+test_reservation_notification_log_page(void)
+{
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_ns ns;
+ struct spdk_nvmf_request req;
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ union spdk_nvme_async_event_completion event = {};
+ struct spdk_nvme_reservation_notification_log logs[3];
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.thread = spdk_get_thread();
+ TAILQ_INIT(&ctrlr.log_head);
+ ns.nsid = 1;
+
+ /* Test Case: Mask all the reservation notifications */
+ ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
+ SPDK_NVME_RESERVATION_RELEASED_MASK |
+ SPDK_NVME_RESERVATION_PREEMPTED_MASK;
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_REGISTRATION_PREEMPTED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_RELEASED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_PREEMPTED);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
+
+ /* Test Case: Unmask all the reservation notifications;
+  * three log pages are generated and an AER is triggered.
+  */
+ ns.mask = 0;
+ ctrlr.num_avail_log_pages = 0;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ ctrlr.aer_req[0] = &req;
+ ctrlr.nr_aer_reqs = 1;
+ req.qpair = &qpair;
+ TAILQ_INIT(&qpair.outstanding);
+ qpair.ctrlr = NULL;
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_REGISTRATION_PREEMPTED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_RELEASED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_PREEMPTED);
+ poll_threads();
+ event.raw = rsp.nvme_cpl.cdw0;
+ SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
+ SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
+ SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
+ SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
+
+ /* Test Case: Get Log Page to clear the log pages */
+ nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
+ SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
+}
+
+static void
+test_get_dif_ctx(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_request req = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_ns ns = {};
+ struct spdk_nvmf_ns *_ns = NULL;
+ struct spdk_bdev bdev = {};
+ union nvmf_h2c_msg cmd = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ bool ret;
+
+ ctrlr.subsys = &subsystem;
+
+ qpair.ctrlr = &ctrlr;
+
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+
+ ns.bdev = &bdev;
+
+ ctrlr.dif_insert_or_strip = false;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ ctrlr.dif_insert_or_strip = true;
+ qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ qpair.qid = 1;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ cmd.nvme_cmd.nsid = 1;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ subsystem.max_nsid = 1;
+ subsystem.ns = &_ns;
+ subsystem.ns[0] = &ns;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == true);
+}
+
+static void
+test_identify_ctrlr(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {
+ .subtype = SPDK_NVMF_SUBTYPE_NVME
+ };
+ struct spdk_nvmf_transport_ops tops = {};
+ struct spdk_nvmf_transport transport = {
+ .ops = &tops,
+ .opts = {
+ .in_capsule_data_size = 4096,
+ },
+ };
+ struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
+ struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
+ struct spdk_nvme_ctrlr_data cdata = {};
+ uint32_t expected_ioccsz;
+
+ nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
+
+ /* Check ioccsz, TCP transport */
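+ /* IOCCSZ is expressed in 16-byte units: the SQE size plus the in-capsule data room. */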
+ tops.type = SPDK_NVME_TRANSPORT_TCP;
+ expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
+
+ /* Check ioccsz, RDMA transport */
+ tops.type = SPDK_NVME_TRANSPORT_RDMA;
+ expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
+
+ /* Check ioccsz, TCP transport with dif_insert_or_strip */
+ tops.type = SPDK_NVME_TRANSPORT_TCP;
+ ctrlr.dif_insert_or_strip = true;
+ expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
+}
+
+static int
+custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
+{
+ req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
+
+ return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+}
+
+static void
+test_custom_admin_cmd(void)
+{
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_ns *ns_ptrs[1];
+ struct spdk_nvmf_ns ns;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ struct spdk_bdev bdev;
+ uint8_t buf[4096];
+ int rc;
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ ns_ptrs[0] = &ns;
+ subsystem.ns = ns_ptrs;
+ subsystem.max_nsid = 1;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ memset(&ns, 0, sizeof(ns));
+ ns.opts.nsid = 1;
+ ns.bdev = &bdev;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.ctrlr = &ctrlr;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+ req.data = buf;
+ req.length = sizeof(buf);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.nvme_cmd.opc = 0xc1;
+ cmd.nvme_cmd.nsid = 0;
+ memset(&rsp, 0, sizeof(rsp));
+
+ spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
+
+ /* Ensure that our custom handler is called */
+ rc = nvmf_ctrlr_process_admin_cmd(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+}
+
+static void
+test_fused_compare_and_write(void)
+{
+ struct spdk_nvmf_request req = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_ns ns = {};
+ struct spdk_nvmf_ns *subsys_ns[1] = {};
+ struct spdk_bdev bdev = {};
+
+ struct spdk_nvmf_poll_group group = {};
+ struct spdk_nvmf_subsystem_poll_group sgroups = {};
+ struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
+
+ ns.bdev = &bdev;
+
+ subsystem.id = 0;
+ subsystem.max_nsid = 1;
+ subsys_ns[0] = &ns;
+ subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
+
+ /* Enable controller */
+ ctrlr.vcprop.cc.bits.en = 1;
+ ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
+
+ group.num_sgroups = 1;
+ sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups.num_ns = 1;
+ sgroups.ns_info = &ns_info;
+ TAILQ_INIT(&sgroups.queued);
+ group.sgroups = &sgroups;
+ TAILQ_INIT(&qpair.outstanding);
+
+ qpair.ctrlr = &ctrlr;
+ qpair.group = &group;
+ qpair.qid = 1;
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+
+ cmd.nsid = 1;
+
+ req.qpair = &qpair;
+ req.cmd = (union nvmf_h2c_msg *)&cmd;
+ req.rsp = &rsp;
+
+ /* SUCCESS/SUCCESS */
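+ /* The first half of a fused pair is parked in qpair.first_fused_req until its partner arrives. */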
+ cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(qpair.first_fused_req != NULL);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+
+ cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(qpair.first_fused_req == NULL);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+
+ /* Wrong sequence */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.first_fused_req == NULL);
+
+ /* Write as FUSE_FIRST (Wrong op code) */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
+ CU_ASSERT(qpair.first_fused_req == NULL);
+
+ /* Compare as FUSE_SECOND (Wrong op code) */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(qpair.first_fused_req != NULL);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+
+ cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
+ CU_ASSERT(qpair.first_fused_req == NULL);
+}
+
+static void
+test_multi_async_event_reqs(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_request req[5] = {};
+ struct spdk_nvmf_ns *ns_ptrs[1] = {};
+ struct spdk_nvmf_ns ns = {};
+ union nvmf_h2c_msg cmd[5] = {};
+ union nvmf_c2h_msg rsp[5] = {};
+
+ struct spdk_nvmf_poll_group group = {};
+ struct spdk_nvmf_subsystem_poll_group sgroups = {};
+
+ int i;
+
+ ns_ptrs[0] = &ns;
+ subsystem.ns = ns_ptrs;
+ subsystem.max_nsid = 1;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ ns.opts.nsid = 1;
+ group.sgroups = &sgroups;
+
+ qpair.ctrlr = &ctrlr;
+ qpair.group = &group;
+ TAILQ_INIT(&qpair.outstanding);
+
+ ctrlr.subsys = &subsystem;
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ for (i = 0; i < 5; i++) {
+ cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
+ cmd[i].nvme_cmd.nsid = 1;
+ cmd[i].nvme_cmd.cid = i;
+
+ req[i].qpair = &qpair;
+ req[i].cmd = &cmd[i];
+ req[i].rsp = &rsp[i];
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
+ }
+
+ /* Target can store NVMF_MAX_ASYNC_EVENTS reqs */
+ sgroups.io_outstanding = NVMF_MAX_ASYNC_EVENTS;
+ for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
+ }
+ CU_ASSERT(sgroups.io_outstanding == 0);
+
+ /* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
+ CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
+
+ /* Test that the aer_req array stays contiguous when a request in the middle is aborted */
+ CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
+ CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
+ CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
+ CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
+
+ CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
+ CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
+ CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
+ CU_ASSERT(ctrlr.aer_req[2] == NULL);
+ CU_ASSERT(ctrlr.nr_aer_reqs == 2);
+
+ TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
+ TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+ CU_ADD_TEST(suite, test_get_log_page);
+ CU_ADD_TEST(suite, test_process_fabrics_cmd);
+ CU_ADD_TEST(suite, test_connect);
+ CU_ADD_TEST(suite, test_get_ns_id_desc_list);
+ CU_ADD_TEST(suite, test_identify_ns);
+ CU_ADD_TEST(suite, test_reservation_write_exclusive);
+ CU_ADD_TEST(suite, test_reservation_exclusive_access);
+ CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
+ CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
+ CU_ADD_TEST(suite, test_reservation_notification_log_page);
+ CU_ADD_TEST(suite, test_get_dif_ctx);
+ CU_ADD_TEST(suite, test_set_get_features);
+ CU_ADD_TEST(suite, test_identify_ctrlr);
+ CU_ADD_TEST(suite, test_custom_admin_cmd);
+ CU_ADD_TEST(suite, test_fused_compare_and_write);
+ CU_ADD_TEST(suite, test_multi_async_event_reqs);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore
new file mode 100644
index 000000000..78fca1017
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_bdev_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile
new file mode 100644
index 000000000..1d22f14be
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ctrlr_bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
new file mode 100644
index 000000000..0df9c983b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
@@ -0,0 +1,415 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/mock.h"
+
+#include "nvmf/ctrlr_bdev.c"
+
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);
+
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
+
+struct spdk_bdev {
+ uint32_t blocklen;
+ uint64_t num_blocks;
+ uint32_t md_len;
+};
+
+uint32_t
+spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
+{
+ return bdev->blocklen;
+}
+
+uint64_t
+spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
+{
+ return bdev->num_blocks;
+}
+
+uint32_t
+spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
+{
+ abort();
+ return 0;
+}
+
+uint32_t
+spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
+{
+ return bdev->md_len;
+}
+
+DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *compare_iov, int compare_iovcnt,
+ struct iovec *write_iov, int write_iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);
+
+DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
+ uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));
+
+DEFINE_STUB(spdk_bdev_is_md_interleaved, bool, (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type,
+ (const struct spdk_bdev *bdev), SPDK_DIF_DISABLE);
+
+DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
+ (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);
+
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
+ (struct spdk_bdev_desc *desc), NULL);
+
+DEFINE_STUB(spdk_bdev_flush_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_unmap_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_io_type_supported, bool,
+ (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);
+
+DEFINE_STUB(spdk_bdev_queue_io_wait, int,
+ (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry),
+ 0);
+
+DEFINE_STUB(spdk_bdev_write_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_writev_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_read_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_readv_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_nvme_io_passthru, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
+ (const struct spdk_nvmf_subsystem *subsystem), NULL);
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
+{
+ abort();
+ return NULL;
+}
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
+{
+ abort();
+ return NULL;
+}
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
+{
+ abort();
+ return NULL;
+}
+
+DEFINE_STUB_V(spdk_bdev_io_get_nvme_status,
+ (const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc));
+
+int
+spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
+ bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
+ uint32_t data_offset, uint16_t guard_seed)
+{
+ ctx->block_size = block_size;
+ ctx->md_size = md_size;
+ ctx->init_ref_tag = init_ref_tag;
+
+ return 0;
+}
+
+static void
+test_get_rw_params(void)
+{
+ struct spdk_nvme_cmd cmd = {0};
+ uint64_t lba;
+ uint64_t count;
+
+ lba = 0;
+ count = 0;
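+ /* SLBA spans CDW10-11 (little-endian); NLB occupies CDW12 bits 15:00. */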
+ to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
+ to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+ nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
+ CU_ASSERT(lba == 0x1234567890ABCDEF);
+ CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
+}
+
+static void
+test_lba_in_range(void)
+{
+ /* Trivial cases (no overflow) */
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);
+
+ /* Overflow edge cases */
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
+}
+
+static void
+test_get_dif_ctx(void)
+{
+ struct spdk_bdev bdev = {};
+ struct spdk_nvme_cmd cmd = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ bool ret;
+
+ bdev.md_len = 0;
+
+ ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
+ CU_ASSERT(ret == false);
+
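+ /* nvmf_bdev_ctrlr_get_dif_ctx seeds the initial reference tag from the low 32 bits of the SLBA. */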
+ to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
+ bdev.blocklen = 520;
+ bdev.md_len = 8;
+
+ ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
+ CU_ASSERT(ret == true);
+ CU_ASSERT(dif_ctx.block_size == 520);
+ CU_ASSERT(dif_ctx.md_size == 8);
+ CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
+}
+
+static void
+test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
+{
+ int rc;
+ struct spdk_bdev bdev = {};
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel ch = {};
+
+ struct spdk_nvmf_request cmp_req = {};
+ union nvmf_c2h_msg cmp_rsp = {};
+
+ struct spdk_nvmf_request write_req = {};
+ union nvmf_c2h_msg write_rsp = {};
+
+ struct spdk_nvmf_qpair qpair = {};
+
+ struct spdk_nvme_cmd cmp_cmd = {};
+ struct spdk_nvme_cmd write_cmd = {};
+
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_ns ns = {};
+ struct spdk_nvmf_ns *subsys_ns[1] = {};
+
+ struct spdk_nvmf_poll_group group = {};
+ struct spdk_nvmf_subsystem_poll_group sgroups = {};
+ struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
+
+ bdev.blocklen = 512;
+ bdev.num_blocks = 10;
+ ns.bdev = &bdev;
+
+ subsystem.id = 0;
+ subsystem.max_nsid = 1;
+ subsys_ns[0] = &ns;
+ subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
+
+ /* Enable controller */
+ ctrlr.vcprop.cc.bits.en = 1;
+ ctrlr.subsys = &subsystem;
+
+ group.num_sgroups = 1;
+ sgroups.num_ns = 1;
+ sgroups.ns_info = &ns_info;
+ group.sgroups = &sgroups;
+
+ qpair.ctrlr = &ctrlr;
+ qpair.group = &group;
+
+ cmp_req.qpair = &qpair;
+ cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
+ cmp_req.rsp = &cmp_rsp;
+
+ cmp_cmd.nsid = 1;
+ cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ write_req.qpair = &qpair;
+ write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
+ write_req.rsp = &write_rsp;
+
+ write_cmd.nsid = 1;
+ write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ write_cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ /* 1. SUCCESS */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);
+
+ /* 2. Fused command start lba / num blocks mismatch */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 2; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);
+
+ /* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_get_rw_params);
+ CU_ADD_TEST(suite, test_lba_in_range);
+ CU_ADD_TEST(suite, test_get_dif_ctx);
+
+ CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore
new file mode 100644
index 000000000..a975a97ec
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_discovery_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile
new file mode 100644
index 000000000..d289bc3e8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = json
+TEST_FILE = ctrlr_discovery_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c
new file mode 100644
index 000000000..29e923de8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c
@@ -0,0 +1,303 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+#include "nvmf/ctrlr_discovery.c"
+#include "nvmf/subsystem.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB_V(spdk_bdev_module_release_bdev,
+ (struct spdk_bdev *bdev));
+
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
+ (const struct spdk_bdev *bdev), 512);
+
+DEFINE_STUB(spdk_nvmf_transport_stop_listen,
+ int,
+ (struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid), 0);
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc)
+{
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+int
+spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return 0;
+}
+
+static struct spdk_nvmf_listener g_listener = {};
+
+struct spdk_nvmf_listener *
+nvmf_transport_find_listener(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return &g_listener;
+}
+
+void
+nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
+ struct spdk_nvme_transport_id *trid,
+ struct spdk_nvmf_discovery_log_page_entry *entry)
+{
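+ /* Stamp a recognizable bogus trtype so the tests can verify this discover callback filled in the entry */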
+ entry->trtype = 42;
+}
+
+struct spdk_nvmf_transport_ops g_transport_ops = {};
+
+static struct spdk_nvmf_transport g_transport = {
+ .ops = &g_transport_ops
+};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_transport_create(const char *transport_name,
+ struct spdk_nvmf_transport_opts *tprt_opts)
+{
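+ /*
+ * strcasecmp() returns 0 on a match, so this stub fails creation
+ * only for "RDMA" and hands back g_transport for every other
+ * transport name.
+ */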
+ if (strcasecmp(transport_name, spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA))) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ return NULL;
+}
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
+{
+ return &g_transport;
+}
+
+int
+spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
+{
+ if (trtype == NULL || str == NULL) {
+ return -EINVAL;
+ }
+
+ if (strcasecmp(str, "PCIe") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_PCIE;
+ } else if (strcasecmp(str, "RDMA") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_RDMA;
+ } else {
+ return -ENOENT;
+ }
+ return 0;
+}
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+void
+nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
+{
+}
+
+void
+nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+int
+nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem)
+{
+ return 0;
+}
+
+int
+nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+void
+nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+static void
+_subsystem_add_listen_done(void *cb_arg, int status)
+{
+ SPDK_CU_ASSERT_FATAL(status == 0);
+}
+
+static void
+test_discovery_log(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem *subsystem;
+ uint8_t buffer[8192];
+ struct iovec iov;
+ struct spdk_nvmf_discovery_log_page *disc_log;
+ struct spdk_nvmf_discovery_log_page_entry *entry;
+ struct spdk_nvme_transport_id trid = {};
+
+ iov.iov_base = buffer;
+ iov.iov_len = 8192;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Add one subsystem and verify that the discovery log contains it */
+ subsystem = spdk_nvmf_subsystem_create(&tgt, "nqn.2016-06.io.spdk:subsystem1",
+ SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ subsystem->allow_any_host = true;
+
+ trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
+ snprintf(trid.traddr, sizeof(trid.traddr), "1234");
+ snprintf(trid.trsvcid, sizeof(trid.trsvcid), "5678");
+ spdk_nvmf_subsystem_add_listener(subsystem, &trid, _subsystem_add_listen_done, NULL);
+ subsystem->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+
+ /* Get only genctr (first field in the header) */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0,
+ sizeof(disc_log->genctr));
+ CU_ASSERT(disc_log->genctr == 2); /* genctr was bumped twice: once for the new subsystem, once for its listener */
+
+ /* Get only the header, no entries */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0, sizeof(*disc_log));
+ CU_ASSERT(disc_log->genctr == 2);
+ CU_ASSERT(disc_log->numrec == 1);
+
+ /* Offset 0, exact size match */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0,
+ sizeof(*disc_log) + sizeof(disc_log->entries[0]));
+ CU_ASSERT(disc_log->genctr != 0);
+ CU_ASSERT(disc_log->numrec == 1);
+ CU_ASSERT(disc_log->entries[0].trtype == 42);
+
+ /* Offset 0, oversize buffer */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0, sizeof(buffer));
+ CU_ASSERT(disc_log->genctr != 0);
+ CU_ASSERT(disc_log->numrec == 1);
+ CU_ASSERT(disc_log->entries[0].trtype == 42);
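+ /* The tail of an oversized buffer must be zero-filled rather than left with the 0xCC poison pattern */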
+ CU_ASSERT(spdk_mem_all_zero(buffer + sizeof(*disc_log) + sizeof(disc_log->entries[0]),
+ sizeof(buffer) - (sizeof(*disc_log) + sizeof(disc_log->entries[0]))));
+
+ /* Get just the first entry, no header */
+ memset(buffer, 0xCC, sizeof(buffer));
+ entry = (struct spdk_nvmf_discovery_log_page_entry *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov,
+ 1,
+ offsetof(struct spdk_nvmf_discovery_log_page, entries[0]),
+ sizeof(*entry));
+ CU_ASSERT(entry->trtype == 42);
+ subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
+ spdk_nvmf_subsystem_destroy(subsystem);
+ free(tgt.subsystems);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_discovery_log);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/fc.c/.gitignore b/src/spdk/test/unit/lib/nvmf/fc.c/.gitignore
new file mode 100644
index 000000000..3895b84ab
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc.c/.gitignore
@@ -0,0 +1 @@
+fc_ut
diff --git a/src/spdk/test/unit/lib/nvmf/fc.c/Makefile b/src/spdk/test/unit/lib/nvmf/fc.c/Makefile
new file mode 100644
index 000000000..7f54f1520
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc.c/Makefile
@@ -0,0 +1,58 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2018 Broadcom. All Rights Reserved.
+# The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../)
+include $(SPDK_ROOT_DIR)/mk/config.mk
+
+CFLAGS += -I$(SPDK_ROOT_DIR)/test/common/lib -I$(SPDK_ROOT_DIR)/lib \
+-I$(SPDK_ROOT_DIR)/lib/nvmf
+
+ifneq ($(strip $(CONFIG_FC_PATH)),)
+CFLAGS += -I$(CONFIG_FC_PATH)
+endif
+
+TEST_FILE = fc_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
+
+# Disable clang warning: taking address of packed member of class or structure may result in an unaligned pointer value [-Werror,-Waddress-of-packed-member]
+ifeq ($(CC),clang)
+ CLANG_VERSION := $(shell $(CC) -v 2>&1 | \
+ sed -n "s/.*version \([0-9]*\.[0-9]*\).*/\1/p")
+
+CLANG_MAJOR_VERSION := $(shell echo $(CLANG_VERSION) | cut -f1 -d.)
+
+ifeq ($(shell test $(CLANG_MAJOR_VERSION) -ge 4 && echo 1), 1)
+ CFLAGS += -Wno-address-of-packed-member
+endif
+endif
diff --git a/src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c b/src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c
new file mode 100644
index 000000000..a8d4b3b96
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c
@@ -0,0 +1,505 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2018-2019 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* NVMF FC Transport Unit Test */
+
+#include "spdk/env.h"
+#include "spdk_cunit.h"
+#include "spdk/nvmf.h"
+#include "spdk/endian.h"
+#include "spdk/trace.h"
+#include "spdk_internal/log.h"
+
+#include "ut_multithread.c"
+
+#include "transport.h"
+#include "nvmf_internal.h"
+
+#include "nvmf_fc.h"
+
+#include "json/json_util.c"
+#include "json/json_write.c"
+#include "nvmf/nvmf.c"
+#include "nvmf/transport.c"
+#include "nvmf/subsystem.c"
+#include "nvmf/fc.c"
+#include "nvmf/fc_ls.c"
+
+/*
+ * SPDK stubs and helpers used by the code under test
+ */
+
+#ifdef SPDK_CONFIG_RDMA
+const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
+ .type = SPDK_NVME_TRANSPORT_RDMA,
+ .opts_init = NULL,
+ .create = NULL,
+ .destroy = NULL,
+
+ .listen = NULL,
+ .stop_listen = NULL,
+ .accept = NULL,
+
+ .listener_discover = NULL,
+
+ .poll_group_create = NULL,
+ .poll_group_destroy = NULL,
+ .poll_group_add = NULL,
+ .poll_group_poll = NULL,
+
+ .req_free = NULL,
+ .req_complete = NULL,
+
+ .qpair_fini = NULL,
+ .qpair_get_peer_trid = NULL,
+ .qpair_get_local_trid = NULL,
+ .qpair_get_listen_trid = NULL,
+};
+#endif
+
+const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
+ .type = SPDK_NVME_TRANSPORT_TCP,
+};
+
+struct spdk_trace_histories *g_trace_histories;
+
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+DEFINE_STUB(spdk_nvme_transport_id_compare, int,
+ (const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2), 0);
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description,
+ (const char *name, uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object, uint8_t arg1_type,
+ const char *arg1_name));
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
+DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
+ NULL);
+DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
+DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
+DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
+ spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc), 0);
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
+DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);
+
+DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
+ struct spdk_nvmf_ctrlr_data *cdata));
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
+ -ENOSPC);
+
+const char *
+spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
+{
+ switch (trtype) {
+ case SPDK_NVME_TRANSPORT_PCIE:
+ return "PCIe";
+ case SPDK_NVME_TRANSPORT_RDMA:
+ return "RDMA";
+ case SPDK_NVME_TRANSPORT_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+const char *
+spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
+{
+ switch (adrfam) {
+ case SPDK_NVMF_ADRFAM_IPV4:
+ return "IPv4";
+ case SPDK_NVMF_ADRFAM_IPV6:
+ return "IPv6";
+ case SPDK_NVMF_ADRFAM_IB:
+ return "IB";
+ case SPDK_NVMF_ADRFAM_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+static bool g_lld_init_called = false;
+
+int
+nvmf_fc_lld_init(void)
+{
+ g_lld_init_called = true;
+ return 0;
+}
+
+static bool g_lld_fini_called = false;
+
+void
+nvmf_fc_lld_fini(void)
+{
+ g_lld_fini_called = true;
+}
+
+DEFINE_STUB_V(nvmf_fc_lld_start, (void));
+DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
+DEFINE_STUB_V(nvmf_fc_reinit_q, (void *queues_prev, void *queues_curr));
+DEFINE_STUB(nvmf_fc_init_rqpair_buffers, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
+DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
+DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
+ 0);
+DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
+DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
+DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
+DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
+ uint32_t ersp_len), 0);
+DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
+ struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
+DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_xchg *xri,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
+DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
+ uint16_t ox_id, uint16_t rx_id,
+ uint16_t rpi, bool rjt, uint8_t rjt_exp,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
+DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
+ size_t rsp_len), NULL);
+DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
+DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
+DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
+DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
+ uint16_t skip_rq), 0);
+DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
+ uint64_t *conn_id, uint32_t sq_size), true);
+DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
+ (struct spdk_nvmf_fc_hwqp *queues,
+ uint32_t num_queues, uint64_t conn_id), NULL);
+DEFINE_STUB_V(nvmf_fc_release_conn, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id,
+ uint32_t sq_size));
+DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
+ struct spdk_nvmf_fc_hwqp *io_queues,
+ uint32_t num_io_queues,
+ struct spdk_nvmf_fc_queue_dump_info *dump_info));
+DEFINE_STUB_V(nvmf_fc_get_xri_info, (struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_xchg_info *info));
+DEFINE_STUB(nvmf_fc_get_rsvd_thread, struct spdk_thread *, (void), NULL);
+
+uint32_t
+nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
+{
+ hwqp->lcore_id++;
+ return 0; /* always return 0, otherwise poll_threads() would spin forever */
+}
+
+struct spdk_nvmf_fc_xchg *
+nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
+{
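+ /* One static exchange is enough here: the unit test is single-threaded, so handing out the same exchange on every call is safe for these tests. */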
+ static struct spdk_nvmf_fc_xchg xchg;
+
+ xchg.xchg_id = 1;
+ return &xchg;
+}
+
+#define MAX_FC_UT_POLL_THREADS 8
+static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
+#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
+static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
+static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
+uint8_t g_fc_port_handle = 0xff;
+struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];
+
+static void
+_add_transport_done(void *arg, int status)
+{
+ CU_ASSERT(status == 0);
+}
+
+static void
+_add_transport_done_dup_err(void *arg, int status)
+{
+ CU_ASSERT(status == -EEXIST);
+}
+
+static void
+create_transport_test(void)
+{
+ const struct spdk_nvmf_transport_ops *ops = NULL;
+ struct spdk_nvmf_transport_opts opts = { 0 };
+ struct spdk_nvmf_target_opts tgt_opts = {
+ .name = "nvmf_test_tgt",
+ .max_subsystems = 0
+ };
+
+ allocate_threads(8);
+ set_thread(0);
+
+ g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
+
+ ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
+ SPDK_CU_ASSERT_FATAL(ops != NULL);
+
+ ops->opts_init(&opts);
+
+ g_lld_init_called = false;
+ g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ CU_ASSERT(g_lld_init_called == true);
+ CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
+ CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
+ CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
+ CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
+ CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
+ CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);
+
+ set_thread(0);
+
+ spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
+ _add_transport_done, NULL);
+ poll_thread(0);
+
+ /* Add transport again - should get error */
+ spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
+ _add_transport_done_dup_err, NULL);
+ poll_thread(0);
+
+ /* create transport with bad args/options */
+#ifndef SPDK_CONFIG_RDMA
+ CU_ASSERT(spdk_nvmf_transport_create("RDMA", &opts) == NULL);
+#endif
+ CU_ASSERT(spdk_nvmf_transport_create("Bogus Transport", &opts) == NULL);
+ opts.max_io_size = 1024 ^ 3; /* note: ^ is XOR in C, so this is 1027 rather than 1024^3 - an invalid max_io_size either way */
+ CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
+ opts.max_io_size = 999;
+ opts.io_unit_size = 1024; /* io_unit_size larger than max_io_size must also be rejected */
+ CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
+}
+
+static void
+port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
+{
+ CU_ASSERT(err == 0);
+ CU_ASSERT(port_handle == 2);
+ g_fc_port_handle = port_handle;
+}
+
+static void
+create_fc_port_test(void)
+{
+ struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
+ struct spdk_nvmf_fc_port *fc_port = NULL;
+ int err;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ init_args.port_handle = 2;
+ init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
+ init_args.ls_queue_size = 100;
+ init_args.io_queue_size = 100;
+ init_args.io_queues = (void *)lld_q;
+
+ set_thread(0);
+ err = nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
+ CU_ASSERT(err == 0);
+ poll_thread(0);
+
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ CU_ASSERT(fc_port != NULL);
+}
+
+static void
+online_fc_port_test(void)
+{
+ struct spdk_nvmf_fc_port *fc_port;
+ struct spdk_nvmf_fc_hw_port_online_args args;
+ int err;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ SPDK_CU_ASSERT_FATAL(fc_port != NULL);
+
+ set_thread(0);
+ args.port_handle = g_fc_port_handle;
+ err = nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
+ CU_ASSERT(err == 0);
+ poll_threads();
+ set_thread(0);
+ if (err == 0) {
+ uint32_t i;
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
+ CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
+ }
+ }
+}
+
+static void
+create_poll_groups_test(void)
+{
+ unsigned i;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
+ set_thread(i);
+ g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
+ poll_thread(i);
+ CU_ASSERT(g_poll_groups[i] != NULL);
+ }
+ set_thread(0);
+}
+
+static void
+poll_group_poll_test(void)
+{
+ unsigned i;
+ unsigned poll_cnt = 10;
+ struct spdk_nvmf_fc_port *fc_port = NULL;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ set_thread(0);
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ SPDK_CU_ASSERT_FATAL(fc_port != NULL);
+
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ fc_port->io_queues[i].lcore_id = 0;
+ }
+
+ for (i = 0; i < poll_cnt; i++) {
+ /* this should cause spdk_nvmf_fc_poll_group_poll to be called */
+ poll_threads();
+ }
+
+ /* check if hwqp's lcore_id has been updated */
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
+ }
+}
+
+static void
+remove_hwqps_from_poll_groups_test(void)
+{
+ unsigned i;
+ struct spdk_nvmf_fc_port *fc_port = NULL;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ SPDK_CU_ASSERT_FATAL(fc_port != NULL);
+
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i]);
+ poll_threads();
+ CU_ASSERT(fc_port->io_queues[i].fgroup == 0);
+ }
+}
+
+static void
+destroy_transport_test(void)
+{
+ unsigned i;
+
+ set_thread(0);
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
+ set_thread(i);
+ spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
+ poll_thread(0);
+ }
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
+ g_lld_fini_called = false;
+ spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
+ poll_threads();
+ CU_ASSERT(g_lld_fini_called == true);
+}
+
+static int
+nvmf_fc_tests_init(void)
+{
+ return 0;
+}
+
+static int
+nvmf_fc_tests_fini(void)
+{
+ free_threads();
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int num_failures = 0;
+ CU_pSuite suite = NULL;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);
+
+ CU_ADD_TEST(suite, create_transport_test);
+ CU_ADD_TEST(suite, create_poll_groups_test);
+ CU_ADD_TEST(suite, create_fc_port_test);
+ CU_ADD_TEST(suite, online_fc_port_test);
+ CU_ADD_TEST(suite, poll_group_poll_test);
+ CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
+ CU_ADD_TEST(suite, destroy_transport_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore b/src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore
new file mode 100644
index 000000000..ac5b0c40e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore
@@ -0,0 +1 @@
+fc_ls_ut
diff --git a/src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile b/src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile
new file mode 100644
index 000000000..d9143e627
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile
@@ -0,0 +1,45 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2018 Broadcom. All Rights Reserved.
+# The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../)
+include $(SPDK_ROOT_DIR)/mk/config.mk
+
+CFLAGS += -I$(SPDK_ROOT_DIR)/test/common/lib -I$(SPDK_ROOT_DIR)/lib/nvmf
+
+ifneq ($(strip $(CONFIG_FC_PATH)),)
+CFLAGS += -I$(CONFIG_FC_PATH)
+endif
+
+TEST_FILE = fc_ls_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c b/src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c
new file mode 100644
index 000000000..68eb81960
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c
@@ -0,0 +1,1070 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2018-2019 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* NVMF FC LS Command Processor Unit Test */
+
+#include "spdk/env.h"
+#include "spdk_cunit.h"
+#include "spdk/nvmf.h"
+#include "spdk/endian.h"
+#include "spdk/trace.h"
+#include "spdk_internal/log.h"
+
+#include "ut_multithread.c"
+
+#include "transport.h"
+#include "nvmf_internal.h"
+#include "nvmf_fc.h"
+
+#include "fc_ls.c"
+
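+/*
+ * Sentinel value for g_last_rslt: the max-association test received the
+ * expected "insufficient resources" reject and should stop creating.
+ */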
+#define LAST_RSLT_STOP_TEST 999
+
+void spdk_set_thread(struct spdk_thread *thread);
+
+/*
+ * SPDK stubs and helpers used by the LS processor under test
+ */
+
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -ENOSPC);
+DEFINE_STUB(spdk_nvmf_subsystem_host_allowed, bool,
+ (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn), true);
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+
+static const char *fc_ut_subsystem_nqn =
+ "nqn.2017-11.io.spdk:sn.390c0dc7c87011e786b300a0989adc53:subsystem.good";
+static struct spdk_nvmf_host fc_ut_initiator = {
+ .nqn = "nqn.2017-11.fc_host",
+};
+static struct spdk_nvmf_host *fc_ut_host = &fc_ut_initiator;
+static struct spdk_nvmf_tgt g_nvmf_tgt;
+static struct spdk_nvmf_transport_opts g_nvmf_transport_opts = {
+ .max_queue_depth = 128,
+ .max_qpairs_per_ctrlr = 4,
+ .max_aq_depth = 32,
+};
+static uint32_t g_hw_queue_depth = 1024;
+static struct spdk_nvmf_subsystem g_nvmf_subsystem;
+
+void nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args);
+void spdk_bdev_io_abort(struct spdk_bdev_io *bdev_io, void *ctx);
+void nvmf_fc_request_abort_complete(void *arg1);
+bool nvmf_fc_req_in_xfer(struct spdk_nvmf_fc_request *fc_req);
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ if (!strcmp(subnqn, g_nvmf_subsystem.subnqn)) {
+ return &g_nvmf_subsystem;
+ }
+ return NULL;
+}
+
+int
+spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_qpair *qpair)
+{
+ qpair->state = SPDK_NVMF_QPAIR_ACTIVE;
+ return 0;
+}
+
+const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
+ .type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
+ .create = NULL,
+ .destroy = NULL,
+
+ .listen = NULL,
+ .stop_listen = NULL,
+ .accept = NULL,
+
+ .listener_discover = NULL,
+
+ .poll_group_create = NULL,
+ .poll_group_destroy = NULL,
+ .poll_group_add = NULL,
+ .poll_group_poll = NULL,
+
+ .req_complete = NULL,
+
+ .qpair_fini = NULL,
+
+};
+
+struct spdk_nvmf_transport g_nvmf_transport = {
+ .ops = &spdk_nvmf_transport_fc,
+ .tgt = &g_nvmf_tgt,
+};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
+{
+ return &g_nvmf_transport;
+}
+
+int
+spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
+{
+ cb_fn(ctx);
+ return 0;
+}
+
+void
+spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair)
+{
+ uint32_t i;
+ struct spdk_nvmf_fc_conn *fc_conn;
+ struct spdk_nvmf_fc_hwqp *hwqp = NULL, *sel_hwqp = NULL;
+ struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
+ struct spdk_nvmf_fc_port *fc_port;
+
+ fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
+ api_data = &fc_conn->create_opd->u.add_conn;
+
+ /* Pick the hwqp with the most free queue entries (largest rq_size), i.e. the least-loaded one */
+ fc_port = fc_conn->fc_assoc->tgtport->fc_port;
+ for (i = 0; i < fc_port->num_io_queues; i ++) {
+ hwqp = &fc_port->io_queues[i];
+ if (!sel_hwqp || (hwqp->rq_size > sel_hwqp->rq_size)) {
+ sel_hwqp = hwqp;
+ }
+ }
+
+ if (!nvmf_fc_assign_conn_to_hwqp(sel_hwqp,
+ &fc_conn->conn_id,
+ fc_conn->max_queue_depth)) {
+ goto err;
+ }
+
+ fc_conn->hwqp = sel_hwqp;
+
+ /* For the admin connection (qid 0), the connection ID doubles as the association ID. */
+ if (fc_conn->qpair.qid == 0) {
+ fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
+ }
+
+ nvmf_fc_poller_api_func(sel_hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
+
+ return;
+err:
+ nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
+ api_data->args.fc_conn, api_data->aq_conn);
+}
+
+struct spdk_nvmf_fc_conn *
+nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
+{
+ struct spdk_nvmf_fc_conn *fc_conn;
+
+ TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
+ if (fc_conn->conn_id == conn_id) {
+ return fc_conn;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * LLD functions
+ */
+
+static inline uint64_t
+nvmf_fc_gen_conn_id(uint32_t qnum, struct spdk_nvmf_fc_hwqp *hwqp)
+{
+ static uint16_t conn_cnt = 0;
+ return ((uint64_t) qnum | (conn_cnt++ << 8));
+}
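+
+/*
+ * A conn_id therefore packs the hwqp number into the low byte and a
+ * rolling counter into bits 8 and up; for qnum 3, for example:
+ *
+ *   3 | (0 << 8) = 0x003   first connection
+ *   3 | (1 << 8) = 0x103   second connection
+ *
+ * nvmf_fc_get_hwqp_from_conn_id() below recovers the queue index with
+ * (conn_id & 0xff) % num_queues.
+ */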
+
+bool
+nvmf_fc_assign_conn_to_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
+ uint64_t *conn_id, uint32_t sq_size)
+{
+ SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_LS, "Assign connection to HWQP\n");
+
+ if (hwqp->rq_size < sq_size) {
+ return false; /* queue has no space for this connection */
+ }
+
+ hwqp->rq_size -= sq_size;
+ hwqp->num_conns++;
+
+ /* create connection ID */
+ *conn_id = nvmf_fc_gen_conn_id(hwqp->hwqp_id, hwqp);
+
+ SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_LS,
+ "New connection assigned to HWQP%d (free %d), conn_id 0x%lx\n",
+ hwqp->hwqp_id, hwqp->rq_size, *conn_id);
+ return true;
+}
+
+struct spdk_nvmf_fc_hwqp *
+nvmf_fc_get_hwqp_from_conn_id(struct spdk_nvmf_fc_hwqp *queues,
+ uint32_t num_queues, uint64_t conn_id)
+{
+ return &queues[(conn_id & 0xff) % num_queues];
+}
+
+void
+nvmf_fc_release_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id,
+ uint32_t sq_size)
+{
+ hwqp->rq_size += sq_size;
+}
+
+struct spdk_nvmf_fc_srsr_bufs *
+nvmf_fc_alloc_srsr_bufs(size_t rqst_len, size_t rsp_len)
+{
+ struct spdk_nvmf_fc_srsr_bufs *srsr_bufs;
+
+ srsr_bufs = calloc(1, sizeof(struct spdk_nvmf_fc_srsr_bufs));
+ if (!srsr_bufs) {
+ return NULL;
+ }
+
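+ /*
+ * A single allocation backs both buffers, laid out back to back:
+ * rqst occupies the first rqst_len bytes and rsp points just past it.
+ */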
+ srsr_bufs->rqst = calloc(1, rqst_len + rsp_len);
+ if (srsr_bufs->rqst) {
+ srsr_bufs->rqst_len = rqst_len;
+ srsr_bufs->rsp = srsr_bufs->rqst + rqst_len;
+ srsr_bufs->rsp_len = rsp_len;
+ } else {
+ free(srsr_bufs);
+ srsr_bufs = NULL;
+ }
+
+ return srsr_bufs;
+}
+
+void
+nvmf_fc_free_srsr_bufs(struct spdk_nvmf_fc_srsr_bufs *srsr_bufs)
+{
+ if (srsr_bufs) {
+ free(srsr_bufs->rqst);
+ free(srsr_bufs);
+ }
+}
+
+/*
+ * The Tests
+ */
+
+enum _test_run_type {
+ TEST_RUN_TYPE_CREATE_ASSOC = 1,
+ TEST_RUN_TYPE_CREATE_CONN,
+ TEST_RUN_TYPE_DISCONNECT,
+ TEST_RUN_TYPE_CONN_BAD_ASSOC,
+ TEST_RUN_TYPE_FAIL_LS_RSP,
+ TEST_RUN_TYPE_DISCONNECT_BAD_ASSOC,
+ TEST_RUN_TYPE_CREATE_MAX_ASSOC,
+};
+
+static uint32_t g_test_run_type = 0;
+static uint64_t g_curr_assoc_id = 0;
+static uint16_t g_create_conn_test_cnt = 0;
+static uint16_t g_max_assoc_conn_test = 0;
+static int g_last_rslt = 0;
+static bool g_spdk_nvmf_fc_xmt_srsr_req = false;
+static struct spdk_nvmf_fc_remote_port_info g_rem_port;
+
+static void
+run_create_assoc_test(const char *subnqn,
+ struct spdk_nvmf_host *host,
+ struct spdk_nvmf_fc_nport *tgt_port)
+{
+ struct spdk_nvmf_fc_ls_rqst ls_rqst;
+ struct spdk_nvmf_fc_ls_cr_assoc_rqst ca_rqst;
+ uint8_t respbuf[128];
+
+ memset(&ca_rqst, 0, sizeof(struct spdk_nvmf_fc_ls_cr_assoc_rqst));
+
+ ca_rqst.w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
+ to_be32(&ca_rqst.desc_list_len,
+ sizeof(struct spdk_nvmf_fc_ls_cr_assoc_rqst) -
+ (2 * sizeof(uint32_t)));
+ to_be32(&ca_rqst.assoc_cmd.desc_tag, FCNVME_LSDESC_CREATE_ASSOC_CMD);
+ to_be32(&ca_rqst.assoc_cmd.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_cr_assoc_cmd) -
+ (2 * sizeof(uint32_t)));
+ to_be16(&ca_rqst.assoc_cmd.ersp_ratio, (g_nvmf_transport.opts.max_aq_depth / 2));
+ to_be16(&ca_rqst.assoc_cmd.sqsize, g_nvmf_transport.opts.max_aq_depth - 1);
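+ /* SQSIZE is 0's based, hence the admin queue depth minus one */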
+ snprintf(&ca_rqst.assoc_cmd.subnqn[0], strlen(subnqn) + 1, "%s", subnqn);
+ snprintf(&ca_rqst.assoc_cmd.hostnqn[0], strlen(host->nqn) + 1, "%s", host->nqn);
+ ls_rqst.rqstbuf.virt = &ca_rqst;
+ ls_rqst.rspbuf.virt = respbuf;
+ ls_rqst.rqst_len = sizeof(struct spdk_nvmf_fc_ls_cr_assoc_rqst);
+ ls_rqst.rsp_len = 0;
+ ls_rqst.rpi = 5000;
+ ls_rqst.private_data = NULL;
+ ls_rqst.s_id = 0;
+ ls_rqst.nport = tgt_port;
+ ls_rqst.rport = &g_rem_port;
+ ls_rqst.nvmf_tgt = &g_nvmf_tgt;
+
+ nvmf_fc_handle_ls_rqst(&ls_rqst);
+ poll_thread(0);
+}
+
+static void
+run_create_conn_test(struct spdk_nvmf_host *host,
+ struct spdk_nvmf_fc_nport *tgt_port,
+ uint64_t assoc_id,
+ uint16_t qid)
+{
+ struct spdk_nvmf_fc_ls_rqst ls_rqst;
+ struct spdk_nvmf_fc_ls_cr_conn_rqst cc_rqst;
+ uint8_t respbuf[128];
+
+ memset(&cc_rqst, 0, sizeof(struct spdk_nvmf_fc_ls_cr_conn_rqst));
+
+ /* fill in request descriptor */
+ cc_rqst.w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
+ to_be32(&cc_rqst.desc_list_len,
+ sizeof(struct spdk_nvmf_fc_ls_cr_conn_rqst) -
+ (2 * sizeof(uint32_t)));
+
+ /* fill in connect command descriptor */
+ to_be32(&cc_rqst.connect_cmd.desc_tag, FCNVME_LSDESC_CREATE_CONN_CMD);
+ to_be32(&cc_rqst.connect_cmd.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_cr_conn_cmd) -
+ (2 * sizeof(uint32_t)));
+
+ to_be16(&cc_rqst.connect_cmd.ersp_ratio, (g_nvmf_transport.opts.max_queue_depth / 2));
+ to_be16(&cc_rqst.connect_cmd.sqsize, g_nvmf_transport.opts.max_queue_depth - 1);
+ to_be16(&cc_rqst.connect_cmd.qid, qid);
+
+ /* fill in association id descriptor */
+ to_be32(&cc_rqst.assoc_id.desc_tag, FCNVME_LSDESC_ASSOC_ID);
+ to_be32(&cc_rqst.assoc_id.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
+ (2 * sizeof(uint32_t)));
+ cc_rqst.assoc_id.association_id = assoc_id; /* already big-endian (be64) */
+
+ ls_rqst.rqstbuf.virt = &cc_rqst;
+ ls_rqst.rspbuf.virt = respbuf;
+ ls_rqst.rqst_len = sizeof(struct spdk_nvmf_fc_ls_cr_conn_rqst);
+ ls_rqst.rsp_len = 0;
+ ls_rqst.rpi = 5000;
+ ls_rqst.private_data = NULL;
+ ls_rqst.s_id = 0;
+ ls_rqst.nport = tgt_port;
+ ls_rqst.rport = &g_rem_port;
+ ls_rqst.nvmf_tgt = &g_nvmf_tgt;
+
+ nvmf_fc_handle_ls_rqst(&ls_rqst);
+ poll_thread(0);
+}
+
+static void
+run_disconn_test(struct spdk_nvmf_fc_nport *tgt_port,
+ uint64_t assoc_id)
+{
+ struct spdk_nvmf_fc_ls_rqst ls_rqst;
+ struct spdk_nvmf_fc_ls_disconnect_rqst dc_rqst;
+ uint8_t respbuf[128];
+
+ memset(&dc_rqst, 0, sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst));
+
+ /* fill in request descriptor */
+ dc_rqst.w0.ls_cmd = FCNVME_LS_DISCONNECT;
+ to_be32(&dc_rqst.desc_list_len,
+ sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst) -
+ (2 * sizeof(uint32_t)));
+
+ /* fill in disconnect command descriptor */
+ to_be32(&dc_rqst.disconn_cmd.desc_tag, FCNVME_LSDESC_DISCONN_CMD);
+ to_be32(&dc_rqst.disconn_cmd.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_disconn_cmd) -
+ (2 * sizeof(uint32_t)));
+
+ /* fill in association id descriptor */
+ to_be32(&dc_rqst.assoc_id.desc_tag, FCNVME_LSDESC_ASSOC_ID);
+ to_be32(&dc_rqst.assoc_id.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
+ (2 * sizeof(uint32_t)));
+ dc_rqst.assoc_id.association_id = assoc_id; /* already big-endian (be64) */
+
+ ls_rqst.rqstbuf.virt = &dc_rqst;
+ ls_rqst.rspbuf.virt = respbuf;
+ ls_rqst.rqst_len = sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst);
+ ls_rqst.rsp_len = 0;
+ ls_rqst.rpi = 5000;
+ ls_rqst.private_data = NULL;
+ ls_rqst.s_id = 0;
+ ls_rqst.nport = tgt_port;
+ ls_rqst.rport = &g_rem_port;
+ ls_rqst.nvmf_tgt = &g_nvmf_tgt;
+
+ nvmf_fc_handle_ls_rqst(&ls_rqst);
+ poll_thread(0);
+}
+
+static void
+disconnect_assoc_cb(void *cb_data, uint32_t err)
+{
+ CU_ASSERT(err == 0);
+}
+
+static int
+handle_ca_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst, bool max_assoc_test)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_CREATE_ASSOCIATION) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_ACC) {
+ struct spdk_nvmf_fc_ls_cr_assoc_acc *acc =
+ (struct spdk_nvmf_fc_ls_cr_assoc_acc *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&acc_hdr->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_cr_assoc_acc) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rqst) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&acc->assoc_id.desc_tag) ==
+ FCNVME_LSDESC_ASSOC_ID);
+ CU_ASSERT(from_be32(&acc->assoc_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) - 8);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_tag) ==
+ FCNVME_LSDESC_CONN_ID);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_conn_id) - 8);
+
+ g_curr_assoc_id = acc->assoc_id.association_id;
+ g_create_conn_test_cnt++;
+ return 0;
+ } else if (max_assoc_test) {
+ /* reject reason code should be insufficient resources */
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+ if (rjt->rjt.reason_code == FCNVME_RJT_RC_INSUFF_RES) {
+ return LAST_RSLT_STOP_TEST;
+ }
+ }
+ CU_FAIL("Unexpected reject response for create association");
+ } else {
+ CU_FAIL("Response not for create association");
+ }
+
+ return -EINVAL;
+}
+
+static int
+handle_cc_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_CREATE_CONNECTION) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_ACC) {
+ struct spdk_nvmf_fc_ls_cr_conn_acc *acc =
+ (struct spdk_nvmf_fc_ls_cr_conn_acc *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&acc_hdr->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_cr_conn_acc) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rqst) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_tag) ==
+ FCNVME_LSDESC_CONN_ID);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_conn_id) - 8);
+ g_create_conn_test_cnt++;
+ return 0;
+ }
+
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_RJT) {
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+ if (g_create_conn_test_cnt == g_nvmf_transport.opts.max_qpairs_per_ctrlr) {
+ /* expected to get reject for too many connections */
+ CU_ASSERT(rjt->rjt.reason_code ==
+ FCNVME_RJT_RC_INV_PARAM);
+ CU_ASSERT(rjt->rjt.reason_explanation ==
+ FCNVME_RJT_EXP_INV_Q_ID);
+ } else if (!g_max_assoc_conn_test) {
+ CU_FAIL("Unexpected reject response create connection");
+ }
+ } else {
+ CU_FAIL("Unexpected response code for create connection");
+ }
+ } else {
+ CU_FAIL("Response not for create connection");
+ }
+
+ return -EINVAL;
+}
+
+static int
+handle_disconn_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_DISCONNECT) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_ACC) {
+ CU_ASSERT(from_be32(&acc_hdr->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_disconnect_acc) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rqst) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ return 0;
+ } else {
+ CU_FAIL("Unexpected reject response for disconnect");
+ }
+ } else {
+ CU_FAIL("Response not for create connection");
+ }
+
+ return -EINVAL;
+}
+
+static int
+handle_conn_bad_assoc_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_CREATE_CONNECTION) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_RJT) {
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&rjt->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_tag) ==
+ FCNVME_LSDESC_RJT);
+ CU_ASSERT(rjt->rjt.reason_code ==
+ FCNVME_RJT_RC_INV_ASSOC);
+ CU_ASSERT(rjt->rjt.reason_explanation ==
+ FCNVME_RJT_EXP_NONE);
+ /* make sure reserved fields are 0 */
+ CU_ASSERT(rjt->rjt.rsvd8 == 0);
+ CU_ASSERT(rjt->rjt.rsvd12 == 0);
+ return 0;
+ } else {
+ CU_FAIL("Unexpected accept response for create conn. on bad assoc_id");
+ }
+ } else {
+ CU_FAIL("Response not for create connection on bad assoc_id");
+ }
+
+ return -EINVAL;
+}
+
+static int
+handle_disconn_bad_assoc_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_DISCONNECT) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_RJT) {
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&rjt->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_tag) ==
+ FCNVME_LSDESC_RJT);
+ CU_ASSERT(rjt->rjt.reason_code ==
+ FCNVME_RJT_RC_INV_ASSOC);
+ CU_ASSERT(rjt->rjt.reason_explanation ==
+ FCNVME_RJT_EXP_NONE);
+ return 0;
+ } else {
+ CU_FAIL("Unexpected accept response for disconnect on bad assoc_id");
+ }
+ } else {
+ CU_FAIL("Response not for dsconnect on bad assoc_id");
+ }
+
+ return -EINVAL;
+}
+
+
+static struct spdk_nvmf_fc_port g_fc_port = {
+ .num_io_queues = 16,
+};
+
+static struct spdk_nvmf_fc_nport g_tgt_port;
+
+static uint64_t assoc_id[1024];
+
+#define FC_LS_UT_MAX_IO_QUEUES 16
+struct spdk_nvmf_fc_hwqp g_fc_hwqp[FC_LS_UT_MAX_IO_QUEUES];
+struct spdk_nvmf_fc_poll_group g_fgroup[FC_LS_UT_MAX_IO_QUEUES];
+struct spdk_nvmf_poll_group g_poll_group[FC_LS_UT_MAX_IO_QUEUES];
+static bool threads_allocated = false;
+
+static void
+ls_assign_hwqp_threads(void)
+{
+ uint32_t i;
+
+ for (i = 0; i < g_fc_port.num_io_queues; i++) {
+ struct spdk_nvmf_fc_hwqp *hwqp = &g_fc_port.io_queues[i];
+ if (hwqp->thread == NULL) {
+ hwqp->thread = spdk_get_thread();
+ }
+ }
+}
+
+static void
+ls_prepare_threads(void)
+{
+ if (threads_allocated == false) {
+ allocate_threads(8);
+ set_thread(0);
+ }
+ threads_allocated = true;
+}
+
+static void
+setup_polling_threads(void)
+{
+ ls_prepare_threads();
+ set_thread(0);
+ ls_assign_hwqp_threads();
+}
+
+static int
+ls_tests_init(void)
+{
+ uint16_t i;
+
+ bzero(&g_nvmf_tgt, sizeof(g_nvmf_tgt));
+
+ g_nvmf_transport.opts = g_nvmf_transport_opts;
+
+ snprintf(g_nvmf_subsystem.subnqn, sizeof(g_nvmf_subsystem.subnqn), "%s", fc_ut_subsystem_nqn);
+ g_fc_port.hw_port_status = SPDK_FC_PORT_ONLINE;
+ g_fc_port.io_queues = g_fc_hwqp;
+ for (i = 0; i < g_fc_port.num_io_queues; i++) {
+ struct spdk_nvmf_fc_hwqp *hwqp = &g_fc_port.io_queues[i];
+ hwqp->lcore_id = i;
+ hwqp->hwqp_id = i;
+ hwqp->thread = NULL;
+ hwqp->fc_port = &g_fc_port;
+ hwqp->num_conns = 0;
+ hwqp->rq_size = g_hw_queue_depth;
+ TAILQ_INIT(&hwqp->connection_list);
+ TAILQ_INIT(&hwqp->in_use_reqs);
+
+ bzero(&g_poll_group[i], sizeof(struct spdk_nvmf_poll_group));
+ bzero(&g_fgroup[i], sizeof(struct spdk_nvmf_fc_poll_group));
+ TAILQ_INIT(&g_poll_group[i].tgroups);
+ TAILQ_INIT(&g_poll_group[i].qpairs);
+ g_fgroup[i].group.transport = &g_nvmf_transport;
+ g_fgroup[i].group.group = &g_poll_group[i];
+ hwqp->fgroup = &g_fgroup[i];
+ }
+
+ nvmf_fc_ls_init(&g_fc_port);
+ bzero(&g_tgt_port, sizeof(struct spdk_nvmf_fc_nport));
+ g_tgt_port.fc_port = &g_fc_port;
+ TAILQ_INIT(&g_tgt_port.rem_port_list);
+ TAILQ_INIT(&g_tgt_port.fc_associations);
+
+ bzero(&g_rem_port, sizeof(struct spdk_nvmf_fc_remote_port_info));
+ TAILQ_INSERT_TAIL(&g_tgt_port.rem_port_list, &g_rem_port, link);
+
+ return 0;
+}
+
+static int
+ls_tests_fini(void)
+{
+ nvmf_fc_ls_fini(&g_fc_port);
+ free_threads();
+ return 0;
+}
+
+static void
+create_single_assoc_test(void)
+{
+ setup_polling_threads();
+ /* main test driver */
+ g_test_run_type = TEST_RUN_TYPE_CREATE_ASSOC;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+
+ if (g_last_rslt == 0) {
+ /* disconnect the association */
+ g_test_run_type = TEST_RUN_TYPE_DISCONNECT;
+ run_disconn_test(&g_tgt_port, g_curr_assoc_id);
+ g_create_conn_test_cnt = 0;
+ }
+}
+
+static void
+create_max_conns_test(void)
+{
+ uint16_t qid = 1;
+
+ setup_polling_threads();
+ /* main test driver */
+ g_test_run_type = TEST_RUN_TYPE_CREATE_ASSOC;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+
+ if (g_last_rslt == 0) {
+ g_test_run_type = TEST_RUN_TYPE_CREATE_CONN;
+ /* create connections until we get the "too many connections" reject */
+ while (g_last_rslt == 0) {
+ if (g_create_conn_test_cnt > g_nvmf_transport.opts.max_qpairs_per_ctrlr) {
+ CU_FAIL("Did not get CIOC failure for too many connections");
+ break;
+ }
+ run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, qid++);
+ }
+
+ /* disconnect the association */
+ g_last_rslt = 0;
+ g_test_run_type = TEST_RUN_TYPE_DISCONNECT;
+ run_disconn_test(&g_tgt_port, g_curr_assoc_id);
+ g_create_conn_test_cnt = 0;
+ }
+}
+
+static void
+invalid_connection_test(void)
+{
+ setup_polling_threads();
+ /* run test to create connection to invalid association */
+ g_test_run_type = TEST_RUN_TYPE_CONN_BAD_ASSOC;
+ run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, 1);
+}
+
+static void
+create_max_aq_conns_test(void)
+{
+ /* run test to create max. associations with max. connections */
+ uint32_t i, j;
+ uint32_t create_assoc_test_cnt = 0;
+
+ setup_polling_threads();
+ g_max_assoc_conn_test = 1;
+ g_last_rslt = 0;
+ while (1) {
+ g_test_run_type = TEST_RUN_TYPE_CREATE_MAX_ASSOC;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+ if (g_last_rslt == 0) {
+ assoc_id[create_assoc_test_cnt++] = g_curr_assoc_id;
+ g_test_run_type = TEST_RUN_TYPE_CREATE_CONN;
+ for (j = 1; j < g_nvmf_transport.opts.max_qpairs_per_ctrlr; j++) {
+ if (g_last_rslt == 0) {
+ run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, (uint16_t) j);
+ }
+ }
+ } else {
+ break;
+ }
+ }
+
+ if (g_last_rslt == LAST_RSLT_STOP_TEST) {
+ uint32_t ma = (((g_hw_queue_depth / g_nvmf_transport.opts.max_queue_depth) *
+ (g_fc_port.num_io_queues - 1))) /
+ (g_nvmf_transport.opts.max_qpairs_per_ctrlr - 1);
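+ /*
+ * With the defaults in this file (hw queue depth 1024, SQ size 128,
+ * 16 IO queues, 4 qpairs per ctrlr) this works out to
+ * ((1024 / 128) * 15) / 3 = 40 associations.
+ */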
+ if (create_assoc_test_cnt < ma) {
+ printf("(%d assocs - should be %d) ", create_assoc_test_cnt, ma);
+ CU_FAIL("Didn't create max. associations");
+ } else {
+ printf("(%d assocs.) ", create_assoc_test_cnt);
+ }
+ g_last_rslt = 0;
+ }
+
+ for (i = 0; i < create_assoc_test_cnt; i++) {
+ int ret;
+ g_spdk_nvmf_fc_xmt_srsr_req = false;
+ ret = nvmf_fc_delete_association(&g_tgt_port, from_be64(&assoc_id[i]), true, false,
+ disconnect_assoc_cb, NULL);
+ CU_ASSERT(ret == 0);
+ poll_thread(0);
+
+#if (NVMF_FC_LS_SEND_LS_DISCONNECT == 1)
+ if (ret == 0) {
+ /* check that LS disconnect was sent */
+ CU_ASSERT(g_spdk_nvmf_fc_xmt_srsr_req);
+ }
+#endif
+ }
+ g_max_assoc_conn_test = 0;
+}
+
+static void
+xmt_ls_rsp_failure_test(void)
+{
+ setup_polling_threads();
+ g_test_run_type = TEST_RUN_TYPE_FAIL_LS_RSP;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+ if (g_last_rslt == 0) {
+ /* check target port for associations */
+ CU_ASSERT(g_tgt_port.assoc_count == 0);
+ }
+}
+
+static void
+disconnect_bad_assoc_test(void)
+{
+ setup_polling_threads();
+ g_test_run_type = TEST_RUN_TYPE_DISCONNECT_BAD_ASSOC;
+ run_disconn_test(&g_tgt_port, 0xffff);
+}
+
+/*
+ * SPDK functions that are called by LS processing
+ */
+
+int
+nvmf_fc_xmt_ls_rsp(struct spdk_nvmf_fc_nport *tgt_port,
+ struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ switch (g_test_run_type) {
+ case TEST_RUN_TYPE_CREATE_ASSOC:
+ g_last_rslt = handle_ca_rsp(ls_rqst, false);
+ break;
+ case TEST_RUN_TYPE_CREATE_CONN:
+ g_last_rslt = handle_cc_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_DISCONNECT:
+ g_last_rslt = handle_disconn_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_CONN_BAD_ASSOC:
+ g_last_rslt = handle_conn_bad_assoc_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_FAIL_LS_RSP:
+ g_last_rslt = handle_ca_rsp(ls_rqst, false);
+ return 1;
+ case TEST_RUN_TYPE_DISCONNECT_BAD_ASSOC:
+ g_last_rslt = handle_disconn_bad_assoc_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_CREATE_MAX_ASSOC:
+ g_last_rslt = handle_ca_rsp(ls_rqst, true);
+ break;
+
+ default:
+ CU_FAIL("LS Response for Invalid Test Type");
+ g_last_rslt = 1;
+ }
+
+ return 0;
+}
+
+int
+nvmf_fc_xmt_srsr_req(struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_srsr_bufs *srsr_bufs,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args)
+{
+ struct spdk_nvmf_fc_ls_disconnect_rqst *dc_rqst =
+ (struct spdk_nvmf_fc_ls_disconnect_rqst *)
+ srsr_bufs->rqst;
+
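+	/* validate the LS disconnect request descriptors before flagging transmission */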
+ CU_ASSERT(dc_rqst->w0.ls_cmd == FCNVME_LS_DISCONNECT);
+ CU_ASSERT(from_be32(&dc_rqst->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst) -
+ (2 * sizeof(uint32_t)));
+ CU_ASSERT(from_be32(&dc_rqst->assoc_id.desc_tag) ==
+ FCNVME_LSDESC_ASSOC_ID);
+ CU_ASSERT(from_be32(&dc_rqst->assoc_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
+ (2 * sizeof(uint32_t)));
+
+ g_spdk_nvmf_fc_xmt_srsr_req = true;
+
+ if (cb) {
+ cb(hwqp, 0, cb_args);
+ }
+
+ return 0;
+}
+
+DEFINE_STUB_V(nvmf_fc_request_abort, (struct spdk_nvmf_fc_request *fc_req,
+ bool send_abts, spdk_nvmf_fc_caller_cb cb, void *cb_args));
+DEFINE_STUB_V(spdk_bdev_io_abort, (struct spdk_bdev_io *bdev_io, void *ctx));
+DEFINE_STUB_V(nvmf_fc_request_abort_complete, (void *arg1));
+
+static void
+usage(const char *program_name)
+{
+ printf("%s [options]\n", program_name);
+ printf("options:\n");
+ spdk_log_usage(stdout, "-t");
+ printf(" -i value - Number of IO Queues (default: %u)\n",
+ g_fc_port.num_io_queues);
+ printf(" -d value - HW queue depth (default: %u)\n",
+ g_hw_queue_depth);
+ printf(" -q value - SQ size (default: %u)\n",
+ g_nvmf_transport_opts.max_queue_depth);
+ printf(" -c value - Connection count (default: %u)\n",
+ g_nvmf_transport_opts.max_qpairs_per_ctrlr);
+ printf(" -u test# - Unit test# to run\n");
+ printf(" 0 : Run all tests (default)\n");
+ printf(" 1 : CASS/DISC create single assoc test\n");
+ printf(" 2 : Max. conns. test\n");
+ printf(" 3 : CIOC to invalid assoc_id connection test\n");
+ printf(" 4 : Create/delete max assoc conns test\n");
+ printf(" 5 : LS response failure test\n");
+ printf(" 6 : Disconnect bad assoc_id test\n");
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int num_failures = 0;
+ CU_pSuite suite = NULL;
+ int test = 0;
+ long int val;
+ int op;
+
+	while ((op = getopt(argc, argv, "q:c:t:u:d:i:")) != -1) {
+ switch (op) {
+ case 'q':
+ val = spdk_strtol(optarg, 10);
+ if (val < 16) {
+ fprintf(stderr, "SQ size must be at least 16\n");
+ return -EINVAL;
+ }
+ g_nvmf_transport_opts.max_queue_depth = (uint16_t)val;
+ break;
+ case 'c':
+ val = spdk_strtol(optarg, 10);
+ if (val < 2) {
+ fprintf(stderr, "Connection count must be at least 2\n");
+ return -EINVAL;
+ }
+ g_nvmf_transport_opts.max_qpairs_per_ctrlr = (uint16_t)val;
+ break;
+ case 't':
+ if (spdk_log_set_flag(optarg) < 0) {
+ fprintf(stderr, "Unknown trace flag '%s'\n", optarg);
+ usage(argv[0]);
+ return -EINVAL;
+ }
+ break;
+ case 'u':
+ test = (int)spdk_strtol(optarg, 10);
+ break;
+ case 'd':
+ val = spdk_strtol(optarg, 10);
+ if (val < 16) {
+ fprintf(stderr, "HW queue depth must be at least 16\n");
+ return -EINVAL;
+ }
+ g_hw_queue_depth = (uint32_t)val;
+ break;
+ case 'i':
+ val = spdk_strtol(optarg, 10);
+ if (val < 2) {
+ fprintf(stderr, "Number of io queues must be at least 2\n");
+ return -EINVAL;
+ }
+ if (val > FC_LS_UT_MAX_IO_QUEUES) {
+ fprintf(stderr, "Number of io queues can't be greater than %d\n",
+ FC_LS_UT_MAX_IO_QUEUES);
+ return -EINVAL;
+ }
+ g_fc_port.num_io_queues = (uint32_t)val;
+ break;
+
+ default:
+ usage(argv[0]);
+ return -EINVAL;
+ }
+ }
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("FC-NVMe LS", ls_tests_init, ls_tests_fini);
+
+ if (test == 0) {
+
+ CU_ADD_TEST(suite, create_single_assoc_test);
+
+ CU_ADD_TEST(suite, create_max_conns_test);
+ CU_ADD_TEST(suite, invalid_connection_test);
+ CU_ADD_TEST(suite, disconnect_bad_assoc_test);
+
+ CU_ADD_TEST(suite, create_max_aq_conns_test);
+ CU_ADD_TEST(suite, xmt_ls_rsp_failure_test);
+
+ } else {
+
+ switch (test) {
+ case 1:
+ CU_ADD_TEST(suite, create_single_assoc_test);
+ break;
+ case 2:
+ CU_ADD_TEST(suite, create_max_conns_test);
+ break;
+ case 3:
+ CU_ADD_TEST(suite, invalid_connection_test);
+ break;
+ case 4:
+ CU_ADD_TEST(suite, create_max_aq_conns_test);
+ break;
+ case 5:
+ CU_ADD_TEST(suite, xmt_ls_rsp_failure_test);
+ break;
+ case 6:
+ CU_ADD_TEST(suite, disconnect_bad_assoc_test);
+ break;
+
+ default:
+ fprintf(stderr, "Invalid test number\n");
+ usage(argv[0]);
+ CU_cleanup_registry();
+ return -EINVAL;
+ }
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore b/src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore
new file mode 100644
index 000000000..0adb59d10
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore
@@ -0,0 +1 @@
+rdma_ut
diff --git a/src/spdk/test/unit/lib/nvmf/rdma.c/Makefile b/src/spdk/test/unit/lib/nvmf/rdma.c/Makefile
new file mode 100644
index 000000000..ad4998663
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/rdma.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = rdma_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c b/src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c
new file mode 100644
index 000000000..b0af58d18
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c
@@ -0,0 +1,1283 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "common/lib/test_rdma.c"
+#include "nvmf/rdma.c"
+#include "nvmf/transport.c"
+
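+/* State for the spdk_mem_map_translate() mock below: a non-zero g_mr_size
+ * shrinks the reported translation size so a buffer appears to straddle two
+ * memory regions (g_mr_next_size takes effect on subsequent calls). */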
+uint64_t g_mr_size;
+uint64_t g_mr_next_size;
+struct ibv_mr g_rdma_mr;
+
+#define RDMA_UT_UNITS_IN_MAX_IO 16
+
+struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
+ .max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
+ .max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
+ .in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
+ .max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
+ .io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
+ .max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
+ .num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
+};
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size, uint64_t translation), 0);
+DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size), 0);
+DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
+ const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
+DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
+ nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
+DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
+ struct spdk_nvmf_ctrlr_data *cdata));
+DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
+DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2), 0);
+DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
+ struct spdk_dif_ctx *dif_ctx), false);
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
+DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
+
+const char *
+spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
+{
+ switch (trtype) {
+ case SPDK_NVME_TRANSPORT_PCIE:
+ return "PCIe";
+ case SPDK_NVME_TRANSPORT_RDMA:
+ return "RDMA";
+ case SPDK_NVME_TRANSPORT_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+int
+spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
+{
+ int len, i;
+
+ if (trstring == NULL) {
+ return -EINVAL;
+ }
+
+ len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
+ if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
+ return -EINVAL;
+ }
+
+	/* store the official trstring as the uppercase version of the input */
+ for (i = 0; i < len; i++) {
+ trid->trstring[i] = toupper(trstring[i]);
+ }
+ return 0;
+}
+
+uint64_t
+spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
+{
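+	/* report a shortened translation size when a test has armed g_mr_size */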
+ if (g_mr_size != 0) {
+		*size = g_mr_size;
+ if (g_mr_next_size != 0) {
+ g_mr_size = g_mr_next_size;
+ }
+ }
+
+ return (uint64_t)&g_rdma_mr;
+}
+
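+/* Clear every field that nvmf_rdma_request_parse_sgl() may have populated,
+ * so each test part starts from a known state. */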
+static void
+reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
+{
+ int i;
+
+ rdma_req->req.length = 0;
+ rdma_req->req.data_from_pool = false;
+ rdma_req->req.data = NULL;
+ rdma_req->data.wr.num_sge = 0;
+ rdma_req->data.wr.wr.rdma.remote_addr = 0;
+ rdma_req->data.wr.wr.rdma.rkey = 0;
+ memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
+
+ for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
+ rdma_req->req.iov[i].iov_base = 0;
+ rdma_req->req.iov[i].iov_len = 0;
+ rdma_req->req.buffers[i] = 0;
+ rdma_req->data.wr.sg_list[i].addr = 0;
+ rdma_req->data.wr.sg_list[i].length = 0;
+ rdma_req->data.wr.sg_list[i].lkey = 0;
+ }
+ rdma_req->req.iovcnt = 0;
+}
+
+static void
+test_spdk_nvmf_rdma_request_parse_sgl(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport;
+ struct spdk_nvmf_rdma_device device;
+ struct spdk_nvmf_rdma_request rdma_req = {};
+ struct spdk_nvmf_rdma_recv recv;
+ struct spdk_nvmf_rdma_poll_group group;
+ struct spdk_nvmf_rdma_qpair rqpair;
+ struct spdk_nvmf_rdma_poller poller;
+ union nvmf_c2h_msg cpl;
+ union nvmf_h2c_msg cmd;
+ struct spdk_nvme_sgl_descriptor *sgl;
+ struct spdk_nvmf_transport_pg_cache_buf bufs[4];
+ struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
+ struct spdk_nvmf_rdma_request_data data;
+ struct spdk_nvmf_transport_pg_cache_buf buffer;
+ struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
+ int rc, i;
+
+ data.wr.sg_list = data.sgl;
+ STAILQ_INIT(&group.group.buf_cache);
+ group.group.buf_cache_size = 0;
+ group.group.buf_cache_count = 0;
+ group.group.transport = &rtransport.transport;
+ STAILQ_INIT(&group.retired_bufs);
+ poller.group = &group;
+ rqpair.poller = &poller;
+ rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
+
+ sgl = &cmd.nvme_cmd.dptr.sgl1;
+ rdma_req.recv = &recv;
+ rdma_req.req.cmd = &cmd;
+ rdma_req.req.rsp = &cpl;
+ rdma_req.data.wr.sg_list = rdma_req.data.sgl;
+ rdma_req.req.qpair = &rqpair.qpair;
+ rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+
+ rtransport.transport.opts = g_rdma_ut_transport_opts;
+ rtransport.data_wr_pool = NULL;
+ rtransport.transport.data_buf_pool = NULL;
+
+ device.attr.device_cap_flags = 0;
+ g_rdma_mr.lkey = 0xABCD;
+ sgl->keyed.key = 0xEEEE;
+ sgl->address = 0xFFFF;
+ rdma_req.recv->buf = (void *)0xDDDD;
+
+ /* Test 1: sgl type: keyed data block subtype: address */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+
+ /* Part 1: simple I/O, one SGL smaller than the transport io unit size */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
+
+ device.map = (void *)0x0;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+
+ /* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
+ CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+
+	/* Part 3: simple I/O, one SGL larger than the transport max io size */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == -1);
+
+ /* Part 4: Pretend there are no buffer pools */
+ MOCK_SET(spdk_mempool_get, NULL);
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == false);
+ CU_ASSERT(rdma_req.req.data == NULL);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 0);
+ CU_ASSERT(rdma_req.req.buffers[0] == NULL);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);
+
+ rdma_req.recv->buf = (void *)0xDDDD;
+	/* Test 2: sgl type: data block, subtype: offset (in-capsule data) */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
+
+	/* Part 1: normal I/O that fits within the in-capsule data size, no offset */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->address = 0;
+ sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
+ CU_ASSERT(rdma_req.req.data_from_pool == false);
+
+ /* Part 2: I/O offset + length too large */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->address = rtransport.transport.opts.in_capsule_data_size;
+ sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == -1);
+
+ /* Part 3: I/O too large */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->address = 0;
+ sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == -1);
+
+ /* Test 3: Multi SGL */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
+ sgl->address = 0;
+ rdma_req.recv->buf = (void *)&sgl_desc;
+ MOCK_SET(spdk_mempool_get, &data);
+
+ /* part 1: 2 segments each with 1 wr. */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
+ sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
+ CU_ASSERT(data.wr.num_sge == 1);
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+
+	/* part 2: 2 segments, each with 1 wr containing 8 sge elements */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
+ sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
+ CU_ASSERT(rdma_req.req.iovcnt == 16);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 8);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
+ CU_ASSERT(data.wr.num_sge == 8);
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+
+ /* part 3: 2 segments, one very large, one very small */
+ reset_nvmf_rdma_request(&rdma_req);
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
+ rtransport.transport.opts.io_unit_size / 2;
+ sgl_desc[0].address = 0x4000;
+ sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
+ sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
+ rtransport.transport.opts.io_unit_size / 2;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
+ CU_ASSERT(rdma_req.req.iovcnt == 17);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 16);
+ for (i = 0; i < 15; i++) {
+ CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
+ }
+ CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
+ rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(data.wr.num_sge == 1);
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+
+ /* Test 4: use PG buffer cache */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl->address = 0xFFFF;
+ rdma_req.recv->buf = (void *)0xDDDD;
+ g_rdma_mr.lkey = 0xABCD;
+ sgl->keyed.key = 0xEEEE;
+
+ for (i = 0; i < 4; i++) {
+ STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
+ }
+
+ /* part 1: use the four buffers from the pg cache */
+ group.group.buf_cache_size = 4;
+ group.group.buf_cache_count = 4;
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ }
+
+ /* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
+ reset_nvmf_rdma_request(&rdma_req);
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ }
+
+ /* part 3: half and half */
+ group.group.buf_cache_count = 2;
+
+ for (i = 0; i < 2; i++) {
+ STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
+ }
+ reset_nvmf_rdma_request(&rdma_req);
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ for (i = 0; i < 2; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ }
+ for (i = 2; i < 4; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ }
+
+ reset_nvmf_rdma_request(&rdma_req);
+	/* Test 5: buffer split over two memory regions */
+ MOCK_SET(spdk_mempool_get, (void *)&buffer);
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
+ g_mr_size = rtransport.transport.opts.io_unit_size / 4;
+ g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+ buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
+ CU_ASSERT(buffer_ptr == &buffer);
+ STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
+ CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
+ g_mr_size = 0;
+ g_mr_next_size = 0;
+
+ reset_nvmf_rdma_request(&rdma_req);
+}
+
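+/* Helpers that fabricate minimal recv/request pairs for driving the
+ * request state machine in the tests below. */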
+static struct spdk_nvmf_rdma_recv *
+create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
+{
+ struct spdk_nvmf_rdma_recv *rdma_recv;
+ union nvmf_h2c_msg *cmd;
+ struct spdk_nvme_sgl_descriptor *sgl;
+
+ rdma_recv = calloc(1, sizeof(*rdma_recv));
+ rdma_recv->qpair = rqpair;
+ cmd = calloc(1, sizeof(*cmd));
+ rdma_recv->sgl[0].addr = (uintptr_t)cmd;
+ cmd->nvme_cmd.opc = opc;
+ sgl = &cmd->nvme_cmd.dptr.sgl1;
+ sgl->keyed.key = 0xEEEE;
+ sgl->address = 0xFFFF;
+ sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl->keyed.length = 1;
+
+ return rdma_recv;
+}
+
+static void
+free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
+{
+ free((void *)rdma_recv->sgl[0].addr);
+ free(rdma_recv);
+}
+
+static struct spdk_nvmf_rdma_request *
+create_req(struct spdk_nvmf_rdma_qpair *rqpair,
+ struct spdk_nvmf_rdma_recv *rdma_recv)
+{
+ struct spdk_nvmf_rdma_request *rdma_req;
+ union nvmf_c2h_msg *cpl;
+
+ rdma_req = calloc(1, sizeof(*rdma_req));
+ rdma_req->recv = rdma_recv;
+ rdma_req->req.qpair = &rqpair->qpair;
+ rdma_req->state = RDMA_REQUEST_STATE_NEW;
+ rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
+ rdma_req->data.wr.sg_list = rdma_req->data.sgl;
+ cpl = calloc(1, sizeof(*cpl));
+ rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
+ rdma_req->req.rsp = cpl;
+
+ return rdma_req;
+}
+
+static void
+free_req(struct spdk_nvmf_rdma_request *rdma_req)
+{
+ free((void *)rdma_req->rsp.sgl[0].addr);
+ free(rdma_req);
+}
+
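+/* Re-initialize the qpair to a fresh, active state between scenarios. */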
+static void
+qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
+ struct spdk_nvmf_rdma_poller *poller,
+ struct spdk_nvmf_rdma_device *device,
+ struct spdk_nvmf_rdma_resources *resources)
+{
+ memset(rqpair, 0, sizeof(*rqpair));
+ STAILQ_INIT(&rqpair->pending_rdma_write_queue);
+ STAILQ_INIT(&rqpair->pending_rdma_read_queue);
+ rqpair->poller = poller;
+ rqpair->device = device;
+ rqpair->resources = resources;
+ rqpair->qpair.qid = 1;
+ rqpair->ibv_state = IBV_QPS_RTS;
+ rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
+ rqpair->max_send_depth = 16;
+ rqpair->max_read_depth = 16;
+ resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
+}
+
+static void
+poller_reset(struct spdk_nvmf_rdma_poller *poller,
+ struct spdk_nvmf_rdma_poll_group *group)
+{
+ memset(poller, 0, sizeof(*poller));
+ STAILQ_INIT(&poller->qpairs_pending_recv);
+ STAILQ_INIT(&poller->qpairs_pending_send);
+ poller->group = group;
+}
+
+static void
+test_spdk_nvmf_rdma_request_process(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport = {};
+ struct spdk_nvmf_rdma_poll_group group = {};
+ struct spdk_nvmf_rdma_poller poller = {};
+ struct spdk_nvmf_rdma_device device = {};
+ struct spdk_nvmf_rdma_resources resources = {};
+ struct spdk_nvmf_rdma_qpair rqpair = {};
+ struct spdk_nvmf_rdma_recv *rdma_recv;
+ struct spdk_nvmf_rdma_request *rdma_req;
+ bool progress;
+
+ STAILQ_INIT(&group.group.buf_cache);
+ STAILQ_INIT(&group.group.pending_buf_queue);
+ group.group.buf_cache_size = 0;
+ group.group.buf_cache_count = 0;
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+
+ rtransport.transport.opts = g_rdma_ut_transport_opts;
+ rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
+ rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
+ sizeof(struct spdk_nvmf_rdma_request_data),
+ 0, 0);
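+	/* the pools above are real: request processing draws WRs and data
+	 * buffers from them once spdk_mempool_get is un-mocked below */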
+ MOCK_CLEAR(spdk_mempool_get);
+
+ device.attr.device_cap_flags = 0;
+ device.map = (void *)0x0;
+ g_rdma_mr.lkey = 0xABCD;
+
+ /* Test 1: single SGL READ request */
+ rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
+ rdma_req = create_req(&rqpair, rdma_recv);
+ rqpair.current_recv_depth = 1;
+ /* NEW -> EXECUTING */
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
+ CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+ /* EXECUTED -> TRANSFERRING_C2H */
+ rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
+ CU_ASSERT(rdma_req->recv == NULL);
+ CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
+ CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
+ /* COMPLETED -> FREE */
+ rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
+
+ free_recv(rdma_recv);
+ free_req(rdma_req);
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+
+ /* Test 2: single SGL WRITE request */
+ rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
+ rdma_req = create_req(&rqpair, rdma_recv);
+ rqpair.current_recv_depth = 1;
+ /* NEW -> TRANSFERRING_H2C */
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+ CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
+ STAILQ_INIT(&poller.qpairs_pending_send);
+ /* READY_TO_EXECUTE -> EXECUTING */
+ rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
+ /* EXECUTED -> COMPLETING */
+ rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
+ CU_ASSERT(rdma_req->recv == NULL);
+ CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
+ CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
+ /* COMPLETED -> FREE */
+ rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
+
+ free_recv(rdma_recv);
+ free_req(rdma_req);
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+
+ /* Test 3: WRITE+WRITE ibv_send batching */
+ {
+ struct spdk_nvmf_rdma_recv *recv1, *recv2;
+ struct spdk_nvmf_rdma_request *req1, *req2;
+ recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
+ req1 = create_req(&rqpair, recv1);
+ recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
+ req2 = create_req(&rqpair, recv2);
+
+ /* WRITE 1: NEW -> TRANSFERRING_H2C */
+ rqpair.current_recv_depth = 1;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+
+ /* WRITE 2: NEW -> TRANSFERRING_H2C */
+ rqpair.current_recv_depth = 2;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+
+ STAILQ_INIT(&poller.qpairs_pending_send);
+
+ /* WRITE 1 completes before WRITE 2 has finished RDMA reading */
+ /* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
+ req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
+ /* WRITE 1: EXECUTED -> COMPLETING */
+ req1->state = RDMA_REQUEST_STATE_EXECUTED;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
+ STAILQ_INIT(&poller.qpairs_pending_send);
+ /* WRITE 1: COMPLETED -> FREE */
+ req1->state = RDMA_REQUEST_STATE_COMPLETED;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);
+
+ /* Now WRITE 2 has finished reading and completes */
+ /* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
+ req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
+		/* WRITE 2: EXECUTED -> COMPLETING */
+ req2->state = RDMA_REQUEST_STATE_EXECUTED;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
+ STAILQ_INIT(&poller.qpairs_pending_send);
+		/* WRITE 2: COMPLETED -> FREE */
+ req2->state = RDMA_REQUEST_STATE_COMPLETED;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);
+
+ free_recv(recv1);
+ free_req(req1);
+ free_recv(recv2);
+ free_req(req2);
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+ }
+
+ spdk_mempool_free(rtransport.transport.data_buf_pool);
+ spdk_mempool_free(rtransport.data_wr_pool);
+}
+
+#define TEST_GROUPS_COUNT 5
+static void
+test_nvmf_rdma_get_optimal_poll_group(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport = {};
+ struct spdk_nvmf_transport *transport = &rtransport.transport;
+ struct spdk_nvmf_rdma_qpair rqpair = {};
+ struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
+ struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
+ struct spdk_nvmf_transport_poll_group *result;
+ uint32_t i;
+
+ rqpair.qpair.transport = transport;
+ pthread_mutex_init(&rtransport.lock, NULL);
+ TAILQ_INIT(&rtransport.poll_groups);
+
+ for (i = 0; i < TEST_GROUPS_COUNT; i++) {
+ groups[i] = nvmf_rdma_poll_group_create(transport);
+ CU_ASSERT(groups[i] != NULL);
+ rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
+ groups[i]->transport = transport;
+ }
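+	/* both schedulers should initially point at the first created group */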
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);
+
+ /* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
+ for (i = 0; i < TEST_GROUPS_COUNT; i++) {
+ rqpair.qpair.qid = 0;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[i]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);
+
+ rqpair.qpair.qid = 1;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[i]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
+ }
+	/* wrap around, admin/io pg point to the first pg again.
+	   Destroy all poll groups except the last one */
+ for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
+ nvmf_rdma_poll_group_destroy(groups[i]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
+ }
+
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+
+ /* Check that pointers to the next admin/io poll groups are not changed */
+ rqpair.qpair.qid = 0;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+
+ rqpair.qpair.qid = 1;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+
+ /* Remove the last poll group, check that pointers are NULL */
+ nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);
+
+ /* Request optimal poll group, result must be NULL */
+ rqpair.qpair.qid = 0;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == NULL);
+
+ rqpair.qpair.qid = 1;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == NULL);
+
+ pthread_mutex_destroy(&rtransport.lock);
+}
+#undef TEST_GROUPS_COUNT
+
+static void
+test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport;
+ struct spdk_nvmf_rdma_device device;
+ struct spdk_nvmf_rdma_request rdma_req = {};
+ struct spdk_nvmf_rdma_recv recv;
+ struct spdk_nvmf_rdma_poll_group group;
+ struct spdk_nvmf_rdma_qpair rqpair;
+ struct spdk_nvmf_rdma_poller poller;
+ union nvmf_c2h_msg cpl;
+ union nvmf_h2c_msg cmd;
+ struct spdk_nvme_sgl_descriptor *sgl;
+ struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
+ struct spdk_nvmf_rdma_request_data data;
+ struct spdk_nvmf_transport_pg_cache_buf buffer;
+ struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
+ const uint32_t data_bs = 512;
+ const uint32_t md_size = 8;
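+	/* extended LBA layout used throughout: 512B of data followed by 8B of
+	 * metadata per block */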
+ int rc, i;
+ void *aligned_buffer;
+
+ data.wr.sg_list = data.sgl;
+ STAILQ_INIT(&group.group.buf_cache);
+ group.group.buf_cache_size = 0;
+ group.group.buf_cache_count = 0;
+ group.group.transport = &rtransport.transport;
+ STAILQ_INIT(&group.retired_bufs);
+ poller.group = &group;
+ rqpair.poller = &poller;
+ rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
+
+ sgl = &cmd.nvme_cmd.dptr.sgl1;
+ rdma_req.recv = &recv;
+ rdma_req.req.cmd = &cmd;
+ rdma_req.req.rsp = &cpl;
+ rdma_req.data.wr.sg_list = rdma_req.data.sgl;
+ rdma_req.req.qpair = &rqpair.qpair;
+ rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+
+ rtransport.transport.opts = g_rdma_ut_transport_opts;
+ rtransport.data_wr_pool = NULL;
+ rtransport.transport.data_buf_pool = NULL;
+
+ device.attr.device_cap_flags = 0;
+ device.map = NULL;
+ g_rdma_mr.lkey = 0xABCD;
+ sgl->keyed.key = 0xEEEE;
+ sgl->address = 0xFFFF;
+ rdma_req.recv->buf = (void *)0xDDDD;
+
+ /* Test 1: sgl type: keyed data block subtype: address */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+
+ /* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 8;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+
+ /* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
+ block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 4;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 5);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 3; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);
+
+	/* 2nd buffer consumed: the 2048B io unit holds three full 520B extended
+	 * blocks plus 488B, so the last 24B of the 4th data block land here */
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);
+
+	/* Part 3: simple I/O, one SGL equal to the io unit size; io_unit_size equals the 512B block size */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs;
+ sgl->keyed.length = data_bs;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+
+ CU_ASSERT(rdma_req.req.iovcnt == 2);
+ CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
+ CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
+ /* 2nd buffer consumed for metadata */
+ CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
+ CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);
+
+ /* Part 4: simple I/O, one SGL equal io unit size, io_unit_size is aligned with md_size,
+ block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+
+ /* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
+ block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 2; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ }
+ for (i = 0; i < 2; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
+ }
+
+ /* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
+ block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 4;
+ sgl->keyed.length = data_bs * 6;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 6);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 7);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 3; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);
+
+	/* 2nd IO buffer consumed: it starts with the 24B remainder of the 4th
+	 * data block, followed by its metadata and two more 512B blocks */
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
+ CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == g_rdma_mr.lkey);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
+ CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == g_rdma_mr.lkey);
+
+	/* Part 7: simple I/O, the number of SGL entries exceeds what one WR can
+	   hold. An additional WR is chained */
+ MOCK_SET(spdk_mempool_get, &data);
+ aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 16;
+ sgl->keyed.length = data_bs * 16;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 16);
+ CU_ASSERT(rdma_req.req.iovcnt == 2);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
+ CU_ASSERT(rdma_req.req.data == aligned_buffer);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 16);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ /* additional wr from pool */
+ CU_ASSERT(rdma_req.data.wr.next == (void *)&data.wr);
+ CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);
+
+	/* Part 8: simple I/O, data with metadata does not fit into one IO buffer */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = 516;
+ sgl->keyed.length = data_bs * 2;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 2);
+ CU_ASSERT(rdma_req.req.iovcnt == 3);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
+ CU_ASSERT(rdma_req.req.data == (void *)0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 2);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+
+	/* 2nd IO buffer consumed; data starts at a 4-byte offset because the
+	   tail of the first block's metadata occupies the start of this buffer */
+ CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
+ CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == g_rdma_mr.lkey);
+
+	/* Part 9: buffer split over two memory regions */
+ MOCK_SET(spdk_mempool_get, (void *)&buffer);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 4;
+ sgl->keyed.length = data_bs * 2;
+ g_mr_size = data_bs;
+ g_mr_next_size = rtransport.transport.opts.io_unit_size;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 2);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
+ for (i = 0; i < 2; i++) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uint64_t)rdma_req.req.data + i *
+ (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+ buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
+ CU_ASSERT(buffer_ptr == &buffer);
+ STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
+ CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
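+	/* The mock MR translation (g_mr_size / g_mr_next_size) made the buffer
+	 * appear to span two memory regions, so the parser emitted one SGE per
+	 * region and retired the straddling buffer to the poll group's
+	 * retired_bufs list instead of recycling it. */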
+ g_mr_size = 0;
+ g_mr_next_size = 0;
+
+ /* Test 2: Multi SGL */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
+ sgl->address = 0;
+ rdma_req.recv->buf = (void *)&sgl_desc;
+ MOCK_SET(spdk_mempool_get, &data);
+ aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK);
+
+	/* Part 1: 2 segments, each with 1 WR; io_unit_size is aligned with data_bs + md_size */
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
+ sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
+
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.length = data_bs * 4;
+ sgl_desc[i].address = 0x4000 + i * data_bs * 4;
+ sgl_desc[i].keyed.key = 0x44;
+ }
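+	/* Two keyed descriptors covering adjacent data_bs * 4 regions starting at
+	 * 0x4000, both with rkey 0x44; each segment should produce its own WR. */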
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
+ (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ }
+
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
+ CU_ASSERT(data.wr.num_sge == 4);
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
+ (data_bs + md_size));
+ CU_ASSERT(data.wr.sg_list[i].length == data_bs);
+ }
+
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
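+	/* Final chain: rdma_req.data.wr -> data.wr -> rdma_req.rsp.wr, i.e. one
+	 * data WR per SGL descriptor followed by the response WR. */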
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
+ CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
+ CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
+ CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore b/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore
new file mode 100644
index 000000000..76ca0d330
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore
@@ -0,0 +1 @@
+subsystem_ut
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile b/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile
new file mode 100644
index 000000000..3d5fa6c8e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = json
+TEST_FILE = subsystem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c b/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c
new file mode 100644
index 000000000..149c22da1
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c
@@ -0,0 +1,1342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "common/lib/ut_multithread.c"
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+#include "spdk_internal/thread.h"
+
+#include "nvmf/subsystem.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB(spdk_bdev_module_claim_bdev,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+
+DEFINE_STUB_V(spdk_bdev_module_release_bdev,
+ (struct spdk_bdev *bdev));
+
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
+ (const struct spdk_bdev *bdev), 512);
+
+DEFINE_STUB(spdk_bdev_get_md_size, uint32_t,
+ (const struct spdk_bdev *bdev), 0);
+
+DEFINE_STUB(spdk_bdev_is_md_interleaved, bool,
+ (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_nvmf_transport_stop_listen,
+ int,
+ (struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid), 0);
+
+int
+spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return 0;
+}
+
+void
+nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
+ struct spdk_nvme_transport_id *trid,
+ struct spdk_nvmf_discovery_log_page_entry *entry)
+{
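+	/* Arbitrary sentinel value: tests can check for trtype == 42 to verify
+	 * that this stub filled in the discovery log entry. */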
+ entry->trtype = 42;
+}
+
+static struct spdk_nvmf_transport g_transport = {};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_transport_create(const char *transport_name,
+ struct spdk_nvmf_transport_opts *tprt_opts)
+{
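+	/* strcasecmp() returns 0 on a match, so this mock returns g_transport for
+	 * any transport name except RDMA and NULL for RDMA itself. */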
+ if (strcasecmp(transport_name, spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA))) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ return NULL;
+}
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
+{
+ if (strncmp(transport_name, SPDK_NVME_TRANSPORT_NAME_RDMA, SPDK_NVMF_TRSTRING_MAX_LEN)) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+int
+nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem)
+{
+ return 0;
+}
+
+int
+nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+void
+nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+int
+spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
+{
+ if (trtype == NULL || str == NULL) {
+ return -EINVAL;
+ }
+
+ if (strcasecmp(str, "PCIe") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_PCIE;
+ } else if (strcasecmp(str, "RDMA") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_RDMA;
+ } else {
+ return -ENOENT;
+ }
+ return 0;
+}
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+int32_t
+spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return -1;
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ return -1;
+}
+
+int
+spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return -1;
+}
+
+void
+nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+static struct spdk_nvmf_ctrlr *g_ns_changed_ctrlr = NULL;
+static uint32_t g_ns_changed_nsid = 0;
+void
+nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
+{
+ g_ns_changed_ctrlr = ctrlr;
+ g_ns_changed_nsid = nsid;
+}
+
+int
+spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
+ void *event_ctx, struct spdk_bdev_desc **_desc)
+{
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+static void
+test_spdk_nvmf_subsystem_add_ns(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem subsystem = {
+ .max_nsid = 0,
+ .ns = NULL,
+ .tgt = &tgt
+ };
+ struct spdk_bdev bdev1 = {}, bdev2 = {};
+ struct spdk_nvmf_ns_opts ns_opts;
+ uint32_t nsid;
+ int rc;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Allow NSID to be assigned automatically */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev1, &ns_opts, sizeof(ns_opts), NULL);
+ /* NSID 1 is the first unused ID */
+ CU_ASSERT(nsid == 1);
+ CU_ASSERT(subsystem.max_nsid == 1);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
+ CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &bdev1);
+
+ /* Request a specific NSID */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 5;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 5);
+ CU_ASSERT(subsystem.max_nsid == 5);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
+ CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &bdev2);
+
+ /* Request an NSID that is already in use */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 5;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 0);
+ CU_ASSERT(subsystem.max_nsid == 5);
+
+ /* Request 0xFFFFFFFF (invalid NSID, reserved for broadcast) */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 0xFFFFFFFF;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 0);
+ CU_ASSERT(subsystem.max_nsid == 5);
+
+ rc = spdk_nvmf_subsystem_remove_ns(&subsystem, 1);
+ CU_ASSERT(rc == 0);
+ rc = spdk_nvmf_subsystem_remove_ns(&subsystem, 5);
+ CU_ASSERT(rc == 0);
+
+ free(subsystem.ns);
+ free(tgt.subsystems);
+}
+
+static void
+nvmf_test_create_subsystem(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ char nqn[256];
+ struct spdk_nvmf_subsystem *subsystem;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* valid name with complex reverse domain */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-full--rev-domain.name:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+	/* Valid name, discovery controller */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+	/* Invalid name, no user-supplied string */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Valid name, only contains top-level domain name */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid name, domain label > 63 characters */
+ snprintf(nqn, sizeof(nqn),
+ "nqn.2016-06.io.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz:sub");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label starts with digit */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.3spdk:sub");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label starts with - */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.-spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label ends with - */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label with multiple consecutive periods */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io..spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+	/* Longest valid name (NQNs are limited to 223 bytes) */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ memset(nqn + strlen(nqn), 'a', 223 - strlen(nqn));
+ nqn[223] = '\0';
+ CU_ASSERT(strlen(nqn) == 223);
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid name, too long */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ memset(nqn + strlen(nqn), 'a', 224 - strlen(nqn));
+ nqn[224] = '\0';
+ CU_ASSERT(strlen(nqn) == 224);
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ CU_ASSERT(subsystem == NULL);
+
+ /* Valid name using uuid format */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:11111111-aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+	/* Invalid name, user string contains an invalid UTF-8 character */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xFFsubsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+	/* Valid name with non-ASCII but valid UTF-8 characters */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xe1\x8a\x88subsystem1\xca\x80");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid uuid (too long) */
+ snprintf(nqn, sizeof(nqn),
+ "nqn.2014-08.org.nvmexpress:uuid:11111111-aaaa-bbdd-FFEE-123456789abcdef");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid uuid (dashes placed incorrectly) */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:111111-11aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid uuid (invalid characters in uuid) */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:111hg111-aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ free(tgt.subsystems);
+}
+
+static void
+test_spdk_nvmf_subsystem_set_sn(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+
+ /* Basic valid serial number */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd xyz") == 0);
+ CU_ASSERT(strcmp(subsystem.sn, "abcd xyz") == 0);
+
+ /* Exactly 20 characters (valid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "12345678901234567890") == 0);
+ CU_ASSERT(strcmp(subsystem.sn, "12345678901234567890") == 0);
+
+ /* 21 characters (too long, invalid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "123456789012345678901") < 0);
+
+ /* Non-ASCII characters (invalid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd\txyz") < 0);
+}
+
+/*
+ * Reservation Unit Test Configuration
+ * -------- -------- --------
+ * | Host A | | Host B | | Host C |
+ * -------- -------- --------
+ * / \ | |
+ * -------- -------- ------- -------
+ * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C|
+ * -------- -------- ------- -------
+ * \ \ / /
+ * \ \ / /
+ * \ \ / /
+ * --------------------------------------
+ * | NAMESPACE 1 |
+ * --------------------------------------
+ */
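+/*
+ * Registrants are tracked per host identifier, so g_ctrlr1_A and g_ctrlr2_A
+ * (both Host A) are expected to share a single registrant entry.
+ */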
+static struct spdk_nvmf_subsystem g_subsystem;
+static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
+static struct spdk_nvmf_ns g_ns;
+static struct spdk_bdev g_bdev;
+struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
+
+void
+nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+static void
+ut_reservation_init(void)
+{
+ TAILQ_INIT(&g_subsystem.ctrlrs);
+
+ memset(&g_ns, 0, sizeof(g_ns));
+ TAILQ_INIT(&g_ns.registrants);
+ g_ns.subsystem = &g_subsystem;
+ g_ns.ptpl_file = NULL;
+ g_ns.ptpl_activated = false;
+ spdk_uuid_generate(&g_bdev.uuid);
+ g_ns.bdev = &g_bdev;
+
+ /* Host A has two controllers */
+ spdk_uuid_generate(&g_ctrlr1_A.hostid);
+ TAILQ_INIT(&g_ctrlr1_A.log_head);
+ g_ctrlr1_A.subsys = &g_subsystem;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr1_A, link);
+ spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
+ TAILQ_INIT(&g_ctrlr2_A.log_head);
+ g_ctrlr2_A.subsys = &g_subsystem;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr2_A, link);
+
+ /* Host B has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_B.hostid);
+ TAILQ_INIT(&g_ctrlr_B.log_head);
+ g_ctrlr_B.subsys = &g_subsystem;
+ g_ctrlr_B.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_B, link);
+
+ /* Host C has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_C.hostid);
+ TAILQ_INIT(&g_ctrlr_C.log_head);
+ g_ctrlr_C.subsys = &g_subsystem;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_C, link);
+}
+
+static void
+ut_reservation_deinit(void)
+{
+ struct spdk_nvmf_registrant *reg, *tmp;
+ struct spdk_nvmf_reservation_log *log, *log_tmp;
+ struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp;
+
+ TAILQ_FOREACH_SAFE(reg, &g_ns.registrants, link, tmp) {
+ TAILQ_REMOVE(&g_ns.registrants, reg, link);
+ free(reg);
+ }
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr1_A.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr1_A.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr2_A.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr2_A.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr_B.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr_B.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr_B.num_avail_log_pages = 0;
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr_C.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr_C.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr_C.num_avail_log_pages = 0;
+
+ TAILQ_FOREACH_SAFE(ctrlr, &g_subsystem.ctrlrs, link, ctrlr_tmp) {
+ TAILQ_REMOVE(&g_subsystem.ctrlrs, ctrlr, link);
+ }
+}
+
+static struct spdk_nvmf_request *
+ut_reservation_build_req(uint32_t length)
+{
+ struct spdk_nvmf_request *req;
+
+ req = calloc(1, sizeof(*req));
+ assert(req != NULL);
+
+ req->data = calloc(1, length);
+ assert(req->data != NULL);
+ req->length = length;
+
+ req->cmd = (union nvmf_h2c_msg *)calloc(1, sizeof(union nvmf_h2c_msg));
+ assert(req->cmd != NULL);
+
+ req->rsp = (union nvmf_c2h_msg *)calloc(1, sizeof(union nvmf_c2h_msg));
+ assert(req->rsp != NULL);
+
+ return req;
+}
+
+static void
+ut_reservation_free_req(struct spdk_nvmf_request *req)
+{
+ free(req->cmd);
+ free(req->rsp);
+ free(req->data);
+ free(req);
+}
+
+static void
+ut_reservation_build_register_request(struct spdk_nvmf_request *req,
+ uint8_t rrega, uint8_t iekey,
+ uint8_t cptpl, uint64_t crkey,
+ uint64_t nrkey)
+{
+ struct spdk_nvme_reservation_register_data key;
+ struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+
+ key.crkey = crkey;
+ key.nrkey = nrkey;
+ cmd->cdw10 = 0;
+ cmd->cdw10_bits.resv_register.rrega = rrega;
+ cmd->cdw10_bits.resv_register.iekey = iekey;
+ cmd->cdw10_bits.resv_register.cptpl = cptpl;
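+	/* The register payload is the 16-byte Reservation Register data structure:
+	 * bytes 0-7 CRKEY, bytes 8-15 NRKEY. */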
+ memcpy(req->data, &key, sizeof(key));
+}
+
+static void
+ut_reservation_build_acquire_request(struct spdk_nvmf_request *req,
+ uint8_t racqa, uint8_t iekey,
+ uint8_t rtype, uint64_t crkey,
+ uint64_t prkey)
+{
+ struct spdk_nvme_reservation_acquire_data key;
+ struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+
+ key.crkey = crkey;
+ key.prkey = prkey;
+ cmd->cdw10 = 0;
+ cmd->cdw10_bits.resv_acquire.racqa = racqa;
+ cmd->cdw10_bits.resv_acquire.iekey = iekey;
+ cmd->cdw10_bits.resv_acquire.rtype = rtype;
+ memcpy(req->data, &key, sizeof(key));
+}
+
+static void
+ut_reservation_build_release_request(struct spdk_nvmf_request *req,
+ uint8_t rrela, uint8_t iekey,
+ uint8_t rtype, uint64_t crkey)
+{
+ struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+
+ cmd->cdw10 = 0;
+ cmd->cdw10_bits.resv_release.rrela = rrela;
+ cmd->cdw10_bits.resv_release.iekey = iekey;
+ cmd->cdw10_bits.resv_release.rtype = rtype;
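+	/* The release payload is just the 8-byte CRKEY. */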
+ memcpy(req->data, &crkey, sizeof(crkey));
+}
+
+/*
+ * Register the four controllers used by the other test cases.
+ *
+ * g_ctrlr1_A registers with key 0xa1.
+ * g_ctrlr2_A shares Host A's registrant (key 0xa1).
+ * g_ctrlr_B registers with key 0xb1.
+ * g_ctrlr_C registers with key 0xc1.
+ */
+static void
+ut_reservation_build_registrants(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ uint32_t gen;
+
+	req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+ gen = g_ns.gen;
+
+ /* TEST CASE: g_ctrlr1_A register with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xa1);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa1);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 1);
+
+	/* TEST CASE: g_ctrlr2_A registers with a new key. Because it shares a
+	 * Host Identifier with g_ctrlr1_A, it must keep the same key (0xa1).
+	 */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xa2);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr2_A, req);
+	/* Reservation conflict for any key other than 0xa1 */
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* g_ctrlr_B register with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xb1);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb1);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 2);
+
+ /* g_ctrlr_C register with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xc1);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xc1);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 3);
+
+ ut_reservation_free_req(req);
+}
+
+static void
+test_reservation_register(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ uint32_t gen;
+
+ ut_reservation_init();
+
+	req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+ /* TEST CASE: Replace g_ctrlr1_A with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
+ 0, 0, 0xa1, 0xa11);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa11);
+
+	/* TEST CASE: Host A with g_ctrlr1_A gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE
+ */
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xa11, 0x0);
+ gen = g_ns.gen;
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa11);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
+
+	/* TEST CASE: g_ctrlr_C unregisters with IEKEY enabled */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 1, 0, 0, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+	/* TEST CASE: g_ctrlr_B unregisters with the correct key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 0, 0, 0xb1, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+	/* TEST CASE: g_ctrlr1_A unregisters with the correct key;
+	 * the reservation should be removed as well.
+ */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 0, 0, 0xa11, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_register_with_ptpl(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ bool update_sgroup = false;
+ int rc;
+ struct spdk_nvmf_reservation_info info;
+
+ ut_reservation_init();
+
+	req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+
+	/* TEST CASE: No persistent file, so registering with PTPL enabled will fail */
+ g_ns.ptpl_file = NULL;
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == false);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc != SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ /* TEST CASE: Enable PTPL */
+ g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
+ /* Load reservation information from configuration file */
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+
+ /* TEST CASE: Disable PTPL */
+ rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == false);
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ unlink(g_ns.ptpl_file);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_acquire_preempt_1(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ uint32_t gen;
+
+ ut_reservation_init();
+
+	req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+ gen = g_ns.gen;
+	/* ACQUIRE: Host A with g_ctrlr1_A acquires a reservation of
+	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY.
+ */
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa1);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
+
+	/* TEST CASE: g_ctrlr1_A holds the reservation; g_ctrlr_B preempts g_ctrlr1_A,
+	 * and g_ctrlr1_A's registrant is unregistered.
+ */
+ gen = g_ns.gen;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1, 0xa1);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);
+
+	/* TEST CASE: g_ctrlr_B holds the reservation; g_ctrlr_C preempts g_ctrlr_B
+	 * with a valid key and PRKEY set to 0, so all registrants other than the host
+	 * that issued the command are unregistered.
+ */
+ gen = g_ns.gen;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_acquire_release_with_ptpl(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ bool update_sgroup = false;
+ struct spdk_uuid holder_uuid;
+ int rc;
+ struct spdk_nvmf_reservation_info info;
+
+ ut_reservation_init();
+
+	req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+
+ /* TEST CASE: Enable PTPL */
+ g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
+ /* Load reservation information from configuration file */
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+
+ /* TEST CASE: Acquire the reservation */
+ rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
+ update_sgroup = nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+ SPDK_CU_ASSERT_FATAL(info.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+ SPDK_CU_ASSERT_FATAL(info.crkey == 0xa1);
+ spdk_uuid_parse(&holder_uuid, info.holder_uuid);
+ SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &holder_uuid));
+
+ /* TEST CASE: Release the reservation */
+ rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1);
+ update_sgroup = nvmf_ns_reservation_release(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(info.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+ unlink(g_ns.ptpl_file);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_release(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+
+ ut_reservation_init();
+
+	req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host A with g_ctrlr1_A gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS
+ */
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xa1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+
+	/* Test Case: Host B releases the reservation */
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL);
+
+	/* Test Case: Host C clears all registrants */
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0,
+ 0, 0xc1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+void
+nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
+ struct spdk_nvmf_ns *ns,
+ enum spdk_nvme_reservation_notification_log_page_type type)
+{
+ ctrlr->num_avail_log_pages++;
+}
+
+static void
+test_reservation_unregister_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B with g_ctrlr_B gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+	/* Test Case: g_ctrlr_B holds the reservation and unregisters its registration.
+	 * A reservation released notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C,
+	 * but only for the SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and
+	 * SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY reservation types.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 0, 0, 0xb1, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_release_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B with g_ctrlr_B gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+	/* Test Case: g_ctrlr_B holds the reservation and then releases it.
+	 * A reservation released notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_release_notification_write_exclusive(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B with g_ctrlr_B gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
+
+	/* Test Case: g_ctrlr_B holds the reservation and then releases it.
+ * Because the reservation type is SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ * no reservation notification occurs.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 5;
+ g_ctrlr2_A.num_avail_log_pages = 5;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 5;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_clear_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B with g_ctrlr_B gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+	/* Test Case: g_ctrlr_B holds the reservation and then clears it.
+	 * A reservation preempted notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0,
+ 0, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_preempt_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B with g_ctrlr_B gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+	/* Test Case: g_ctrlr_B holds the reservation and g_ctrlr_C preempts g_ctrlr_B;
+	 * g_ctrlr_B's registrant is unregistered and the reservation is preempted.
+	 * A registration preempted notification is sent to g_ctrlr_B.
+	 * A reservation preempted notification is sent to g_ctrlr1_A/g_ctrlr2_A.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 0;
+ g_ctrlr_C.num_avail_log_pages = 5;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0xb1);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_spdk_nvmf_ns_event(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem subsystem = {
+ .max_nsid = 0,
+ .ns = NULL,
+ .tgt = &tgt
+ };
+ struct spdk_nvmf_ctrlr ctrlr = {
+ .subsys = &subsystem
+ };
+ struct spdk_bdev bdev1 = {};
+ struct spdk_nvmf_ns_opts ns_opts;
+ uint32_t nsid;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Add one namespace */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev1, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 1);
+ CU_ASSERT(NULL != subsystem.ns[0]);
+
+ /* Add one controller */
+ TAILQ_INIT(&subsystem.ctrlrs);
+ TAILQ_INSERT_TAIL(&subsystem.ctrlrs, &ctrlr, link);
+
+ /* Namespace resize event */
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ g_ns_changed_nsid = 0xFFFFFFFF;
+ g_ns_changed_ctrlr = NULL;
+ nvmf_ns_event(SPDK_BDEV_EVENT_RESIZE, &bdev1, subsystem.ns[0]);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state);
+
+ poll_threads();
+ CU_ASSERT(1 == g_ns_changed_nsid);
+ CU_ASSERT(&ctrlr == g_ns_changed_ctrlr);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);
+
+ /* Namespace remove event */
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ g_ns_changed_nsid = 0xFFFFFFFF;
+ g_ns_changed_ctrlr = NULL;
+ nvmf_ns_event(SPDK_BDEV_EVENT_REMOVE, &bdev1, subsystem.ns[0]);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state);
+ CU_ASSERT(0xFFFFFFFF == g_ns_changed_nsid);
+ CU_ASSERT(NULL == g_ns_changed_ctrlr);
+
+ poll_threads();
+ CU_ASSERT(1 == g_ns_changed_nsid);
+ CU_ASSERT(&ctrlr == g_ns_changed_ctrlr);
+ CU_ASSERT(NULL == subsystem.ns[0]);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);
+
+ free(subsystem.ns);
+ free(tgt.subsystems);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, nvmf_test_create_subsystem);
+ CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_add_ns);
+ CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_set_sn);
+ CU_ADD_TEST(suite, test_reservation_register);
+ CU_ADD_TEST(suite, test_reservation_register_with_ptpl);
+ CU_ADD_TEST(suite, test_reservation_acquire_preempt_1);
+ CU_ADD_TEST(suite, test_reservation_acquire_release_with_ptpl);
+ CU_ADD_TEST(suite, test_reservation_release);
+ CU_ADD_TEST(suite, test_reservation_unregister_notification);
+ CU_ADD_TEST(suite, test_reservation_release_notification);
+ CU_ADD_TEST(suite, test_reservation_release_notification_write_exclusive);
+ CU_ADD_TEST(suite, test_reservation_clear_notification);
+ CU_ADD_TEST(suite, test_reservation_preempt_notification);
+ CU_ADD_TEST(suite, test_spdk_nvmf_ns_event);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore b/src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore
new file mode 100644
index 000000000..ea821fbfa
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore
@@ -0,0 +1 @@
+tcp_ut
diff --git a/src/spdk/test/unit/lib/nvmf/tcp.c/Makefile b/src/spdk/test/unit/lib/nvmf/tcp.c/Makefile
new file mode 100644
index 000000000..2f6dc9b85
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/tcp.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = tcp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c b/src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c
new file mode 100644
index 000000000..a6d6d9da3
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c
@@ -0,0 +1,722 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/nvmf_spec.h"
+#include "spdk_cunit.h"
+
+#include "spdk_internal/mock.h"
+#include "spdk_internal/thread.h"
+
+#include "common/lib/test_env.c"
+#include "common/lib/test_sock.c"
+
+#include "nvmf/ctrlr.c"
+#include "nvmf/tcp.c"
+
+#define UT_IPV4_ADDR "192.168.0.1"
+#define UT_PORT "4420"
+#define UT_NVMF_ADRFAM_INVALID 0xf
+#define UT_MAX_QUEUE_DEPTH 128
+#define UT_MAX_QPAIRS_PER_CTRLR 128
+#define UT_IN_CAPSULE_DATA_SIZE 1024
+#define UT_MAX_IO_SIZE 4096
+#define UT_IO_UNIT_SIZE 1024
+#define UT_MAX_AQ_DEPTH 64
+#define UT_SQ_HEAD_MAX 128
+#define UT_NUM_SHARED_BUFFERS 128
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
+ int,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
+ 0);
+
+DEFINE_STUB(nvmf_subsystem_add_ctrlr,
+ int,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
+ 0);
+
+DEFINE_STUB(nvmf_subsystem_get_ctrlr,
+ struct spdk_nvmf_ctrlr *,
+ (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
+ struct spdk_nvmf_subsystem *,
+ (struct spdk_nvmf_tgt *tgt, const char *subnqn),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
+ true);
+
+DEFINE_STUB_V(nvmf_get_discovery_log_page,
+ (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
+ uint32_t iovcnt, uint64_t offset, uint32_t length));
+
+DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
+ true);
+
+DEFINE_STUB(nvmf_ctrlr_dsm_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
+ bool,
+ (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
+ false);
+
+DEFINE_STUB(nvmf_transport_req_complete,
+ int,
+ (struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
+ (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
+ struct spdk_nvmf_transport *transport));
+
+DEFINE_STUB(spdk_sock_get_optimal_sock_group,
+ int,
+ (struct spdk_sock *sock, struct spdk_sock_group **group),
+ 0);
+
+DEFINE_STUB(spdk_sock_group_get_ctx,
+ void *,
+ (struct spdk_sock_group *group),
+ NULL);
+
+DEFINE_STUB(spdk_sock_set_priority,
+ int,
+ (struct spdk_sock *sock, int priority),
+ 0);
+
+DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
+
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));
+
+DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
+
+DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
+
+DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
+
+struct spdk_trace_histories *g_trace_histories;
+
+struct spdk_bdev {
+ int ut_mock;
+ uint64_t blockcnt;
+};
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+void
+spdk_trace_register_object(uint8_t type, char id_prefix)
+{
+}
+
+void
+spdk_trace_register_description(const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name)
+{
+}
+
+void
+_spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1)
+{
+}
+
+const char *
+spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
+{
+ switch (trtype) {
+ case SPDK_NVME_TRANSPORT_PCIE:
+ return "PCIe";
+ case SPDK_NVME_TRANSPORT_RDMA:
+ return "RDMA";
+ case SPDK_NVME_TRANSPORT_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+int
+spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
+{
+ int len, i;
+
+ if (trstring == NULL) {
+ return -EINVAL;
+ }
+
+ len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
+ if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
+ return -EINVAL;
+ }
+
+	/* Copy the input into the official trstring, converted to uppercase (e.g. "tcp" -> "TCP"). */
+ for (i = 0; i < len; i++) {
+ trid->trstring[i] = toupper(trstring[i]);
+ }
+ return 0;
+}
+
+int
+spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
+{
+ return 0;
+}
+
+int
+spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
+ struct spdk_nvmf_transport_poll_group *group,
+ struct spdk_nvmf_transport *transport,
+ uint32_t length)
+{
+	/* A length of one io unit or more will fail. */
+ if (length >= transport->opts.io_unit_size) {
+ return -EINVAL;
+ }
+
+ req->iovcnt = 1;
+ req->iov[0].iov_base = (void *)0xDEADBEEF;
+
+ return 0;
+}
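+
+/* Note: the in-capsule data test below leans on this stub: an SGL length of
+ * UT_IO_UNIT_SIZE keeps the request waiting for a buffer, while
+ * UT_IO_UNIT_SIZE - 1 lets the allocation succeed.
+ */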
+
+void
+nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
+ bool dif_insert_or_strip)
+{
+ uint64_t num_blocks;
+
+ SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
+ num_blocks = ns->bdev->blockcnt;
+ nsdata->nsze = num_blocks;
+ nsdata->ncap = num_blocks;
+ nsdata->nuse = num_blocks;
+ nsdata->nlbaf = 0;
+ nsdata->flbas.format = 0;
+ nsdata->lbaf[0].lbads = spdk_u32log2(512);
+}
+
+const char *
+spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
+{
+ return subsystem->sn;
+}
+
+const char *
+spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
+{
+ return subsystem->mn;
+}
+
+void
+spdk_trace_add_register_fn(struct spdk_trace_register_fn *reg_fn)
+{
+}
+
+static void
+test_nvmf_tcp_create(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_nvmf_transport *transport;
+ struct spdk_nvmf_tcp_transport *ttransport;
+ struct spdk_nvmf_transport_opts opts;
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ /* case 1 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_IO_UNIT_SIZE;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+ /* expect success */
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
+ SPDK_CU_ASSERT_FATAL(ttransport != NULL);
+ transport->opts = opts;
+ CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
+ CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
+ CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
+ CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
+ /* destroy transport */
+ spdk_mempool_free(ttransport->transport.data_buf_pool);
+ free(ttransport);
+
+ /* case 2 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_MAX_IO_SIZE + 1;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+	/* expect success - nvmf_tcp_create() caps io_unit_size at max_io_size */
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
+ SPDK_CU_ASSERT_FATAL(ttransport != NULL);
+ transport->opts = opts;
+ CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
+ CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
+ CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
+ CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
+ /* destroy transport */
+ spdk_mempool_free(ttransport->transport.data_buf_pool);
+ free(ttransport);
+
+ /* case 3 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = 16;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+	/* expect failure */
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NULL(transport);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+static void
+test_nvmf_tcp_destroy(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_nvmf_transport *transport;
+ struct spdk_nvmf_transport_opts opts;
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ /* case 1 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_IO_UNIT_SIZE;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ transport->opts = opts;
+ /* destroy transport */
+ CU_ASSERT(nvmf_tcp_destroy(transport) == 0);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+static void
+test_nvmf_tcp_poll_group_create(void)
+{
+ struct spdk_nvmf_transport *transport;
+ struct spdk_nvmf_transport_poll_group *group;
+ struct spdk_thread *thread;
+ struct spdk_nvmf_transport_opts opts;
+ struct spdk_sock_group grp = {};
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_IO_UNIT_SIZE;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ transport->opts = opts;
+ MOCK_SET(spdk_sock_group_create, &grp);
+ group = nvmf_tcp_poll_group_create(transport);
+ MOCK_CLEAR_P(spdk_sock_group_create);
+ SPDK_CU_ASSERT_FATAL(group);
+ group->transport = transport;
+ nvmf_tcp_poll_group_destroy(group);
+ nvmf_tcp_destroy(transport);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+static void
+test_nvmf_tcp_send_c2h_data(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_nvmf_tcp_transport ttransport = {};
+ struct spdk_nvmf_tcp_qpair tqpair = {};
+ struct spdk_nvmf_tcp_req tcp_req = {};
+ struct nvme_tcp_pdu pdu = {};
+ struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ tcp_req.pdu = &pdu;
+ tcp_req.req.length = 300;
+
+ tqpair.qpair.transport = &ttransport.transport;
+ TAILQ_INIT(&tqpair.send_queue);
+
+ /* Set qpair state to make unrelated operations NOP */
+ tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
+ tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
+
+ tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
+
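+	/* Three iovecs totalling 300 bytes (101 + 100 + 99), matching req.length. */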
+ tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
+ tcp_req.req.iov[0].iov_len = 101;
+ tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
+ tcp_req.req.iov[1].iov_len = 100;
+ tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
+ tcp_req.req.iov[2].iov_len = 99;
+ tcp_req.req.iovcnt = 3;
+ tcp_req.req.length = 300;
+
+ nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
+
+ CU_ASSERT(TAILQ_FIRST(&tqpair.send_queue) == &pdu);
+ TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
+
+ c2h_data = &pdu.hdr.c2h_data;
+ CU_ASSERT(c2h_data->datao == 0);
+	CU_ASSERT(c2h_data->datal == 300);
+ CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
+ CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
+
+ CU_ASSERT(pdu.data_iovcnt == 3);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 101);
+ CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
+ CU_ASSERT(pdu.data_iov[1].iov_len == 100);
+ CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
+ CU_ASSERT(pdu.data_iov[2].iov_len == 99);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)
+
+static void
+test_nvmf_tcp_h2c_data_hdr_handle(void)
+{
+ struct spdk_nvmf_tcp_transport ttransport = {};
+ struct spdk_nvmf_tcp_qpair tqpair = {};
+ struct nvme_tcp_pdu pdu = {};
+ struct spdk_nvmf_tcp_req tcp_req = {};
+ struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
+
+ TAILQ_INIT(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]);
+
+ /* Set qpair state to make unrelated operations NOP */
+ tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
+ tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
+
+ tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
+ tcp_req.req.iov[0].iov_len = 101;
+ tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
+ tcp_req.req.iov[1].iov_len = 99;
+ tcp_req.req.iovcnt = 2;
+ tcp_req.req.length = 200;
+
+ tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
+ tcp_req.req.cmd->nvme_cmd.cid = 1;
+ tcp_req.ttag = 2;
+
+ TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
+ &tcp_req, state_link);
+
+ h2c_data = &pdu.hdr.h2c_data;
+ h2c_data->cccid = 1;
+ h2c_data->ttag = 2;
+ h2c_data->datao = 0;
+ h2c_data->datal = 200;
+
+ nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);
+
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 101);
+ CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
+ CU_ASSERT(pdu.data_iov[1].iov_len == 99);
+
+ CU_ASSERT(TAILQ_FIRST(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]) ==
+ &tcp_req);
+ TAILQ_REMOVE(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
+ &tcp_req, state_link);
+}
+
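+/* Exercise capsule command handling while an earlier request is parked in
+ * pending_buf_queue: the new in-capsule command is matched to a free request
+ * and the qpair moves to AWAIT_PDU_PAYLOAD without jumping the queue.
+ */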
+static void
+test_nvmf_tcp_incapsule_data_handle(void)
+{
+ struct spdk_nvmf_tcp_transport ttransport = {};
+ struct spdk_nvmf_tcp_qpair tqpair = {};
+ struct nvme_tcp_pdu *pdu;
+ union nvmf_c2h_msg rsp0 = {};
+ union nvmf_c2h_msg rsp = {};
+
+ struct spdk_nvmf_request *req_temp = NULL;
+ struct spdk_nvmf_tcp_req tcp_req2 = {};
+ struct spdk_nvmf_tcp_req tcp_req1 = {};
+
+ struct spdk_nvme_tcp_cmd *capsule_data;
+ struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
+ struct spdk_nvme_sgl_descriptor *sgl;
+
+ struct spdk_nvmf_transport_poll_group *group;
+ struct spdk_nvmf_tcp_poll_group tcp_group = {};
+ struct spdk_sock_group grp = {};
+ int i = 0;
+
+ ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
+ ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
+
+ tcp_group.sock_group = &grp;
+ TAILQ_INIT(&tcp_group.qpairs);
+ group = &tcp_group.group;
+ group->transport = &ttransport.transport;
+ STAILQ_INIT(&group->pending_buf_queue);
+ tqpair.group = &tcp_group;
+
+	/* Init the tqpair state queues; the pdu in pdu_in_progress will wait for a buffer. */
+ for (i = TCP_REQUEST_STATE_FREE; i < TCP_REQUEST_NUM_STATES; i++) {
+ TAILQ_INIT(&tqpair.state_queue[i]);
+ }
+
+ TAILQ_INIT(&tqpair.send_queue);
+
+ TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_FREE], &tcp_req2, state_link);
+ tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
+ tqpair.qpair.transport = &ttransport.transport;
+ tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
+ tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
+ tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+
+	/* Init tcp_req2 as an empty request on the tqpair's TCP_REQUEST_STATE_FREE queue. */
+ tcp_req2.req.qpair = &tqpair.qpair;
+ tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
+ tcp_req2.req.rsp = &rsp;
+
+ /* init tcp_req1 */
+ tcp_req1.req.qpair = &tqpair.qpair;
+ tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
+ tcp_req1.req.rsp = &rsp0;
+ tcp_req1.state = TCP_REQUEST_STATE_NEW;
+
+ TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_NEW], &tcp_req1, state_link);
+ tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;
+
+	/* Init the pdu so that it requires an SGL buffer. */
+ pdu = &tqpair.pdu_in_progress;
+ capsule_data = &pdu->hdr.capsule_cmd;
+ nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
+ sgl = &capsule_data->ccsqe.dptr.sgl1;
+
+ capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
+ capsule_data->common.hlen = sizeof(*capsule_data);
+ capsule_data->common.plen = 1096;
+ capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;
+
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
+ sgl->unkeyed.length = UT_IO_UNIT_SIZE;
+
+ nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
+
+	/* Insert tcp_req1 into pending_buf_queue; it takes precedence over any later request. */
+ nvmf_tcp_req_process(&ttransport, &tcp_req1);
+ CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
+
+ sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;
+
+	/* Process the tqpair's capsule cmd, but tcp_req1 still remains in pending_buf_queue. */
+ nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, &tqpair.pdu_in_progress);
+ CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
+ CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
+ STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
+ if (req_temp == &tcp_req2.req) {
+ break;
+ }
+ }
+ CU_ASSERT(req_temp == NULL);
+ CU_ASSERT(tqpair.pdu_in_progress.req == (void *)&tcp_req2);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvmf_tcp_create);
+ CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
+ CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
+ CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
+ CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
+ CU_ADD_TEST(suite, test_nvmf_tcp_incapsule_data_handle);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/reduce/Makefile b/src/spdk/test/unit/lib/reduce/Makefile
new file mode 100644
index 000000000..7c901ac18
--- /dev/null
+++ b/src/spdk/test/unit/lib/reduce/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = reduce.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/reduce/reduce.c/.gitignore b/src/spdk/test/unit/lib/reduce/reduce.c/.gitignore
new file mode 100644
index 000000000..be248403f
--- /dev/null
+++ b/src/spdk/test/unit/lib/reduce/reduce.c/.gitignore
@@ -0,0 +1 @@
+reduce_ut
diff --git a/src/spdk/test/unit/lib/reduce/reduce.c/Makefile b/src/spdk/test/unit/lib/reduce/reduce.c/Makefile
new file mode 100644
index 000000000..4a704c660
--- /dev/null
+++ b/src/spdk/test/unit/lib/reduce/reduce.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = reduce_ut.c
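+# Wrap unlink() so the unit test can intercept removal of the pm metadata file.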
+LDFLAGS += -Wl,--wrap,unlink
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/reduce/reduce.c/reduce_ut.c b/src/spdk/test/unit/lib/reduce/reduce.c/reduce_ut.c
new file mode 100644
index 000000000..9c94a4ac6
--- /dev/null
+++ b/src/spdk/test/unit/lib/reduce/reduce.c/reduce_ut.c
@@ -0,0 +1,1300 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "reduce/reduce.c"
+#include "spdk_internal/mock.h"
+#include "common/lib/test_env.c"
+
+static struct spdk_reduce_vol *g_vol;
+static int g_reduce_errno;
+static char *g_volatile_pm_buf;
+static size_t g_volatile_pm_buf_len;
+static char *g_persistent_pm_buf;
+static size_t g_persistent_pm_buf_len;
+static char *g_backing_dev_buf;
+static char g_path[REDUCE_PATH_MAX];
+static char *g_decomp_buf;
+
+#define TEST_MD_PATH "/tmp"
+
+enum ut_reduce_bdev_io_type {
+ UT_REDUCE_IO_READV = 1,
+ UT_REDUCE_IO_WRITEV = 2,
+ UT_REDUCE_IO_UNMAP = 3,
+};
+
+struct ut_reduce_bdev_io {
+ enum ut_reduce_bdev_io_type type;
+ struct spdk_reduce_backing_dev *backing_dev;
+ struct iovec *iov;
+ int iovcnt;
+ uint64_t lba;
+ uint32_t lba_count;
+ struct spdk_reduce_vol_cb_args *args;
+ TAILQ_ENTRY(ut_reduce_bdev_io) link;
+};
+
+static bool g_defer_bdev_io = false;
+static TAILQ_HEAD(, ut_reduce_bdev_io) g_pending_bdev_io =
+ TAILQ_HEAD_INITIALIZER(g_pending_bdev_io);
+static uint32_t g_pending_bdev_io_count = 0;
+
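+/* libpmem mocks: pmem_map_file() hands out a volatile copy of the pm file,
+ * while pmem_msync()/pmem_persist() flush ranges into a separate persistent
+ * buffer, so tests can check exactly what reached "persistent" media.
+ */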
+static void
+sync_pm_buf(const void *addr, size_t length)
+{
+ uint64_t offset = (char *)addr - g_volatile_pm_buf;
+
+ memcpy(&g_persistent_pm_buf[offset], addr, length);
+}
+
+int
+pmem_msync(const void *addr, size_t length)
+{
+ sync_pm_buf(addr, length);
+ return 0;
+}
+
+void
+pmem_persist(const void *addr, size_t len)
+{
+ sync_pm_buf(addr, len);
+}
+
+static void
+get_pm_file_size(void)
+{
+ struct spdk_reduce_vol_params params;
+ uint64_t pm_size, expected_pm_size;
+
+ params.backing_io_unit_size = 4096;
+ params.chunk_size = 4096 * 4;
+ params.vol_size = 4096 * 4 * 100;
+
+ pm_size = _get_pm_file_size(&params);
+ expected_pm_size = sizeof(struct spdk_reduce_vol_superblock);
+ /* 100 chunks in logical map * 8 bytes per chunk */
+ expected_pm_size += 100 * sizeof(uint64_t);
+	/* 100 chunks * (chunk struct size + 4 backing io units per chunk * 8 bytes per backing io unit) */
+ expected_pm_size += 100 * (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
+	/* reduce also allocates some extra chunks for in-flight writes when the
+	 * logical map is full. REDUCE_NUM_EXTRA_CHUNKS is a private #define in reduce.c.
+	 * Here we need that number of chunks times (chunk struct size + 4 backing io
+	 * units per chunk * 8 bytes per backing io unit).
+	 */
+ expected_pm_size += REDUCE_NUM_EXTRA_CHUNKS *
+ (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
+	/* reduce will add some padding, so the numbers may not match exactly.
+	 * Make sure they are close, though.
+ */
+ CU_ASSERT((pm_size - expected_pm_size) <= REDUCE_PM_SIZE_ALIGNMENT);
+}
+
+static void
+get_vol_size(void)
+{
+ uint64_t chunk_size, backing_dev_size;
+
+ chunk_size = 16 * 1024;
+ backing_dev_size = 16 * 1024 * 1000;
+ CU_ASSERT(_get_vol_size(chunk_size, backing_dev_size) < backing_dev_size);
+}
+
+void *
+pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
+ size_t *mapped_lenp, int *is_pmemp)
+{
+ CU_ASSERT(g_volatile_pm_buf == NULL);
+ snprintf(g_path, sizeof(g_path), "%s", path);
+ *is_pmemp = 1;
+
+ if (g_persistent_pm_buf == NULL) {
+ g_persistent_pm_buf = calloc(1, len);
+ g_persistent_pm_buf_len = len;
+ SPDK_CU_ASSERT_FATAL(g_persistent_pm_buf != NULL);
+ }
+
+ *mapped_lenp = g_persistent_pm_buf_len;
+ g_volatile_pm_buf = calloc(1, g_persistent_pm_buf_len);
+ SPDK_CU_ASSERT_FATAL(g_volatile_pm_buf != NULL);
+ memcpy(g_volatile_pm_buf, g_persistent_pm_buf, g_persistent_pm_buf_len);
+ g_volatile_pm_buf_len = g_persistent_pm_buf_len;
+
+ return g_volatile_pm_buf;
+}
+
+int
+pmem_unmap(void *addr, size_t len)
+{
+ CU_ASSERT(addr == g_volatile_pm_buf);
+ CU_ASSERT(len == g_volatile_pm_buf_len);
+ free(g_volatile_pm_buf);
+ g_volatile_pm_buf = NULL;
+ g_volatile_pm_buf_len = 0;
+
+ return 0;
+}
+
+static void
+persistent_pm_buf_destroy(void)
+{
+ CU_ASSERT(g_persistent_pm_buf != NULL);
+ free(g_persistent_pm_buf);
+ g_persistent_pm_buf = NULL;
+ g_persistent_pm_buf_len = 0;
+}
+
+static void
+unlink_cb(void)
+{
+ persistent_pm_buf_destroy();
+}
+
+static void
+init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
+{
+ g_vol = vol;
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
+{
+ g_vol = vol;
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+unload_cb(void *cb_arg, int reduce_errno)
+{
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+init_failure(void)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+
+ backing_dev.blocklen = 512;
+ /* This blockcnt is too small for a reduce vol - there needs to be
+ * enough space for at least REDUCE_NUM_EXTRA_CHUNKS + 1 chunks.
+ */
+ backing_dev.blockcnt = 20;
+
+ params.vol_size = 0;
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = backing_dev.blocklen;
+ params.logical_block_size = 512;
+
+ /* backing_dev has an invalid size. This should fail. */
+ g_vol = NULL;
+ g_reduce_errno = 0;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == -EINVAL);
+ SPDK_CU_ASSERT_FATAL(g_vol == NULL);
+
+	/* backing_dev now has a valid size, but its I/O function pointers are
+	 * still NULL. This should fail.
+	 */
+ backing_dev.blockcnt = 20000;
+
+ g_vol = NULL;
+ g_reduce_errno = 0;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == -EINVAL);
+ SPDK_CU_ASSERT_FATAL(g_vol == NULL);
+}
+
+static void
+backing_dev_readv_execute(struct spdk_reduce_backing_dev *backing_dev,
+ struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ char *offset;
+ int i;
+
+ offset = g_backing_dev_buf + lba * backing_dev->blocklen;
+ for (i = 0; i < iovcnt; i++) {
+ memcpy(iov[i].iov_base, offset, iov[i].iov_len);
+ offset += iov[i].iov_len;
+ }
+ args->cb_fn(args->cb_arg, 0);
+}
+
+static void
+backing_dev_insert_io(enum ut_reduce_bdev_io_type type, struct spdk_reduce_backing_dev *backing_dev,
+ struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ struct ut_reduce_bdev_io *ut_bdev_io;
+
+ ut_bdev_io = calloc(1, sizeof(*ut_bdev_io));
+ SPDK_CU_ASSERT_FATAL(ut_bdev_io != NULL);
+
+ ut_bdev_io->type = type;
+ ut_bdev_io->backing_dev = backing_dev;
+ ut_bdev_io->iov = iov;
+ ut_bdev_io->iovcnt = iovcnt;
+ ut_bdev_io->lba = lba;
+ ut_bdev_io->lba_count = lba_count;
+ ut_bdev_io->args = args;
+ TAILQ_INSERT_TAIL(&g_pending_bdev_io, ut_bdev_io, link);
+ g_pending_bdev_io_count++;
+}
+
+static void
+backing_dev_readv(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
+{
+ if (g_defer_bdev_io == false) {
+ CU_ASSERT(g_pending_bdev_io_count == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
+ backing_dev_readv_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
+ return;
+ }
+
+ backing_dev_insert_io(UT_REDUCE_IO_READV, backing_dev, iov, iovcnt, lba, lba_count, args);
+}
+
+static void
+backing_dev_writev_execute(struct spdk_reduce_backing_dev *backing_dev,
+ struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ char *offset;
+ int i;
+
+ offset = g_backing_dev_buf + lba * backing_dev->blocklen;
+ for (i = 0; i < iovcnt; i++) {
+ memcpy(offset, iov[i].iov_base, iov[i].iov_len);
+ offset += iov[i].iov_len;
+ }
+ args->cb_fn(args->cb_arg, 0);
+}
+
+static void
+backing_dev_writev(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
+{
+ if (g_defer_bdev_io == false) {
+ CU_ASSERT(g_pending_bdev_io_count == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
+ backing_dev_writev_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
+ return;
+ }
+
+ backing_dev_insert_io(UT_REDUCE_IO_WRITEV, backing_dev, iov, iovcnt, lba, lba_count, args);
+}
+
+static void
+backing_dev_unmap_execute(struct spdk_reduce_backing_dev *backing_dev,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ char *offset;
+
+ offset = g_backing_dev_buf + lba * backing_dev->blocklen;
+ memset(offset, 0, lba_count * backing_dev->blocklen);
+ args->cb_fn(args->cb_arg, 0);
+}
+
+static void
+backing_dev_unmap(struct spdk_reduce_backing_dev *backing_dev,
+ uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
+{
+ if (g_defer_bdev_io == false) {
+ CU_ASSERT(g_pending_bdev_io_count == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
+ backing_dev_unmap_execute(backing_dev, lba, lba_count, args);
+ return;
+ }
+
+ backing_dev_insert_io(UT_REDUCE_IO_UNMAP, backing_dev, NULL, 0, lba, lba_count, args);
+}
+
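+/* Complete up to 'count' deferred backing-device I/Os in FIFO order;
+ * count == 0 drains the entire pending queue.
+ */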
+static void
+backing_dev_io_execute(uint32_t count)
+{
+ struct ut_reduce_bdev_io *ut_bdev_io;
+ uint32_t done = 0;
+
+ CU_ASSERT(g_defer_bdev_io == true);
+ while (!TAILQ_EMPTY(&g_pending_bdev_io) && (count == 0 || done < count)) {
+ ut_bdev_io = TAILQ_FIRST(&g_pending_bdev_io);
+ TAILQ_REMOVE(&g_pending_bdev_io, ut_bdev_io, link);
+ g_pending_bdev_io_count--;
+ switch (ut_bdev_io->type) {
+ case UT_REDUCE_IO_READV:
+ backing_dev_readv_execute(ut_bdev_io->backing_dev,
+ ut_bdev_io->iov, ut_bdev_io->iovcnt,
+ ut_bdev_io->lba, ut_bdev_io->lba_count,
+ ut_bdev_io->args);
+ break;
+ case UT_REDUCE_IO_WRITEV:
+ backing_dev_writev_execute(ut_bdev_io->backing_dev,
+ ut_bdev_io->iov, ut_bdev_io->iovcnt,
+ ut_bdev_io->lba, ut_bdev_io->lba_count,
+ ut_bdev_io->args);
+ break;
+ case UT_REDUCE_IO_UNMAP:
+ backing_dev_unmap_execute(ut_bdev_io->backing_dev,
+ ut_bdev_io->lba, ut_bdev_io->lba_count,
+ ut_bdev_io->args);
+ break;
+ default:
+ CU_ASSERT(false);
+ break;
+ }
+ free(ut_bdev_io);
+ done++;
+ }
+}
+
+static int
+ut_compress(char *outbuf, uint32_t *compressed_len, char *inbuf, uint32_t inbuflen)
+{
+ uint32_t len = 0;
+ uint8_t count;
+ char last;
+
+ while (true) {
+ if (inbuflen == 0) {
+ *compressed_len = len;
+ return 0;
+ }
+
+ if (*compressed_len < (len + 2)) {
+ return -ENOSPC;
+ }
+
+ last = *inbuf;
+ count = 1;
+ inbuflen--;
+ inbuf++;
+
+ while (inbuflen > 0 && *inbuf == last && count < UINT8_MAX) {
+ count++;
+ inbuflen--;
+ inbuf++;
+ }
+
+ outbuf[len] = count;
+ outbuf[len + 1] = last;
+ len += 2;
+ }
+}
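+
+/* Example: the run-length scheme above encodes "AAAABBC" as the (count, byte)
+ * pairs {4, 'A'}, {2, 'B'}, {1, 'C'} - 6 output bytes for 7 input bytes.
+ * ut_decompress() below reverses the transform.
+ */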
+
+static int
+ut_decompress(uint8_t *outbuf, uint32_t *decompressed_len, uint8_t *inbuf, uint32_t inbuflen)
+{
+	uint32_t len = 0;
+
+	SPDK_CU_ASSERT_FATAL(inbuflen % 2 == 0);
+
+	while (true) {
+		if (inbuflen == 0) {
+			*decompressed_len = len;
+			return 0;
+		}
+
+		if ((len + inbuf[0]) > *decompressed_len) {
+ return -ENOSPC;
+ }
+
+ memset(outbuf, inbuf[1], inbuf[0]);
+ outbuf += inbuf[0];
+ len += inbuf[0];
+ inbuflen -= 2;
+ inbuf += 2;
+ }
+}
+
+static void
+ut_build_data_buffer(uint8_t *data, uint32_t data_len, uint8_t init_val, uint32_t repeat)
+{
+ uint32_t _repeat = repeat;
+
+ SPDK_CU_ASSERT_FATAL(repeat > 0);
+
+ while (data_len > 0) {
+ *data = init_val;
+ data++;
+ data_len--;
+ _repeat--;
+ if (_repeat == 0) {
+ init_val++;
+ _repeat = repeat;
+ }
+ }
+}
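+
+/* Example: ut_build_data_buffer(data, 6, 0, 2) yields 0, 0, 1, 1, 2, 2. */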
+
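+/* The compress/decompress callbacks linearize scattered iovecs through
+ * g_decomp_buf (sized to one chunk) before applying the RLE helpers, and
+ * report the resulting length via the callback's status argument.
+ */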
+static void
+backing_dev_compress(struct spdk_reduce_backing_dev *backing_dev,
+ struct iovec *src_iov, int src_iovcnt,
+ struct iovec *dst_iov, int dst_iovcnt,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ uint32_t compressed_len;
+ uint64_t total_length = 0;
+ char *buf = g_decomp_buf;
+ int rc, i;
+
+ CU_ASSERT(dst_iovcnt == 1);
+
+ for (i = 0; i < src_iovcnt; i++) {
+ memcpy(buf, src_iov[i].iov_base, src_iov[i].iov_len);
+ buf += src_iov[i].iov_len;
+ total_length += src_iov[i].iov_len;
+ }
+
+ compressed_len = dst_iov[0].iov_len;
+ rc = ut_compress(dst_iov[0].iov_base, &compressed_len,
+ g_decomp_buf, total_length);
+
+ args->cb_fn(args->cb_arg, rc ? rc : (int)compressed_len);
+}
+
+static void
+backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
+ struct iovec *src_iov, int src_iovcnt,
+ struct iovec *dst_iov, int dst_iovcnt,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ uint32_t decompressed_len = 0;
+ char *buf = g_decomp_buf;
+ int rc, i;
+
+ CU_ASSERT(src_iovcnt == 1);
+
+ for (i = 0; i < dst_iovcnt; i++) {
+ decompressed_len += dst_iov[i].iov_len;
+ }
+
+ rc = ut_decompress(g_decomp_buf, &decompressed_len,
+ src_iov[0].iov_base, src_iov[0].iov_len);
+
+ for (i = 0; i < dst_iovcnt; i++) {
+ memcpy(dst_iov[i].iov_base, buf, dst_iov[i].iov_len);
+ buf += dst_iov[i].iov_len;
+ }
+
+ args->cb_fn(args->cb_arg, rc ? rc : (int)decompressed_len);
+}
+
+static void
+backing_dev_destroy(struct spdk_reduce_backing_dev *backing_dev)
+{
+	/* This buffer is only freed here, not when the vol is unloaded, so that
+	 * init/unload/load scenarios can be tested.
+ */
+ free(g_backing_dev_buf);
+ free(g_decomp_buf);
+ g_backing_dev_buf = NULL;
+}
+
+static void
+backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce_vol_params *params,
+ uint32_t backing_blocklen)
+{
+ int64_t size;
+
+ size = 4 * 1024 * 1024;
+ backing_dev->blocklen = backing_blocklen;
+ backing_dev->blockcnt = size / backing_dev->blocklen;
+ backing_dev->readv = backing_dev_readv;
+ backing_dev->writev = backing_dev_writev;
+ backing_dev->unmap = backing_dev_unmap;
+ backing_dev->compress = backing_dev_compress;
+ backing_dev->decompress = backing_dev_decompress;
+
+ g_decomp_buf = calloc(1, params->chunk_size);
+ SPDK_CU_ASSERT_FATAL(g_decomp_buf != NULL);
+
+ g_backing_dev_buf = calloc(1, size);
+ SPDK_CU_ASSERT_FATAL(g_backing_dev_buf != NULL);
+}
+
+static void
+init_md(void)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_vol_params *persistent_params;
+ struct spdk_reduce_backing_dev backing_dev = {};
+ struct spdk_uuid uuid;
+ uint64_t *entry;
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 512;
+ params.logical_block_size = 512;
+
+ backing_dev_init(&backing_dev, &params, 512);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ /* Confirm that reduce persisted the params to metadata. */
+ CU_ASSERT(memcmp(g_persistent_pm_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
+ persistent_params = (struct spdk_reduce_vol_params *)(g_persistent_pm_buf + 8);
+ CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
+ /* Now confirm that contents of pm_file after the superblock have been initialized
+ * to REDUCE_EMPTY_MAP_ENTRY.
+ */
+ entry = (uint64_t *)(g_persistent_pm_buf + sizeof(struct spdk_reduce_vol_superblock));
+ while (entry != (uint64_t *)(g_persistent_pm_buf + g_vol->pm_file.size)) {
+ CU_ASSERT(*entry == REDUCE_EMPTY_MAP_ENTRY);
+ entry++;
+ }
+
+ /* Check that the pm file path was constructed correctly. It should be in
+ * the form:
+ * TEST_MD_PATH + "/" + <uuid string>
+ */
+ CU_ASSERT(strncmp(&g_path[0], TEST_MD_PATH, strlen(TEST_MD_PATH)) == 0);
+ CU_ASSERT(g_path[strlen(TEST_MD_PATH)] == '/');
+ CU_ASSERT(spdk_uuid_parse(&uuid, &g_path[strlen(TEST_MD_PATH) + 1]) == 0);
+ CU_ASSERT(spdk_uuid_compare(&uuid, spdk_reduce_vol_get_uuid(g_vol)) == 0);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ CU_ASSERT(g_volatile_pm_buf == NULL);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+_init_backing_dev(uint32_t backing_blocklen)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_vol_params *persistent_params;
+ struct spdk_reduce_backing_dev backing_dev = {};
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 512;
+ params.logical_block_size = 512;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, backing_blocklen);
+
+ g_vol = NULL;
+ memset(g_path, 0, sizeof(g_path));
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
+ /* Confirm that libreduce persisted the params to the backing device. */
+ CU_ASSERT(memcmp(g_backing_dev_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
+ persistent_params = (struct spdk_reduce_vol_params *)(g_backing_dev_buf + 8);
+ CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
+ /* Confirm that the path to the persistent memory metadata file was persisted to
+ * the backing device.
+ */
+ CU_ASSERT(strncmp(g_path,
+ g_backing_dev_buf + REDUCE_BACKING_DEV_PATH_OFFSET,
+ REDUCE_PATH_MAX) == 0);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+init_backing_dev(void)
+{
+ _init_backing_dev(512);
+ _init_backing_dev(4096);
+}
+
+static void
+_load(uint32_t backing_blocklen)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ char pmem_file_path[REDUCE_PATH_MAX];
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 512;
+ params.logical_block_size = 512;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, backing_blocklen);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
+ memcpy(pmem_file_path, g_path, sizeof(pmem_file_path));
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_vol = NULL;
+ memset(g_path, 0, sizeof(g_path));
+ g_reduce_errno = -1;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(strncmp(g_path, pmem_file_path, sizeof(pmem_file_path)) == 0);
+ CU_ASSERT(g_vol->params.vol_size == params.vol_size);
+ CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
+ CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+load(void)
+{
+ _load(512);
+ _load(4096);
+}
+
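+/* Translate a logical block offset into its chunk map index via the pm logical map. */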
+static uint64_t
+_vol_get_chunk_map_index(struct spdk_reduce_vol *vol, uint64_t offset)
+{
+ uint64_t logical_map_index = offset / vol->logical_blocks_per_chunk;
+
+ return vol->pm_logical_map[logical_map_index];
+}
+
+static void
+write_cb(void *arg, int reduce_errno)
+{
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+read_cb(void *arg, int reduce_errno)
+{
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+_write_maps(uint32_t backing_blocklen)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ struct iovec iov;
+ const int bufsize = 16 * 1024; /* chunk size */
+ char buf[bufsize];
+ uint32_t num_lbas, i;
+ uint64_t old_chunk0_map_index, new_chunk0_map_index;
+ struct spdk_reduce_chunk_map *old_chunk0_map, *new_chunk0_map;
+
+ params.chunk_size = bufsize;
+ params.backing_io_unit_size = 4096;
+ params.logical_block_size = 512;
+ num_lbas = bufsize / params.logical_block_size;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, backing_blocklen);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ for (i = 0; i < g_vol->params.vol_size / g_vol->params.chunk_size; i++) {
+ CU_ASSERT(_vol_get_chunk_map_index(g_vol, i) == REDUCE_EMPTY_MAP_ENTRY);
+ }
+
+ ut_build_data_buffer(buf, bufsize, 0x00, 1);
+ iov.iov_base = buf;
+ iov.iov_len = bufsize;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ old_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
+ CU_ASSERT(old_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == true);
+
+ old_chunk0_map = _reduce_vol_get_chunk_map(g_vol, old_chunk0_map_index);
+ for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
+ CU_ASSERT(old_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
+ old_chunk0_map->io_unit_index[i]) == true);
+ }
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ new_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
+ CU_ASSERT(new_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
+ CU_ASSERT(new_chunk0_map_index != old_chunk0_map_index);
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, new_chunk0_map_index) == true);
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == false);
+
+ for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
+ old_chunk0_map->io_unit_index[i]) == false);
+ }
+
+ new_chunk0_map = _reduce_vol_get_chunk_map(g_vol, new_chunk0_map_index);
+ for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
+ CU_ASSERT(new_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
+ new_chunk0_map->io_unit_index[i]) == true);
+ }
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(g_vol->params.vol_size == params.vol_size);
+ CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
+ CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+write_maps(void)
+{
+ _write_maps(512);
+ _write_maps(4096);
+}
+
+static void
+_read_write(uint32_t backing_blocklen)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ struct iovec iov;
+ char buf[16 * 1024]; /* chunk size */
+ char compare_buf[16 * 1024];
+ uint32_t i;
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 4096;
+ params.logical_block_size = 512;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, backing_blocklen);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ /* Write 0xAA to 2 512-byte logical blocks, starting at LBA 2. */
+ memset(buf, 0xAA, 2 * params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = 2 * params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ memset(compare_buf, 0xAA, sizeof(compare_buf));
+ for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
+ memset(buf, 0xFF, params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ switch (i) {
+ case 2:
+ case 3:
+ CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
+ break;
+ default:
+ CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
+ break;
+ }
+ }
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ /* Overwrite what we just wrote with 0xCC */
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(g_vol->params.vol_size == params.vol_size);
+ CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
+ CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);
+
+ memset(buf, 0xCC, 2 * params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = 2 * params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ memset(compare_buf, 0xCC, sizeof(compare_buf));
+ for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
+ memset(buf, 0xFF, params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ switch (i) {
+ case 2:
+ case 3:
+ CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
+ break;
+ default:
+ CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
+ break;
+ }
+ }
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(g_vol->params.vol_size == params.vol_size);
+ CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
+ CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);
+
+ g_reduce_errno = -1;
+
+ /* Write 0xBB to 2 512-byte logical blocks, starting at LBA 37.
+ * This is writing into the second chunk of the volume. This also
+ * enables implicitly checking that we reloaded the bit arrays
+ * correctly - making sure we don't use the first chunk map again
+ * for this new write - the first chunk map was already used by the
+ * write from before we unloaded and reloaded.
+ */
+ memset(buf, 0xBB, 2 * params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = 2 * params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 37, 2, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ for (i = 0; i < 2 * params.chunk_size / params.logical_block_size; i++) {
+ memset(buf, 0xFF, params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ switch (i) {
+ case 2:
+ case 3:
+ memset(compare_buf, 0xCC, sizeof(compare_buf));
+ CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
+ break;
+ case 37:
+ case 38:
+ memset(compare_buf, 0xBB, sizeof(compare_buf));
+ CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
+ break;
+ default:
+ CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
+ break;
+ }
+ }
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+read_write(void)
+{
+ _read_write(512);
+ _read_write(4096);
+}
+
+static void
+_readv_writev(uint32_t backing_blocklen)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ struct iovec iov[REDUCE_MAX_IOVECS + 1];
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 4096;
+ params.logical_block_size = 512;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, backing_blocklen);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, iov, REDUCE_MAX_IOVECS + 1, 2, REDUCE_MAX_IOVECS + 1, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == -EINVAL);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+readv_writev(void)
+{
+ _readv_writev(512);
+ _readv_writev(4096);
+}
+
+static void
+destroy_cb(void *ctx, int reduce_errno)
+{
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+destroy(void)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 512;
+ params.logical_block_size = 512;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, 512);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_reduce_errno = -1;
+ MOCK_CLEAR(spdk_malloc);
+ MOCK_CLEAR(spdk_zmalloc);
+ spdk_reduce_vol_destroy(&backing_dev, destroy_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_reduce_errno = 0;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == -EILSEQ);
+
+ backing_dev_destroy(&backing_dev);
+}
+
+/* This test primarily checks that the reduce unit test infrastructure for asynchronous
+ * backing device I/O operations is working correctly.
+ */
+static void
+defer_bdev_io(void)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ const uint32_t logical_block_size = 512;
+ struct iovec iov;
+ char buf[logical_block_size];
+ char compare_buf[logical_block_size];
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 4096;
+ params.logical_block_size = logical_block_size;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, 512);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ /* Write 0xAA to 1 512-byte logical block. */
+ memset(buf, 0xAA, params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = params.logical_block_size;
+ g_reduce_errno = -100;
+ g_defer_bdev_io = true;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
+ /* Callback should not have executed, so this should still equal -100. */
+ CU_ASSERT(g_reduce_errno == -100);
+ CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
+ /* We wrote to just 512 bytes of one chunk which was previously unallocated. This
+ * should result in 1 pending I/O since the rest of this chunk will be zeroes and
+ * very compressible.
+ */
+ CU_ASSERT(g_pending_bdev_io_count == 1);
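+	/* Why exactly one: the 16 KiB chunk spans 16384 / 4096 = 4 backing
+	 * io units uncompressed, but one data block plus zeroes compresses
+	 * to a payload that fits in a single 4 KiB backing io unit, so only
+	 * one backing write is issued.
+	 */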
+
+ backing_dev_io_execute(0);
+ CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_defer_bdev_io = false;
+ memset(compare_buf, 0xAA, sizeof(compare_buf));
+ memset(buf, 0xFF, sizeof(buf));
+ iov.iov_base = buf;
+ iov.iov_len = params.logical_block_size;
+ g_reduce_errno = -100;
+ spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 1, read_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
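+
+/* A minimal sketch of the deferral pattern the test above relies on. The
+ * names here (ut_sketch_*) are hypothetical and for illustration only; the
+ * real hooks (g_defer_bdev_io, g_pending_bdev_io, backing_dev_io_execute)
+ * are defined earlier in this file. The idea: when deferral is enabled, the
+ * backing device parks each completion on a list instead of running it, and
+ * a drain helper later completes everything in submission order.
+ */
+struct ut_sketch_io {
+	void (*cpl_fn)(void *cb_arg, int status);
+	void *cb_arg;
+	TAILQ_ENTRY(ut_sketch_io) link;
+};
+
+static TAILQ_HEAD(, ut_sketch_io) g_sketch_pending_io =
+	TAILQ_HEAD_INITIALIZER(g_sketch_pending_io);
+static bool g_sketch_defer_io = false;
+
+static void __attribute__((unused))
+ut_sketch_submit(struct ut_sketch_io *io)
+{
+	if (g_sketch_defer_io) {
+		/* Park the completion; it runs later from the drain helper. */
+		TAILQ_INSERT_TAIL(&g_sketch_pending_io, io, link);
+	} else {
+		io->cpl_fn(io->cb_arg, 0);
+	}
+}
+
+static void __attribute__((unused))
+ut_sketch_drain(void)
+{
+	struct ut_sketch_io *io;
+
+	/* Complete deferred I/O in submission order. */
+	while ((io = TAILQ_FIRST(&g_sketch_pending_io)) != NULL) {
+		TAILQ_REMOVE(&g_sketch_pending_io, io, link);
+		io->cpl_fn(io->cb_arg, 0);
+	}
+}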
+
+static void
+overlapped(void)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ const uint32_t logical_block_size = 512;
+ struct iovec iov;
+ char buf[2 * logical_block_size];
+ char compare_buf[2 * logical_block_size];
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 4096;
+ params.logical_block_size = logical_block_size;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, 512);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ /* Write 0xAA to 1 512-byte logical block. */
+ memset(buf, 0xAA, logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = logical_block_size;
+ g_reduce_errno = -100;
+ g_defer_bdev_io = true;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
+ /* Callback should not have executed, so this should still equal -100. */
+ CU_ASSERT(g_reduce_errno == -100);
+ CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
+ /* We wrote to just 512 bytes of one chunk which was previously unallocated. This
+ * should result in 1 pending I/O since the rest of this chunk will be zeroes and
+ * very compressible.
+ */
+ CU_ASSERT(g_pending_bdev_io_count == 1);
+
+ /* Now do an overlapped I/O to the same chunk. */
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 1, 1, write_cb, NULL);
+ /* Callback should not have executed, so this should still equal -100. */
+ CU_ASSERT(g_reduce_errno == -100);
+ CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
+ /* The second I/O overlaps with the first one. So we should only see pending bdev_io
+ * related to the first I/O here - the second one won't start until the first one is completed.
+ */
+ CU_ASSERT(g_pending_bdev_io_count == 1);
+
+ backing_dev_io_execute(0);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_defer_bdev_io = false;
+ memset(compare_buf, 0xAA, sizeof(compare_buf));
+ memset(buf, 0xFF, sizeof(buf));
+ iov.iov_base = buf;
+ iov.iov_len = 2 * logical_block_size;
+ g_reduce_errno = -100;
+ spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 2, read_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ CU_ASSERT(memcmp(buf, compare_buf, 2 * logical_block_size) == 0);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+#define BUFSIZE 4096
+
+static void
+compress_algorithm(void)
+{
+ uint8_t original_data[BUFSIZE];
+ uint8_t compressed_data[BUFSIZE];
+ uint8_t decompressed_data[BUFSIZE];
+ uint32_t compressed_len, decompressed_len;
+ int rc;
+
+ ut_build_data_buffer(original_data, BUFSIZE, 0xAA, BUFSIZE);
+ compressed_len = sizeof(compressed_data);
+ rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(compressed_len == 2);
+ CU_ASSERT(compressed_data[0] == UINT8_MAX);
+ CU_ASSERT(compressed_data[1] == 0xAA);
+
+ decompressed_len = sizeof(decompressed_data);
+ rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(decompressed_len == UINT8_MAX);
+ CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);
+
+ compressed_len = sizeof(compressed_data);
+ rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX + 1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(compressed_len == 4);
+ CU_ASSERT(compressed_data[0] == UINT8_MAX);
+ CU_ASSERT(compressed_data[1] == 0xAA);
+ CU_ASSERT(compressed_data[2] == 1);
+ CU_ASSERT(compressed_data[3] == 0xAA);
+
+ decompressed_len = sizeof(decompressed_data);
+ rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(decompressed_len == UINT8_MAX + 1);
+ CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);
+
+ ut_build_data_buffer(original_data, BUFSIZE, 0x00, 1);
+ compressed_len = sizeof(compressed_data);
+ rc = ut_compress(compressed_data, &compressed_len, original_data, 2048);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(compressed_len == 4096);
+ CU_ASSERT(compressed_data[0] == 1);
+ CU_ASSERT(compressed_data[1] == 0);
+ CU_ASSERT(compressed_data[4094] == 1);
+ CU_ASSERT(compressed_data[4095] == 0xFF);
+
+ decompressed_len = sizeof(decompressed_data);
+ rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(decompressed_len == 2048);
+ CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);
+
+ compressed_len = sizeof(compressed_data);
+ rc = ut_compress(compressed_data, &compressed_len, original_data, 2049);
+ CU_ASSERT(rc == -ENOSPC);
+}
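+
+/* Reference sketch of the byte-wise run-length encoding the assertions above
+ * assume: the output is a stream of (count, value) byte pairs, each covering
+ * up to UINT8_MAX repeats, with -ENOSPC returned when the destination fills
+ * up. ut_rle_compress_sketch() is a hypothetical name used for illustration;
+ * the test exercises ut_compress()/ut_decompress() defined earlier in this
+ * file.
+ */
+static int __attribute__((unused))
+ut_rle_compress_sketch(uint8_t *dst, uint32_t *dst_len,
+		       const uint8_t *src, uint32_t src_len)
+{
+	uint32_t out = 0;
+
+	while (src_len > 0) {
+		uint8_t value = *src;
+		uint8_t count = 0;
+
+		/* Consume a run of identical bytes, capped at UINT8_MAX. */
+		while (src_len > 0 && *src == value && count < UINT8_MAX) {
+			count++;
+			src++;
+			src_len--;
+		}
+
+		/* Each run costs two output bytes: count, then value. */
+		if (out + 2 > *dst_len) {
+			return -ENOSPC;
+		}
+		dst[out++] = count;
+		dst[out++] = value;
+	}
+
+	*dst_len = out;
+	return 0;
+}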
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("reduce", NULL, NULL);
+
+ CU_ADD_TEST(suite, get_pm_file_size);
+ CU_ADD_TEST(suite, get_vol_size);
+ CU_ADD_TEST(suite, init_failure);
+ CU_ADD_TEST(suite, init_md);
+ CU_ADD_TEST(suite, init_backing_dev);
+ CU_ADD_TEST(suite, load);
+ CU_ADD_TEST(suite, write_maps);
+ CU_ADD_TEST(suite, read_write);
+ CU_ADD_TEST(suite, readv_writev);
+ CU_ADD_TEST(suite, destroy);
+ CU_ADD_TEST(suite, defer_bdev_io);
+ CU_ADD_TEST(suite, overlapped);
+ CU_ADD_TEST(suite, compress_algorithm);
+
+ g_unlink_path = g_path;
+ g_unlink_callback = unlink_cb;
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/Makefile b/src/spdk/test/unit/lib/scsi/Makefile
new file mode 100644
index 000000000..8044d3f4e
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = dev.c lun.c scsi.c scsi_bdev.c scsi_pr.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/scsi/dev.c/.gitignore b/src/spdk/test/unit/lib/scsi/dev.c/.gitignore
new file mode 100644
index 000000000..e325086bb
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/dev.c/.gitignore
@@ -0,0 +1 @@
+dev_ut
diff --git a/src/spdk/test/unit/lib/scsi/dev.c/Makefile b/src/spdk/test/unit/lib/scsi/dev.c/Makefile
new file mode 100644
index 000000000..983b3bc9e
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/dev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = dev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c b/src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c
new file mode 100644
index 000000000..f738011fb
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c
@@ -0,0 +1,682 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_cunit.h"
+
+#include "spdk/util.h"
+
+#include "scsi/dev.c"
+#include "scsi/port.c"
+
+#include "spdk_internal/mock.h"
+
+/* Unit test bdev mockup */
+struct spdk_bdev {
+ char name[100];
+};
+
+static struct spdk_bdev g_bdevs[] = {
+ {"malloc0"},
+ {"malloc1"},
+};
+
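+/* These globals let each test choose which initiator port the lun-level
+ * stubs below (scsi_lun_has_pending_tasks/_mgmt_tasks) report as having
+ * pending work, without pulling in the real lun.c task tracking.
+ */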
+static struct spdk_scsi_port *g_initiator_port_with_pending_tasks = NULL;
+static struct spdk_scsi_port *g_initiator_port_with_pending_mgmt_tasks = NULL;
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return bdev->name;
+}
+
+static struct spdk_scsi_task *
+spdk_get_task(uint32_t *owner_task_ctr)
+{
+ struct spdk_scsi_task *task;
+
+ task = calloc(1, sizeof(*task));
+ if (!task) {
+ return NULL;
+ }
+
+ return task;
+}
+
+void
+spdk_scsi_task_put(struct spdk_scsi_task *task)
+{
+ free(task);
+}
+
+struct spdk_scsi_lun *
+scsi_lun_construct(struct spdk_bdev *bdev,
+		   void (*hotremove_cb)(const struct spdk_scsi_lun *, void *),
+		   void *hotremove_ctx)
+{
+ struct spdk_scsi_lun *lun;
+
+ lun = calloc(1, sizeof(struct spdk_scsi_lun));
+ SPDK_CU_ASSERT_FATAL(lun != NULL);
+
+ lun->bdev = bdev;
+
+ return lun;
+}
+
+void
+scsi_lun_destruct(struct spdk_scsi_lun *lun)
+{
+ free(lun);
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ size_t i;
+
+ for (i = 0; i < SPDK_COUNTOF(g_bdevs); i++) {
+ if (strcmp(bdev_name, g_bdevs[i].name) == 0) {
+ return &g_bdevs[i];
+ }
+ }
+
+ return NULL;
+}
+
+DEFINE_STUB_V(scsi_lun_execute_mgmt_task,
+ (struct spdk_scsi_lun *lun, struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(scsi_lun_execute_task,
+ (struct spdk_scsi_lun *lun, struct spdk_scsi_task *task));
+
+DEFINE_STUB(scsi_lun_allocate_io_channel, int,
+ (struct spdk_scsi_lun *lun), 0);
+
+DEFINE_STUB_V(scsi_lun_free_io_channel, (struct spdk_scsi_lun *lun));
+
+bool
+scsi_lun_has_pending_mgmt_tasks(const struct spdk_scsi_lun *lun,
+ const struct spdk_scsi_port *initiator_port)
+{
+ return (g_initiator_port_with_pending_mgmt_tasks == initiator_port);
+}
+
+bool
+scsi_lun_has_pending_tasks(const struct spdk_scsi_lun *lun,
+ const struct spdk_scsi_port *initiator_port)
+{
+ return (g_initiator_port_with_pending_tasks == initiator_port);
+}
+
+static void
+dev_destruct_null_dev(void)
+{
+ /* pass null for the dev */
+ spdk_scsi_dev_destruct(NULL, NULL, NULL);
+}
+
+static void
+dev_destruct_zero_luns(void)
+{
+ struct spdk_scsi_dev dev = { .is_allocated = 1 };
+
+ /* No luns attached to the dev */
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(&dev, NULL, NULL);
+}
+
+static void
+dev_destruct_null_lun(void)
+{
+ struct spdk_scsi_dev dev = { .is_allocated = 1 };
+
+ /* pass null for the lun */
+ dev.lun[0] = NULL;
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(&dev, NULL, NULL);
+}
+
+static void
+dev_destruct_success(void)
+{
+ struct spdk_scsi_dev dev = { .is_allocated = 1 };
+ int rc;
+
+ /* dev with a single lun */
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", 0, NULL, NULL);
+
+ CU_ASSERT(rc == 0);
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(&dev, NULL, NULL);
+
+}
+
+static void
+dev_construct_num_luns_zero(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {};
+ int lun_id_list[1] = { 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 0,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* dev should be null since we passed num_luns = 0 */
+ CU_ASSERT_TRUE(dev == NULL);
+}
+
+static void
+dev_construct_no_lun_zero(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {};
+ int lun_id_list[1] = { 0 };
+
+ lun_id_list[0] = 1;
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* dev should be null since no LUN0 was specified (lun_id_list[0] = 1) */
+ CU_ASSERT_TRUE(dev == NULL);
+}
+
+static void
+dev_construct_null_lun(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {};
+ int lun_id_list[1] = { 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+	/* dev should be null since no LUN0 was created (bdev_name_list[0] = NULL) */
+ CU_ASSERT_TRUE(dev == NULL);
+}
+
+static void
+dev_construct_name_too_long(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+ char name[SPDK_SCSI_DEV_MAX_NAME + 1 + 1];
+
+ /* Try to construct a dev with a name that is one byte longer than allowed. */
+ memset(name, 'x', sizeof(name) - 1);
+ name[sizeof(name) - 1] = '\0';
+
+ dev = spdk_scsi_dev_construct(name, bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ CU_ASSERT(dev == NULL);
+}
+
+static void
+dev_construct_success(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(dev, NULL, NULL);
+}
+
+static void
+dev_construct_success_lun_zero_not_first(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[2] = {"malloc1", "malloc0"};
+ int lun_id_list[2] = { 1, 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 2,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(dev, NULL, NULL);
+}
+
+static void
+dev_queue_mgmt_task_success(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+ struct spdk_scsi_task *task;
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ task = spdk_get_task(NULL);
+
+ task->function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ spdk_scsi_dev_queue_mgmt_task(dev, task);
+
+ spdk_scsi_task_put(task);
+
+ spdk_scsi_dev_destruct(dev, NULL, NULL);
+}
+
+static void
+dev_queue_task_success(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+ struct spdk_scsi_task *task;
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ task = spdk_get_task(NULL);
+
+ spdk_scsi_dev_queue_task(dev, task);
+
+ spdk_scsi_task_put(task);
+
+ spdk_scsi_dev_destruct(dev, NULL, NULL);
+}
+
+static void
+dev_stop_success(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_task *task;
+ struct spdk_scsi_task *task_mgmt;
+
+ task = spdk_get_task(NULL);
+
+ spdk_scsi_dev_queue_task(&dev, task);
+
+ task_mgmt = spdk_get_task(NULL);
+
+	/* Enqueue the mgmt task into dev->task_mgmt_submit_queue */
+	task_mgmt->function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ spdk_scsi_dev_queue_mgmt_task(&dev, task_mgmt);
+
+ spdk_scsi_task_put(task);
+ spdk_scsi_task_put(task_mgmt);
+}
+
+static void
+dev_add_port_max_ports(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ int id, rc;
+
+ /* dev is set to SPDK_SCSI_DEV_MAX_PORTS */
+ dev.num_ports = SPDK_SCSI_DEV_MAX_PORTS;
+ name = "Name of Port";
+ id = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+	/* Returns -1 since the dev already has the maximum
+	 * number of ports (SPDK_SCSI_DEV_MAX_PORTS). */
+ CU_ASSERT_TRUE(rc < 0);
+}
+
+static void
+dev_add_port_construct_failure1(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const int port_name_length = SPDK_SCSI_PORT_MAX_NAME_LENGTH + 2;
+ char name[port_name_length];
+ uint64_t id;
+ int rc;
+
+ dev.num_ports = 1;
+	/* Set a name whose length exceeds SPDK_SCSI_PORT_MAX_NAME_LENGTH
+	 * (SPDK_SCSI_PORT_MAX_NAME_LENGTH = 256). */
+ memset(name, 'a', port_name_length - 1);
+ name[port_name_length - 1] = '\0';
+ id = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+	/* Returns -1 since the length of the name exceeds
+	 * SPDK_SCSI_PORT_MAX_NAME_LENGTH. */
+ CU_ASSERT_TRUE(rc < 0);
+}
+
+static void
+dev_add_port_construct_failure2(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ uint64_t id;
+ int rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ id = 1;
+
+	/* Initialize port[0] to be in use with its id set to 1 */
+ dev.port[0].id = id;
+ dev.port[0].is_used = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+	/* Returns -1 since the dev already has a port whose id is 1 */
+ CU_ASSERT_TRUE(rc < 0);
+}
+
+static void
+dev_add_port_success1(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ int id, rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ id = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ /* successfully adds a port */
+ CU_ASSERT_EQUAL(rc, 0);
+ /* Assert num_ports has been incremented to 2 */
+ CU_ASSERT_EQUAL(dev.num_ports, 2);
+}
+
+static void
+dev_add_port_success2(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ uint64_t id;
+ int rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ id = 1;
+	/* Set the id of unused port[0] to 1; it must be ignored since is_used is 0 */
+ dev.port[0].id = id;
+ dev.port[0].is_used = 0;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ /* successfully adds a port */
+ CU_ASSERT_EQUAL(rc, 0);
+	/* Assert num_ports has been incremented to 2 */
+ CU_ASSERT_EQUAL(dev.num_ports, 2);
+}
+
+static void
+dev_add_port_success3(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ uint64_t add_id;
+ int rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ dev.port[0].id = 1;
+ dev.port[0].is_used = 1;
+ add_id = 2;
+
+ /* Add a port with id = 2 */
+ rc = spdk_scsi_dev_add_port(&dev, add_id, name);
+
+ /* successfully adds a port */
+ CU_ASSERT_EQUAL(rc, 0);
+ /* Assert num_ports has been incremented to 2 */
+ CU_ASSERT_EQUAL(dev.num_ports, 2);
+}
+
+static void
+dev_find_port_by_id_num_ports_zero(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_port *rp_port;
+ uint64_t id;
+
+ dev.num_ports = 0;
+ id = 1;
+
+ rp_port = spdk_scsi_dev_find_port_by_id(&dev, id);
+
+	/* Returns NULL since the dev's num_ports is 0 */
+ CU_ASSERT_TRUE(rp_port == NULL);
+}
+
+static void
+dev_find_port_by_id_id_not_found_failure(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_port *rp_port;
+ const char *name;
+ int rc;
+ uint64_t id, find_id;
+
+ id = 1;
+ dev.num_ports = 1;
+ name = "Name of Port";
+ find_id = 2;
+
+ /* Add a port with id = 1 */
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Find port with id = 2 */
+ rp_port = spdk_scsi_dev_find_port_by_id(&dev, find_id);
+
+	/* Returns NULL; there is no port with id = 2 */
+ CU_ASSERT_TRUE(rp_port == NULL);
+}
+
+static void
+dev_find_port_by_id_success(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_port *rp_port;
+ const char *name;
+ int rc;
+ uint64_t id;
+
+ id = 1;
+ dev.num_ports = 1;
+ name = "Name of Port";
+
+ /* Add a port */
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Find port by the same id as the one added above */
+ rp_port = spdk_scsi_dev_find_port_by_id(&dev, id);
+
+ /* Successfully found port specified by id */
+ CU_ASSERT_TRUE(rp_port != NULL);
+ if (rp_port != NULL) {
+ /* Assert the found port's id and name are same as
+ * the port added. */
+ CU_ASSERT_EQUAL(rp_port->id, 1);
+ CU_ASSERT_STRING_EQUAL(rp_port->name, "Name of Port");
+ }
+}
+
+static void
+dev_add_lun_bdev_not_found(void)
+{
+ int rc;
+ struct spdk_scsi_dev dev = {0};
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc2", 0, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(dev.lun[0] == NULL);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+}
+
+static void
+dev_add_lun_no_free_lun_id(void)
+{
+ int rc;
+ int i;
+ struct spdk_scsi_dev dev = {0};
+ struct spdk_scsi_lun lun;
+
+ for (i = 0; i < SPDK_SCSI_DEV_MAX_LUN; i++) {
+ dev.lun[i] = &lun;
+ }
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", -1, NULL, NULL);
+
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+}
+
+static void
+dev_add_lun_success1(void)
+{
+ int rc;
+ struct spdk_scsi_dev dev = {0};
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", -1, NULL, NULL);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ spdk_scsi_dev_destruct(&dev, NULL, NULL);
+}
+
+static void
+dev_add_lun_success2(void)
+{
+ int rc;
+ struct spdk_scsi_dev dev = {0};
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", 0, NULL, NULL);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ spdk_scsi_dev_destruct(&dev, NULL, NULL);
+}
+
+static void
+dev_check_pending_tasks(void)
+{
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_scsi_port initiator_port = {};
+
+ g_initiator_port_with_pending_tasks = NULL;
+ g_initiator_port_with_pending_mgmt_tasks = NULL;
+
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, NULL) == false);
+
+ dev.lun[SPDK_SCSI_DEV_MAX_LUN - 1] = &lun;
+
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, NULL) == true);
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, &initiator_port) == false);
+
+ g_initiator_port_with_pending_tasks = &initiator_port;
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, NULL) == true);
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, &initiator_port) == true);
+
+ g_initiator_port_with_pending_tasks = NULL;
+ g_initiator_port_with_pending_mgmt_tasks = &initiator_port;
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, NULL) == true);
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, &initiator_port) == true);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("dev_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, dev_destruct_null_dev);
+ CU_ADD_TEST(suite, dev_destruct_zero_luns);
+ CU_ADD_TEST(suite, dev_destruct_null_lun);
+ CU_ADD_TEST(suite, dev_destruct_success);
+ CU_ADD_TEST(suite, dev_construct_num_luns_zero);
+ CU_ADD_TEST(suite, dev_construct_no_lun_zero);
+ CU_ADD_TEST(suite, dev_construct_null_lun);
+ CU_ADD_TEST(suite, dev_construct_name_too_long);
+ CU_ADD_TEST(suite, dev_construct_success);
+ CU_ADD_TEST(suite, dev_construct_success_lun_zero_not_first);
+ CU_ADD_TEST(suite, dev_queue_mgmt_task_success);
+ CU_ADD_TEST(suite, dev_queue_task_success);
+ CU_ADD_TEST(suite, dev_stop_success);
+ CU_ADD_TEST(suite, dev_add_port_max_ports);
+ CU_ADD_TEST(suite, dev_add_port_construct_failure1);
+ CU_ADD_TEST(suite, dev_add_port_construct_failure2);
+ CU_ADD_TEST(suite, dev_add_port_success1);
+ CU_ADD_TEST(suite, dev_add_port_success2);
+ CU_ADD_TEST(suite, dev_add_port_success3);
+ CU_ADD_TEST(suite, dev_find_port_by_id_num_ports_zero);
+ CU_ADD_TEST(suite, dev_find_port_by_id_id_not_found_failure);
+ CU_ADD_TEST(suite, dev_find_port_by_id_success);
+ CU_ADD_TEST(suite, dev_add_lun_bdev_not_found);
+ CU_ADD_TEST(suite, dev_add_lun_no_free_lun_id);
+ CU_ADD_TEST(suite, dev_add_lun_success1);
+ CU_ADD_TEST(suite, dev_add_lun_success2);
+ CU_ADD_TEST(suite, dev_check_pending_tasks);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/lun.c/.gitignore b/src/spdk/test/unit/lib/scsi/lun.c/.gitignore
new file mode 100644
index 000000000..89bd2aaf1
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/lun.c/.gitignore
@@ -0,0 +1 @@
+lun_ut
diff --git a/src/spdk/test/unit/lib/scsi/lun.c/Makefile b/src/spdk/test/unit/lib/scsi/lun.c/Makefile
new file mode 100644
index 000000000..95e179fe5
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/lun.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = lun_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c b/src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c
new file mode 100644
index 000000000..4efa8e364
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c
@@ -0,0 +1,750 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "scsi/task.c"
+#include "scsi/lun.c"
+
+#include "spdk_internal/mock.h"
+/* These unit tests aren't multithreaded, but we still need to allocate a
+ * thread because the lun.c code registers pollers.
+ */
+#include "common/lib/ut_multithread.c"
+
+/* Unit test bdev mockup */
+struct spdk_bdev {
+ int x;
+};
+
+SPDK_LOG_REGISTER_COMPONENT("scsi", SPDK_LOG_SCSI)
+
+struct spdk_scsi_globals g_spdk_scsi;
+
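+/* g_task_count tracks outstanding tasks: ut_init_task() increments it and
+ * the completion callback spdk_lun_ut_cpl_task() decrements it, so each test
+ * can assert that every task it created was completed.
+ */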
+static bool g_lun_execute_fail = false;
+static int g_lun_execute_status = SPDK_SCSI_TASK_PENDING;
+static uint32_t g_task_count = 0;
+
+struct spdk_trace_histories *g_trace_histories;
+
+DEFINE_STUB_V(_spdk_trace_record,
+ (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+static void
+spdk_lun_ut_cpl_task(struct spdk_scsi_task *task)
+{
+ SPDK_CU_ASSERT_FATAL(g_task_count > 0);
+ g_task_count--;
+}
+
+static void
+spdk_lun_ut_free_task(struct spdk_scsi_task *task)
+{
+}
+
+static void
+ut_init_task(struct spdk_scsi_task *task)
+{
+ memset(task, 0, sizeof(*task));
+ spdk_scsi_task_construct(task, spdk_lun_ut_cpl_task,
+ spdk_lun_ut_free_task);
+ g_task_count++;
+}
+
+void
+spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+ CU_ASSERT(0);
+}
+
+DEFINE_STUB(spdk_bdev_open, int,
+ (struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc),
+ 0);
+
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+
+DEFINE_STUB(spdk_bdev_get_name, const char *,
+ (const struct spdk_bdev *bdev), "test");
+
+DEFINE_STUB_V(spdk_scsi_dev_queue_mgmt_task,
+ (struct spdk_scsi_dev *dev, struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(spdk_scsi_dev_delete_lun,
+ (struct spdk_scsi_dev *dev, struct spdk_scsi_lun *lun));
+
+DEFINE_STUB(scsi_pr_check, int, (struct spdk_scsi_task *task), 0);
+DEFINE_STUB(scsi2_reserve_check, int, (struct spdk_scsi_task *task), 0);
+
+void
+bdev_scsi_reset(struct spdk_scsi_task *task)
+{
+ task->status = SPDK_SCSI_STATUS_GOOD;
+ task->response = SPDK_SCSI_TASK_MGMT_RESP_SUCCESS;
+
+ scsi_lun_complete_reset_task(task->lun, task);
+}
+
+int
+bdev_scsi_execute(struct spdk_scsi_task *task)
+{
+	if (g_lun_execute_fail) {
+		return -EINVAL;
+	}
+
+	task->status = SPDK_SCSI_STATUS_GOOD;
+
+	if (g_lun_execute_status == SPDK_SCSI_TASK_PENDING ||
+	    g_lun_execute_status == SPDK_SCSI_TASK_COMPLETE) {
+		return g_lun_execute_status;
+	}
+
+	return 0;
+}
+
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
+ (struct spdk_bdev_desc *desc), NULL);
+
+static struct spdk_scsi_lun *
+lun_construct(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_bdev bdev;
+
+ lun = scsi_lun_construct(&bdev, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(lun != NULL);
+ return lun;
+}
+
+static void
+lun_destruct(struct spdk_scsi_lun *lun)
+{
+ /* LUN will defer its removal if there are any unfinished tasks */
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&lun->tasks));
+
+ scsi_lun_destruct(lun);
+}
+
+static void
+lun_task_mgmt_execute_abort_task_not_supported(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_port initiator_port = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.initiator_port = &initiator_port;
+ mgmt_task.function = SPDK_SCSI_TASK_FUNC_ABORT_TASK;
+
+ /* Params to add regular task to the lun->tasks */
+ ut_init_task(&task);
+ task.lun = lun;
+ task.cdb = cdb;
+
+ scsi_lun_execute_task(lun, &task);
+
+ /* task should now be on the tasks list */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ scsi_lun_execute_mgmt_task(lun, &mgmt_task);
+
+ /* task abort is not supported */
+ CU_ASSERT(mgmt_task.response == SPDK_SCSI_TASK_MGMT_RESP_REJECT_FUNC_NOT_SUPPORTED);
+
+ /* task is still on the tasks list */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+
+ scsi_lun_complete_task(lun, &task);
+ CU_ASSERT_EQUAL(g_task_count, 0);
+
+ lun_destruct(lun);
+}
+
+static void
+lun_task_mgmt_execute_abort_task_all_not_supported(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_port initiator_port = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.initiator_port = &initiator_port;
+ mgmt_task.function = SPDK_SCSI_TASK_FUNC_ABORT_TASK_SET;
+
+ /* Params to add regular task to the lun->tasks */
+ ut_init_task(&task);
+ task.initiator_port = &initiator_port;
+ task.lun = lun;
+ task.cdb = cdb;
+
+ scsi_lun_execute_task(lun, &task);
+
+ /* task should now be on the tasks list */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ scsi_lun_execute_mgmt_task(lun, &mgmt_task);
+
+ /* task abort is not supported */
+ CU_ASSERT(mgmt_task.response == SPDK_SCSI_TASK_MGMT_RESP_REJECT_FUNC_NOT_SUPPORTED);
+
+ /* task is still on the tasks list */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+
+ scsi_lun_complete_task(lun, &task);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+
+ lun_destruct(lun);
+}
+
+static void
+lun_task_mgmt_execute_lun_reset(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+
+ scsi_lun_execute_mgmt_task(lun, &mgmt_task);
+
+ /* Returns success */
+ CU_ASSERT_EQUAL(mgmt_task.status, SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT_EQUAL(mgmt_task.response, SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_task_mgmt_execute_invalid_case(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.function = 5;
+
+ /* Pass an invalid value to the switch statement */
+ scsi_lun_execute_mgmt_task(lun, &mgmt_task);
+
+ /* function code is invalid */
+ CU_ASSERT_EQUAL(mgmt_task.response, SPDK_SCSI_TASK_MGMT_RESP_REJECT_FUNC_NOT_SUPPORTED);
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_append_task_null_lun_task_cdb_spc_inquiry(void)
+{
+ struct spdk_scsi_task task = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ ut_init_task(&task);
+ task.cdb = cdb;
+ task.cdb[0] = SPDK_SPC_INQUIRY;
+ /* alloc_len >= 4096 */
+ task.cdb[3] = 0xFF;
+ task.cdb[4] = 0xFF;
+ task.lun = NULL;
+
+ spdk_scsi_task_process_null_lun(&task);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_GOOD);
+
+ spdk_scsi_task_put(&task);
+
+ /* spdk_scsi_task_process_null_lun() does not call cpl_fn */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+ g_task_count = 0;
+}
+
+static void
+lun_append_task_null_lun_alloc_len_lt_4096(void)
+{
+ struct spdk_scsi_task task = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ ut_init_task(&task);
+ task.cdb = cdb;
+ task.cdb[0] = SPDK_SPC_INQUIRY;
+ /* alloc_len < 4096 */
+ task.cdb[3] = 0;
+ task.cdb[4] = 0;
+	/* alloc_len is raised to a minimum of 4096, so a buffer of size
+	 * 4096 is allocated. */
+ spdk_scsi_task_process_null_lun(&task);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_GOOD);
+
+ spdk_scsi_task_put(&task);
+
+ /* spdk_scsi_task_process_null_lun() does not call cpl_fn */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+ g_task_count = 0;
+}
+
+static void
+lun_append_task_null_lun_not_supported(void)
+{
+ struct spdk_scsi_task task = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ ut_init_task(&task);
+ task.cdb = cdb;
+ task.lun = NULL;
+
+ spdk_scsi_task_process_null_lun(&task);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ /* LUN not supported; task's data transferred should be 0 */
+ CU_ASSERT_EQUAL(task.data_transferred, 0);
+
+ /* spdk_scsi_task_process_null_lun() does not call cpl_fn */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+ g_task_count = 0;
+}
+
+static void
+lun_execute_scsi_task_pending(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+
+ ut_init_task(&task);
+ task.lun = lun;
+ lun->dev = &dev;
+
+ g_lun_execute_fail = false;
+ g_lun_execute_status = SPDK_SCSI_TASK_PENDING;
+
+	/* The tasks list should still be empty since the task has not been
+	 * executed yet.
+	 */
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ scsi_lun_execute_task(lun, &task);
+
+ /* Assert the task has been successfully added to the tasks queue */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ /* task is still on the tasks list */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+
+	/* Complete the task so that the LUN can be destructed right away. */
+ scsi_lun_complete_task(lun, &task);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+
+ lun_destruct(lun);
+}
+
+static void
+lun_execute_scsi_task_complete(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+
+ ut_init_task(&task);
+ task.lun = lun;
+ lun->dev = &dev;
+
+ g_lun_execute_fail = false;
+ g_lun_execute_status = SPDK_SCSI_TASK_COMPLETE;
+
+	/* The tasks list should still be empty since the task has not been
+	 * executed yet.
+	 */
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ scsi_lun_execute_task(lun, &task);
+
+ /* Assert the task has not been added to the tasks queue */
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_destruct_success(void)
+{
+ struct spdk_scsi_lun *lun;
+
+ lun = lun_construct();
+
+ scsi_lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_construct_null_ctx(void)
+{
+ struct spdk_scsi_lun *lun;
+
+ lun = scsi_lun_construct(NULL, NULL, NULL);
+
+ /* lun should be NULL since we passed NULL for the ctx pointer. */
+ CU_ASSERT(lun == NULL);
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_construct_success(void)
+{
+ struct spdk_scsi_lun *lun = lun_construct();
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_reset_task_wait_scsi_task_complete(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&task);
+ task.lun = lun;
+
+ g_lun_execute_fail = false;
+ g_lun_execute_status = SPDK_SCSI_TASK_PENDING;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+
+	/* Execute the task. It is pending, so it remains on the tasks list. */
+ scsi_lun_execute_task(lun, &task);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->pending_tasks));
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ /* Execute the reset task */
+ scsi_lun_execute_mgmt_task(lun, &mgmt_task);
+
+ /* The reset task should be on the submitted mgmt task list and
+ * a poller is created because the task prior to the reset task is pending.
+ */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(lun->reset_poller != NULL);
+
+	/* Execute the poller to check whether the task prior to the reset task has completed. */
+ scsi_lun_reset_check_outstanding_tasks(&mgmt_task);
+
+ CU_ASSERT(!TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(lun->reset_poller != NULL);
+
+ /* Complete the task. */
+ scsi_lun_complete_task(lun, &task);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+	/* Execute the poller to check whether the task prior to the reset task has completed. */
+ scsi_lun_reset_check_outstanding_tasks(&mgmt_task);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(lun->reset_poller == NULL);
+ CU_ASSERT_EQUAL(mgmt_task.status, SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT_EQUAL(mgmt_task.response, SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_reset_task_suspend_scsi_task(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&task);
+ task.lun = lun;
+
+ g_lun_execute_fail = false;
+ g_lun_execute_status = SPDK_SCSI_TASK_COMPLETE;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+
+ /* Append a reset task to the pending mgmt task list. */
+ scsi_lun_append_mgmt_task(lun, &mgmt_task);
+
+ CU_ASSERT(!TAILQ_EMPTY(&lun->pending_mgmt_tasks));
+
+	/* Try to execute the task; it is suspended on the pending task list because a mgmt task is queued. */
+ scsi_lun_execute_task(lun, &task);
+
+ CU_ASSERT(!TAILQ_EMPTY(&lun->pending_tasks));
+
+	/* Execute the reset task. The suspended task is then resumed and executed. */
+ _scsi_lun_execute_mgmt_task(lun);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(lun->reset_poller == NULL);
+ CU_ASSERT_EQUAL(mgmt_task.status, SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT_EQUAL(mgmt_task.response, SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->pending_tasks));
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_check_pending_tasks_only_for_specific_initiator(void)
+{
+ struct spdk_bdev bdev = {};
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task1 = {};
+ struct spdk_scsi_task task2 = {};
+ struct spdk_scsi_port initiator_port1 = {};
+ struct spdk_scsi_port initiator_port2 = {};
+ struct spdk_scsi_port initiator_port3 = {};
+
+ lun = scsi_lun_construct(&bdev, NULL, NULL);
+
+ task1.initiator_port = &initiator_port1;
+ task2.initiator_port = &initiator_port2;
+
+ TAILQ_INSERT_TAIL(&lun->tasks, &task1, scsi_link);
+ TAILQ_INSERT_TAIL(&lun->tasks, &task2, scsi_link);
+ CU_ASSERT(scsi_lun_has_outstanding_tasks(lun) == true);
+ CU_ASSERT(_scsi_lun_has_pending_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, NULL) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port1) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port2) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port3) == false);
+ TAILQ_REMOVE(&lun->tasks, &task1, scsi_link);
+ TAILQ_REMOVE(&lun->tasks, &task2, scsi_link);
+ CU_ASSERT(_scsi_lun_has_pending_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, NULL) == false);
+
+ TAILQ_INSERT_TAIL(&lun->pending_tasks, &task1, scsi_link);
+ TAILQ_INSERT_TAIL(&lun->pending_tasks, &task2, scsi_link);
+ CU_ASSERT(scsi_lun_has_outstanding_tasks(lun) == false);
+ CU_ASSERT(_scsi_lun_has_pending_tasks(lun) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, NULL) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port1) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port2) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port3) == false);
+ TAILQ_REMOVE(&lun->pending_tasks, &task1, scsi_link);
+ TAILQ_REMOVE(&lun->pending_tasks, &task2, scsi_link);
+ CU_ASSERT(_scsi_lun_has_pending_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, NULL) == false);
+
+ TAILQ_INSERT_TAIL(&lun->mgmt_tasks, &task1, scsi_link);
+ TAILQ_INSERT_TAIL(&lun->mgmt_tasks, &task2, scsi_link);
+ CU_ASSERT(scsi_lun_has_outstanding_mgmt_tasks(lun) == true);
+ CU_ASSERT(_scsi_lun_has_pending_mgmt_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, NULL) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port1) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port2) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port3) == false);
+ TAILQ_REMOVE(&lun->mgmt_tasks, &task1, scsi_link);
+ TAILQ_REMOVE(&lun->mgmt_tasks, &task2, scsi_link);
+ CU_ASSERT(_scsi_lun_has_pending_mgmt_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, NULL) == false);
+
+ TAILQ_INSERT_TAIL(&lun->pending_mgmt_tasks, &task1, scsi_link);
+ TAILQ_INSERT_TAIL(&lun->pending_mgmt_tasks, &task2, scsi_link);
+ CU_ASSERT(_scsi_lun_has_pending_mgmt_tasks(lun) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, NULL) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port1) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port2) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port3) == false);
+ TAILQ_REMOVE(&lun->pending_mgmt_tasks, &task1, scsi_link);
+ TAILQ_REMOVE(&lun->pending_mgmt_tasks, &task2, scsi_link);
+ CU_ASSERT(_scsi_lun_has_pending_mgmt_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, NULL) == false);
+
+ scsi_lun_remove(lun);
+}
+
+static void
+abort_pending_mgmt_tasks_when_lun_is_removed(void)
+{
+ struct spdk_bdev bdev = {};
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task1, task2, task3;
+
+ lun = scsi_lun_construct(&bdev, NULL, NULL);
+
+ /* Normal case */
+ ut_init_task(&task1);
+ ut_init_task(&task2);
+ ut_init_task(&task3);
+ task1.lun = lun;
+ task2.lun = lun;
+ task3.lun = lun;
+ task1.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ task2.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ task3.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+
+ CU_ASSERT(g_task_count == 3);
+
+ scsi_lun_append_mgmt_task(lun, &task1);
+ scsi_lun_append_mgmt_task(lun, &task2);
+ scsi_lun_append_mgmt_task(lun, &task3);
+
+ CU_ASSERT(!TAILQ_EMPTY(&lun->pending_mgmt_tasks));
+
+ _scsi_lun_execute_mgmt_task(lun);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->pending_mgmt_tasks));
+ CU_ASSERT(TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(g_task_count == 0);
+ CU_ASSERT(task1.response == SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+ CU_ASSERT(task2.response == SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+ CU_ASSERT(task3.response == SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+
+ /* LUN hotplug case */
+ ut_init_task(&task1);
+ ut_init_task(&task2);
+ ut_init_task(&task3);
+ task1.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ task2.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ task3.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+
+ CU_ASSERT(g_task_count == 3);
+
+ scsi_lun_append_mgmt_task(lun, &task1);
+ scsi_lun_append_mgmt_task(lun, &task2);
+ scsi_lun_append_mgmt_task(lun, &task3);
+
+ CU_ASSERT(!TAILQ_EMPTY(&lun->pending_mgmt_tasks));
+
+ lun->removed = true;
+
+ _scsi_lun_execute_mgmt_task(lun);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->pending_mgmt_tasks));
+ CU_ASSERT(TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(g_task_count == 0);
+ CU_ASSERT(task1.response == SPDK_SCSI_TASK_MGMT_RESP_INVALID_LUN);
+ CU_ASSERT(task2.response == SPDK_SCSI_TASK_MGMT_RESP_INVALID_LUN);
+ CU_ASSERT(task3.response == SPDK_SCSI_TASK_MGMT_RESP_INVALID_LUN);
+
+ scsi_lun_remove(lun);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("lun_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, lun_task_mgmt_execute_abort_task_not_supported);
+ CU_ADD_TEST(suite, lun_task_mgmt_execute_abort_task_all_not_supported);
+ CU_ADD_TEST(suite, lun_task_mgmt_execute_lun_reset);
+ CU_ADD_TEST(suite, lun_task_mgmt_execute_invalid_case);
+ CU_ADD_TEST(suite, lun_append_task_null_lun_task_cdb_spc_inquiry);
+ CU_ADD_TEST(suite, lun_append_task_null_lun_alloc_len_lt_4096);
+ CU_ADD_TEST(suite, lun_append_task_null_lun_not_supported);
+ CU_ADD_TEST(suite, lun_execute_scsi_task_pending);
+ CU_ADD_TEST(suite, lun_execute_scsi_task_complete);
+ CU_ADD_TEST(suite, lun_destruct_success);
+ CU_ADD_TEST(suite, lun_construct_null_ctx);
+ CU_ADD_TEST(suite, lun_construct_success);
+ CU_ADD_TEST(suite, lun_reset_task_wait_scsi_task_complete);
+ CU_ADD_TEST(suite, lun_reset_task_suspend_scsi_task);
+ CU_ADD_TEST(suite, lun_check_pending_tasks_only_for_specific_initiator);
+ CU_ADD_TEST(suite, abort_pending_mgmt_tasks_when_lun_is_removed);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ allocate_threads(1);
+ set_thread(0);
+ CU_basic_run_tests();
+ free_threads();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/scsi.c/.gitignore b/src/spdk/test/unit/lib/scsi/scsi.c/.gitignore
new file mode 100644
index 000000000..99a7db2b1
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi.c/.gitignore
@@ -0,0 +1 @@
+scsi_ut
diff --git a/src/spdk/test/unit/lib/scsi/scsi.c/Makefile b/src/spdk/test/unit/lib/scsi/scsi.c/Makefile
new file mode 100644
index 000000000..2ed249227
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = trace
+TEST_FILE = scsi_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c b/src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c
new file mode 100644
index 000000000..430ff96b0
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c
@@ -0,0 +1,69 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/scsi.h"
+
+#include "spdk_cunit.h"
+
+#include "scsi/scsi.c"
+
+static void
+scsi_init(void)
+{
+ int rc;
+
+ rc = spdk_scsi_init();
+ CU_ASSERT_EQUAL(rc, 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("scsi_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, scsi_init);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore
new file mode 100644
index 000000000..8f1ecc12c
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore
@@ -0,0 +1 @@
+scsi_bdev_ut
diff --git a/src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile
new file mode 100644
index 000000000..66a4119bb
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = scsi_bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c
new file mode 100644
index 000000000..4e64f7071
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c
@@ -0,0 +1,1037 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "scsi/task.c"
+#include "scsi/scsi_bdev.c"
+#include "common/lib/test_env.c"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/mock.h"
+
+SPDK_LOG_REGISTER_COMPONENT("scsi", SPDK_LOG_SCSI)
+
+struct spdk_scsi_globals g_spdk_scsi;
+
+static uint64_t g_test_bdev_num_blocks;
+
+TAILQ_HEAD(, spdk_bdev_io) g_bdev_io_queue;
+int g_scsi_cb_called = 0;
+
+TAILQ_HEAD(, spdk_bdev_io_wait_entry) g_io_wait_queue;
+bool g_bdev_io_pool_full = false;
+
+bool
+spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
+{
+ abort();
+ return false;
+}
+
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
+
+DEFINE_STUB(spdk_bdev_get_name, const char *,
+ (const struct spdk_bdev *bdev), "test");
+
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
+ (const struct spdk_bdev *bdev), 512);
+
+DEFINE_STUB(spdk_bdev_get_md_size, uint32_t,
+ (const struct spdk_bdev *bdev), 8);
+
+DEFINE_STUB(spdk_bdev_is_md_interleaved, bool,
+ (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_get_data_block_size, uint32_t,
+ (const struct spdk_bdev *bdev), 512);
+
+uint64_t
+spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
+{
+ return g_test_bdev_num_blocks;
+}
+
+DEFINE_STUB(spdk_bdev_get_product_name, const char *,
+ (const struct spdk_bdev *bdev), "test product");
+
+DEFINE_STUB(spdk_bdev_has_write_cache, bool,
+ (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type,
+ (const struct spdk_bdev *bdev), SPDK_DIF_DISABLE);
+
+DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool,
+ (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
+ (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);
+
+DEFINE_STUB(scsi_pr_out, int, (struct spdk_scsi_task *task,
+ uint8_t *cdb, uint8_t *data, uint16_t data_len), 0);
+
+DEFINE_STUB(scsi_pr_in, int, (struct spdk_scsi_task *task, uint8_t *cdb,
+ uint8_t *data, uint16_t data_len), 0);
+
+DEFINE_STUB(scsi2_reserve, int, (struct spdk_scsi_task *task, uint8_t *cdb), 0);
+DEFINE_STUB(scsi2_release, int, (struct spdk_scsi_task *task), 0);
+
+void
+scsi_lun_complete_task(struct spdk_scsi_lun *lun, struct spdk_scsi_task *task)
+{
+ g_scsi_cb_called++;
+}
+
+DEFINE_STUB_V(scsi_lun_complete_reset_task,
+ (struct spdk_scsi_lun *lun, struct spdk_scsi_task *task));
+
+DEFINE_STUB(spdk_scsi_lun_id_int_to_fmt, uint64_t, (int lun_id), 0);
+
+static void
+ut_put_task(struct spdk_scsi_task *task)
+{
+ if (task->alloc_len) {
+ free(task->iov.iov_base);
+ }
+
+ task->iov.iov_base = NULL;
+ task->iov.iov_len = 0;
+ task->alloc_len = 0;
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+}
+
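+/*
+ * Poison the whole task with 0xFF so that any field the code under test
+ * fails to set is likely to trip an assertion, then wire up a
+ * single-element iovec for the data buffer.
+ */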
+static void
+ut_init_task(struct spdk_scsi_task *task)
+{
+ memset(task, 0xFF, sizeof(*task));
+ task->iov.iov_base = NULL;
+ task->iovs = &task->iov;
+ task->iovcnt = 1;
+ task->alloc_len = 0;
+ task->dxfer_dir = SPDK_SCSI_DIR_NONE;
+}
+
+void
+spdk_bdev_io_get_scsi_status(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+ switch (bdev_io->internal.status) {
+ case SPDK_BDEV_IO_STATUS_SUCCESS:
+ *sc = SPDK_SCSI_STATUS_GOOD;
+ *sk = SPDK_SCSI_SENSE_NO_SENSE;
+ *asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
+ *ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
+ *sc = bdev_io->internal.error.scsi.sc;
+ *sk = bdev_io->internal.error.scsi.sk;
+ *asc = bdev_io->internal.error.scsi.asc;
+ *ascq = bdev_io->internal.error.scsi.ascq;
+ break;
+ default:
+ *sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
+ *sk = SPDK_SCSI_SENSE_ABORTED_COMMAND;
+ *asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
+ *ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ }
+}
+
+void
+spdk_bdev_io_get_iovec(struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp)
+{
+ *iovp = NULL;
+ *iovcntp = 0;
+}
+
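+/*
+ * Drain the fake bdev layer: deliver a successful completion for every
+ * queued I/O, then fire any queued io_wait retry callbacks, and repeat
+ * until both queues are empty.
+ */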
+static void
+ut_bdev_io_flush(void)
+{
+ struct spdk_bdev_io *bdev_io;
+ struct spdk_bdev_io_wait_entry *entry;
+
+ while (!TAILQ_EMPTY(&g_bdev_io_queue) || !TAILQ_EMPTY(&g_io_wait_queue)) {
+ while (!TAILQ_EMPTY(&g_bdev_io_queue)) {
+ bdev_io = TAILQ_FIRST(&g_bdev_io_queue);
+ TAILQ_REMOVE(&g_bdev_io_queue, bdev_io, internal.link);
+ bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
+ free(bdev_io);
+ }
+
+ while (!TAILQ_EMPTY(&g_io_wait_queue)) {
+ entry = TAILQ_FIRST(&g_io_wait_queue);
+ TAILQ_REMOVE(&g_io_wait_queue, entry, link);
+ entry->cb_fn(entry->cb_arg);
+ }
+ }
+}
+
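+/*
+ * Common stub backing the bdev submission functions below: fail once with
+ * -ENOMEM when g_bdev_io_pool_full is set (to exercise the io_wait retry
+ * path), otherwise queue a successful completion for ut_bdev_io_flush().
+ */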
+static int
+_spdk_bdev_io_op(spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ if (g_bdev_io_pool_full) {
+ g_bdev_io_pool_full = false;
+ return -ENOMEM;
+ }
+
+ bdev_io = calloc(1, sizeof(*bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ bdev_io->internal.cb = cb;
+ bdev_io->internal.caller_ctx = cb_arg;
+
+ TAILQ_INSERT_TAIL(&g_bdev_io_queue, bdev_io, internal.link);
+
+ return 0;
+}
+
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry)
+{
+ TAILQ_INSERT_TAIL(&g_io_wait_queue, entry, link);
+ return 0;
+}
+
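+/*
+ * Minimal spdk_dif_ctx_init() stub: it records only the fields checked by
+ * get_dif_ctx_test(), converting the data offset into a reference tag
+ * offset under the assumption of 512-byte blocks.
+ */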
+int
+spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
+ bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
+ uint32_t data_offset, uint16_t guard_seed)
+{
+ ctx->init_ref_tag = init_ref_tag;
+ ctx->ref_tag_offset = data_offset / 512;
+ return 0;
+}
+
+/*
+ * This test specifically tests a mode select 6 command from the
+ * Windows SCSI compliance test that caused SPDK to crash.
+ */
+static void
+mode_select_6_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[16];
+ char data[24];
+ int rc;
+
+ ut_init_task(&task);
+
+	cdb[0] = 0x15; /* MODE SELECT (6) */
+	cdb[1] = 0x11; /* PF = 1, SP = 1 */
+	cdb[2] = 0x00;
+	cdb[3] = 0x00;
+	cdb[4] = 0x18; /* parameter list length = 24 */
+	cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ memset(data, 0, sizeof(data));
+	data[4] = 0x08; /* first mode page after the 4-byte header: page code 0x08 */
+	data[5] = 0x02; /* page length */
+ spdk_scsi_task_set_data(&task, data, sizeof(data));
+
+ rc = bdev_scsi_execute(&task);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a mode select 6 command which
+ * contains no mode pages.
+ */
+static void
+mode_select_6_test2(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[16];
+ int rc;
+
+ ut_init_task(&task);
+
+	cdb[0] = 0x15; /* MODE SELECT (6) */
+	cdb[1] = 0x00;
+	cdb[2] = 0x00;
+	cdb[3] = 0x00;
+	cdb[4] = 0x00; /* parameter list length = 0: no mode pages */
+	cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = bdev_scsi_execute(&task);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a MODE SENSE (6) command which
+ * returns all subpage 00h mode pages.
+ */
+static void
+mode_sense_6_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[12];
+ unsigned char *data;
+ int rc;
+ unsigned char mode_data_len = 0;
+ unsigned char medium_type = 0;
+ unsigned char dev_specific_param = 0;
+ unsigned char blk_descriptor_len = 0;
+
+ memset(&bdev, 0, sizeof(struct spdk_bdev));
+ ut_init_task(&task);
+ memset(cdb, 0, sizeof(cdb));
+
+	cdb[0] = 0x1A; /* MODE SENSE (6) */
+	cdb[2] = 0x3F; /* PC = 0 (current values), page code 0x3F (all pages) */
+	cdb[4] = 0xFF; /* allocation length = 255 */
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ data = task.iovs[0].iov_base;
+ mode_data_len = data[0];
+ medium_type = data[1];
+ dev_specific_param = data[2];
+ blk_descriptor_len = data[3];
+
+ CU_ASSERT(mode_data_len >= 11);
+ CU_ASSERT_EQUAL(medium_type, 0);
+ CU_ASSERT_EQUAL(dev_specific_param, 0);
+ CU_ASSERT_EQUAL(blk_descriptor_len, 8);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a MODE SENSE (10) command which
+ * returns all subpage 00h mode pages.
+ */
+static void
+mode_sense_10_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[12];
+ unsigned char *data;
+ int rc;
+ unsigned short mode_data_len = 0;
+ unsigned char medium_type = 0;
+ unsigned char dev_specific_param = 0;
+ unsigned short blk_descriptor_len = 0;
+
+ memset(&bdev, 0, sizeof(struct spdk_bdev));
+ ut_init_task(&task);
+ memset(cdb, 0, sizeof(cdb));
+	cdb[0] = 0x5A; /* MODE SENSE (10) */
+	cdb[2] = 0x3F; /* PC = 0 (current values), page code 0x3F (all pages) */
+	cdb[8] = 0xFF; /* allocation length (low byte) = 255 */
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ data = task.iovs[0].iov_base;
+ mode_data_len = ((data[0] << 8) + data[1]);
+ medium_type = data[2];
+ dev_specific_param = data[3];
+ blk_descriptor_len = ((data[6] << 8) + data[7]);
+
+ CU_ASSERT(mode_data_len >= 14);
+ CU_ASSERT_EQUAL(medium_type, 0);
+ CU_ASSERT_EQUAL(dev_specific_param, 0);
+ CU_ASSERT_EQUAL(blk_descriptor_len, 8);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a scsi inquiry command from the
+ * Windows SCSI compliance test that failed to return the
+ * expected SCSI error sense code.
+ */
+static void
+inquiry_evpd_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[6];
+ int rc;
+
+ ut_init_task(&task);
+
+	cdb[0] = 0x12; /* INQUIRY */
+	cdb[1] = 0x00; /* EVPD = 0 */
+	cdb[2] = 0xff; /* PageCode non-zero - invalid when EVPD = 0 */
+ cdb[3] = 0x00;
+ cdb[4] = 0xff;
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(task.sense_data[2] & 0xf, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(task.sense_data[12], 0x24);
+ CU_ASSERT_EQUAL(task.sense_data[13], 0x0);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test verifies specific return data for a standard SCSI INQUIRY
+ * command: the Version field.
+ */
+static void
+inquiry_standard_test(void)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[6];
+ char *data;
+ struct spdk_scsi_cdb_inquiry_data *inq_data;
+ int rc;
+
+ ut_init_task(&task);
+
+	cdb[0] = 0x12; /* INQUIRY */
+ cdb[1] = 0x00; /* EVPD = 0 */
+ cdb[2] = 0x00; /* PageCode zero - requesting standard inquiry */
+ cdb[3] = 0x00;
+ cdb[4] = 0xff; /* Indicate data size used by conformance test */
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = bdev_scsi_execute(&task);
+
+ data = task.iovs[0].iov_base;
+ inq_data = (struct spdk_scsi_cdb_inquiry_data *)&data[0];
+
+ CU_ASSERT_EQUAL(inq_data->version, SPDK_SPC_VERSION_SPC3);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ ut_put_task(&task);
+}
+
+static void
+_inquiry_overflow_test(uint8_t alloc_len)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ uint8_t cdb[6];
+ int rc;
+ /* expects a 4K internal data buffer */
+ char data[4096], data_compare[4096];
+
+ ut_init_task(&task);
+
+	cdb[0] = 0x12; /* INQUIRY */
+ cdb[1] = 0x00; /* EVPD = 0 */
+ cdb[2] = 0x00; /* PageCode zero - requesting standard inquiry */
+ cdb[3] = 0x00;
+ cdb[4] = alloc_len; /* Indicate data size used by conformance test */
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ memset(data, 0, sizeof(data));
+ memset(data_compare, 0, sizeof(data_compare));
+
+ spdk_scsi_task_set_data(&task, data, sizeof(data));
+
+ rc = bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ CU_ASSERT_EQUAL(memcmp(data + alloc_len, data_compare + alloc_len, sizeof(data) - alloc_len), 0);
+ CU_ASSERT(task.data_transferred <= alloc_len);
+
+ ut_put_task(&task);
+}
+
+static void
+inquiry_overflow_test(void)
+{
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ _inquiry_overflow_test(i);
+ }
+}
+
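+/*
+ * bdev_scsi_pad_scsi_name() appends a NUL terminator and pads the result
+ * to a multiple of four bytes, matching the SPC format for SCSI name
+ * strings; the cases below sit just below, at, and above the boundaries.
+ */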
+static void
+scsi_name_padding_test(void)
+{
+ char name[SPDK_SCSI_DEV_MAX_NAME + 1];
+ char buf[SPDK_SCSI_DEV_MAX_NAME + 1];
+ int written, i;
+
+ /* case 1 */
+ memset(name, '\0', sizeof(name));
+ memset(name, 'x', 251);
+ written = bdev_scsi_pad_scsi_name(buf, name);
+
+ CU_ASSERT(written == 252);
+ CU_ASSERT(buf[250] == 'x');
+ CU_ASSERT(buf[251] == '\0');
+
+	/* case 2 */
+ memset(name, '\0', sizeof(name));
+ memset(name, 'x', 252);
+ written = bdev_scsi_pad_scsi_name(buf, name);
+
+ CU_ASSERT(written == 256);
+ CU_ASSERT(buf[251] == 'x');
+ for (i = 252; i < 256; i++) {
+ CU_ASSERT(buf[i] == '\0');
+ }
+
+ /* case 3 */
+ memset(name, '\0', sizeof(name));
+ memset(name, 'x', 255);
+ written = bdev_scsi_pad_scsi_name(buf, name);
+
+ CU_ASSERT(written == 256);
+ CU_ASSERT(buf[254] == 'x');
+ CU_ASSERT(buf[255] == '\0');
+}
+
+/*
+ * This test verifies error translation from bdev I/O status to SCSI
+ * status and sense data.
+ */
+static void
+task_complete_test(void)
+{
+ struct spdk_scsi_task task;
+ struct spdk_bdev_io bdev_io = {};
+ struct spdk_scsi_lun lun;
+
+ ut_init_task(&task);
+
+ TAILQ_INIT(&lun.tasks);
+ TAILQ_INSERT_TAIL(&lun.tasks, &task, scsi_link);
+ task.lun = &lun;
+
+ bdev_io.internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ bdev_scsi_task_complete_cmd(&bdev_io, bdev_io.internal.status, &task);
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ bdev_io.internal.status = SPDK_BDEV_IO_STATUS_SCSI_ERROR;
+ bdev_io.internal.error.scsi.sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
+ bdev_io.internal.error.scsi.sk = SPDK_SCSI_SENSE_HARDWARE_ERROR;
+ bdev_io.internal.error.scsi.asc = SPDK_SCSI_ASC_WARNING;
+ bdev_io.internal.error.scsi.ascq = SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED;
+ bdev_scsi_task_complete_cmd(&bdev_io, bdev_io.internal.status, &task);
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(task.sense_data[2] & 0xf, SPDK_SCSI_SENSE_HARDWARE_ERROR);
+ CU_ASSERT_EQUAL(task.sense_data[12], SPDK_SCSI_ASC_WARNING);
+ CU_ASSERT_EQUAL(task.sense_data[13], SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ bdev_io.internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+ bdev_scsi_task_complete_cmd(&bdev_io, bdev_io.internal.status, &task);
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(task.sense_data[2] & 0xf, SPDK_SCSI_SENSE_ABORTED_COMMAND);
+ CU_ASSERT_EQUAL(task.sense_data[12], SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(task.sense_data[13], SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ ut_put_task(&task);
+}
+
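+/*
+ * Exercise LBA range validation on a 4-block bdev: reads that fit
+ * complete normally, while reads that start or extend past the last block
+ * fail with LOGICAL BLOCK ADDRESS OUT OF RANGE before any bdev I/O is
+ * submitted.
+ */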
+static void
+lba_range_test(void)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_task task;
+ uint8_t cdb[16];
+ int rc;
+
+ lun.bdev = &bdev;
+
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.lun->bdev_desc = NULL;
+ task.lun->io_channel = NULL;
+ task.cdb = cdb;
+
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x88; /* READ (16) */
+
+ /* Test block device size of 4 blocks */
+ g_test_bdev_num_blocks = 4;
+
+ /* LBA = 0, length = 1 (in range) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* LBA = 4, length = 1 (LBA out of range) */
+ to_be64(&cdb[2], 4); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ /* LBA = 0, length = 4 (in range, max valid size) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 4); /* transfer length */
+ task.transfer_len = 4 * 512;
+ task.status = 0xFF;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* LBA = 0, length = 5 (LBA in range, length beyond end of bdev) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 5); /* transfer length */
+ task.transfer_len = 5 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ ut_put_task(&task);
+}
+
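+/*
+ * Exercise transfer length validation: lengths up to the advertised
+ * maximum (SPDK_WORK_BLOCK_SIZE) are serviced, one block more fails with
+ * INVALID FIELD IN CDB, and a zero-length transfer succeeds as a no-op as
+ * long as the starting LBA itself is valid.
+ */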
+static void
+xfer_len_test(void)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_task task;
+ uint8_t cdb[16];
+ int rc;
+
+ lun.bdev = &bdev;
+
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.lun->bdev_desc = NULL;
+ task.lun->io_channel = NULL;
+ task.cdb = cdb;
+
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x88; /* READ (16) */
+
+	/* Test block device of 512M blocks (256 GiB with 512-byte blocks) */
+ g_test_bdev_num_blocks = 512 * 1024 * 1024;
+
+ /* 1 block */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* max transfer length (as reported in block limits VPD page) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], SPDK_WORK_BLOCK_SIZE / 512); /* transfer length */
+ task.transfer_len = SPDK_WORK_BLOCK_SIZE;
+ task.status = 0xFF;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* max transfer length plus one block (invalid) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], SPDK_WORK_BLOCK_SIZE / 512 + 1); /* transfer length */
+ task.transfer_len = SPDK_WORK_BLOCK_SIZE + 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT((task.sense_data[2] & 0xf) == SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_INVALID_FIELD_IN_CDB);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ /* zero transfer length (valid) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 0); /* transfer length */
+ task.transfer_len = 0;
+ task.offset = 0;
+ task.length = 0;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(task.data_transferred == 0);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ /* zero transfer length past end of disk (invalid) */
+ to_be64(&cdb[2], g_test_bdev_num_blocks); /* LBA */
+ to_be32(&cdb[10], 0); /* transfer length */
+ task.transfer_len = 0;
+ task.offset = 0;
+ task.length = 0;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ ut_put_task(&task);
+}
+
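+/*
+ * Walk READ, WRITE, UNMAP and SYNCHRONIZE CACHE through the stubbed bdev
+ * layer, once with the bdev_io pool available and once with a simulated
+ * one-shot -ENOMEM to cover the spdk_bdev_queue_io_wait() retry path.
+ */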
+static void
+_xfer_test(bool bdev_io_pool_full)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_task task;
+ uint8_t cdb[16];
+ char data[4096];
+ int rc;
+
+ lun.bdev = &bdev;
+
+	/* Test block device of 512M blocks (256 GiB with 512-byte blocks) */
+ g_test_bdev_num_blocks = 512 * 1024 * 1024;
+
+ /* Read 1 block */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.lun->bdev_desc = NULL;
+ task.lun->io_channel = NULL;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x88; /* READ (16) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ ut_put_task(&task);
+
+ /* Write 1 block */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x8a; /* WRITE (16) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ ut_put_task(&task);
+
+ /* Unmap 5 blocks using 2 descriptors */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x42; /* UNMAP */
+	memset(data, 0, sizeof(data));
+	to_be16(&data[2], 32); /* block descriptor data length: 2 descriptors * 16 bytes */
+	to_be64(&data[8], 1); /* first descriptor: LBA 1 */
+	to_be32(&data[16], 2); /* 2 blocks */
+	to_be64(&data[24], 10); /* second descriptor: LBA 10 */
+	to_be32(&data[32], 3); /* 3 blocks */
+ spdk_scsi_task_set_data(&task, data, sizeof(data));
+ task.status = SPDK_SCSI_STATUS_GOOD;
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ ut_put_task(&task);
+
+ /* Flush 1 block */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x91; /* SYNCHRONIZE CACHE (16) */
+ to_be64(&cdb[2], 0); /* LBA */
+	to_be32(&cdb[10], 1); /* 1 block */
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ ut_put_task(&task);
+}
+
+static void
+xfer_test(void)
+{
+ _xfer_test(false);
+ _xfer_test(true);
+}
+
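+/*
+ * Verify that bdev_scsi_get_dif_ctx() derives the DIF reference tag from
+ * the starting LBA in the CDB plus the task offset in 512-byte blocks,
+ * for the READ(6), WRITE(12) and WRITE(16) CDB formats.
+ */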
+static void
+get_dif_ctx_test(void)
+{
+ struct spdk_bdev bdev = {};
+ struct spdk_scsi_task task = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ uint8_t cdb[16];
+ bool ret;
+
+	cdb[0] = SPDK_SBC_READ_6;
+	cdb[1] = 0x12; /* bytes 1-3 carry the 21-bit starting LBA: 0x123450 */
+	cdb[2] = 0x34;
+	cdb[3] = 0x50;
+	task.cdb = cdb;
+	task.offset = 0x6 * 512; /* 6 blocks into the transfer -> ref tag 0x123456 */
+
+ ret = bdev_scsi_get_dif_ctx(&bdev, &task, &dif_ctx);
+ CU_ASSERT(ret == true);
+ CU_ASSERT(dif_ctx.init_ref_tag + dif_ctx.ref_tag_offset == 0x123456);
+
+	cdb[0] = SPDK_SBC_WRITE_12;
+	to_be32(&cdb[2], 0x12345670); /* starting LBA */
+	task.offset = 0x8 * 512; /* 8 blocks into the transfer -> ref tag 0x12345678 */
+
+ ret = bdev_scsi_get_dif_ctx(&bdev, &task, &dif_ctx);
+ CU_ASSERT(ret == true);
+ CU_ASSERT(dif_ctx.init_ref_tag + dif_ctx.ref_tag_offset == 0x12345678);
+
+	cdb[0] = SPDK_SBC_WRITE_16;
+	to_be64(&cdb[2], 0x0000000012345670); /* starting LBA */
+	task.offset = 0x8 * 512; /* 8 blocks into the transfer -> ref tag 0x12345678 */
+
+ ret = bdev_scsi_get_dif_ctx(&bdev, &task, &dif_ctx);
+ CU_ASSERT(ret == true);
+ CU_ASSERT(dif_ctx.init_ref_tag + dif_ctx.ref_tag_offset == 0x12345678);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ TAILQ_INIT(&g_bdev_io_queue);
+ TAILQ_INIT(&g_io_wait_queue);
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("translation_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, mode_select_6_test);
+ CU_ADD_TEST(suite, mode_select_6_test2);
+ CU_ADD_TEST(suite, mode_sense_6_test);
+ CU_ADD_TEST(suite, mode_sense_10_test);
+ CU_ADD_TEST(suite, inquiry_evpd_test);
+ CU_ADD_TEST(suite, inquiry_standard_test);
+ CU_ADD_TEST(suite, inquiry_overflow_test);
+ CU_ADD_TEST(suite, task_complete_test);
+ CU_ADD_TEST(suite, lba_range_test);
+ CU_ADD_TEST(suite, xfer_len_test);
+ CU_ADD_TEST(suite, xfer_test);
+ CU_ADD_TEST(suite, scsi_name_padding_test);
+ CU_ADD_TEST(suite, get_dif_ctx_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/scsi_pr.c/.gitignore b/src/spdk/test/unit/lib/scsi/scsi_pr.c/.gitignore
new file mode 100644
index 000000000..9655d812e
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_pr.c/.gitignore
@@ -0,0 +1 @@
+scsi_pr_ut
diff --git a/src/spdk/test/unit/lib/scsi/scsi_pr.c/Makefile b/src/spdk/test/unit/lib/scsi/scsi_pr.c/Makefile
new file mode 100644
index 000000000..22be734ae
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_pr.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+TEST_FILE = scsi_pr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/scsi_pr.c/scsi_pr_ut.c b/src/spdk/test/unit/lib/scsi/scsi_pr.c/scsi_pr_ut.c
new file mode 100644
index 000000000..993277036
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_pr.c/scsi_pr_ut.c
@@ -0,0 +1,673 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "scsi/port.c"
+#include "scsi/scsi_pr.c"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/mock.h"
+
+SPDK_LOG_REGISTER_COMPONENT("scsi", SPDK_LOG_SCSI)
+
+void
+spdk_scsi_task_set_status(struct spdk_scsi_task *task, int sc, int sk,
+ int asc, int ascq)
+{
+ task->status = sc;
+}
+
+/*
+ * Reservation Unit Test Configuration
+ *
+ * -------- -------- -------
+ * | Host A | | Host B | | Host C|
+ * -------- -------- -------
+ * | | |
+ * ------ ------ ------
+ * |Port A| |Port B| |Port C|
+ * ------ ------ ------
+ * \ | /
+ * \ | /
+ * \ | /
+ * ------------------------
+ * | Target Node 1 Port 0 |
+ * ------------------------
+ * |
+ * ----------------------------------
+ * | Target Node |
+ * ----------------------------------
+ * |
+ * -----
+ * |LUN 0|
+ * -----
+ *
+ */
+
+static struct spdk_scsi_lun g_lun;
+static struct spdk_scsi_port g_i_port_a;
+static struct spdk_scsi_port g_i_port_b;
+static struct spdk_scsi_port g_i_port_c;
+static struct spdk_scsi_port g_t_port_0;
+
+static void
+ut_lun_deinit(void)
+{
+ struct spdk_scsi_pr_registrant *reg, *tmp;
+
+ TAILQ_FOREACH_SAFE(reg, &g_lun.reg_head, link, tmp) {
+ TAILQ_REMOVE(&g_lun.reg_head, reg, link);
+ free(reg);
+ }
+ g_lun.reservation.rtype = 0;
+ g_lun.reservation.crkey = 0;
+ g_lun.reservation.holder = NULL;
+ g_lun.pr_generation = 0;
+}
+
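+/*
+ * Construct the three initiator ports and the single target port from the
+ * topology diagram above, each with a distinct iSCSI transport ID.
+ */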
+static void
+ut_port_init(void)
+{
+ int rc;
+
+ /* g_i_port_a */
+ rc = scsi_port_construct(&g_i_port_a, 0xa, 0,
+ "iqn.2016-06.io.spdk:fe5aacf7420a,i,0x00023d00000a");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_scsi_port_set_iscsi_transport_id(&g_i_port_a,
+ "iqn.2016-06.io.spdk:fe5aacf7420a", 0x00023d00000a);
+ /* g_i_port_b */
+ rc = scsi_port_construct(&g_i_port_b, 0xb, 0,
+ "iqn.2016-06.io.spdk:fe5aacf7420b,i,0x00023d00000b");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_scsi_port_set_iscsi_transport_id(&g_i_port_b,
+ "iqn.2016-06.io.spdk:fe5aacf7420b", 0x00023d00000b);
+ /* g_i_port_c */
+ rc = scsi_port_construct(&g_i_port_c, 0xc, 0,
+ "iqn.2016-06.io.spdk:fe5aacf7420c,i,0x00023d00000c");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_scsi_port_set_iscsi_transport_id(&g_i_port_c,
+ "iqn.2016-06.io.spdk:fe5aacf7420c", 0x00023d00000c);
+ /* g_t_port_0 */
+ rc = scsi_port_construct(&g_t_port_0, 0x0, 1,
+ "iqn.2016-06.io.spdk:fe5aacf74200,t,0x00023d000000");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_scsi_port_set_iscsi_transport_id(&g_t_port_0,
+ "iqn.2016-06.io.spdk:fe5aacf74200", 0x00023d000000);
+}
+
+static void
+ut_lun_init(void)
+{
+ TAILQ_INIT(&g_lun.reg_head);
+}
+
+static void
+ut_init_reservation_test(void)
+{
+	ut_lun_init();
+	ut_port_init();
+}
+
+static void
+ut_deinit_reservation_test(void)
+{
+ ut_lun_deinit();
+}
+
+/* Host A: register with key 0xa.
+ * Host B: register with key 0xb.
+ * Host C: register with key 0xc.
+ */
+static void
+test_build_registrants(void)
+{
+ struct spdk_scsi_pr_registrant *reg;
+ struct spdk_scsi_task task = {0};
+ uint32_t gen;
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+
+ gen = g_lun.pr_generation;
+
+ /* I_T nexus: Initiator Port A to Target Port 0 */
+ task.initiator_port = &g_i_port_a;
+ /* Test Case: Host A registers with a new key */
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0x0, 0xa1, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa1);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 1);
+
+ /* Test Case: Host A replaces with a new key */
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0xa1, 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 2);
+
+	/* Test Case: Host A tries to replace the key using a stale
+	 * current key; reservation conflict is expected */
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0xa1, 0xdead, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 2);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+
+ /* I_T nexus: Initiator Port B to Target Port 0 */
+ task.initiator_port = &g_i_port_b;
+ /* Test Case: Host B registers with a new key */
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0x0, 0xb, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_b, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 3);
+
+ /* I_T nexus: Initiator Port C to Target Port 0 */
+ task.initiator_port = &g_i_port_c;
+ /* Test Case: Host C registers with a new key */
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0x0, 0xc, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_c, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xc);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 4);
+}
+
+static void
+test_reservation_register(void)
+{
+ ut_init_reservation_test();
+
+ test_build_registrants();
+
+ ut_deinit_reservation_test();
+}
+
+static void
+test_reservation_reserve(void)
+{
+ struct spdk_scsi_pr_registrant *reg;
+ struct spdk_scsi_task task = {0};
+ uint32_t gen;
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+
+ ut_init_reservation_test();
+ test_build_registrants();
+
+ gen = g_lun.pr_generation;
+
+ task.initiator_port = &g_i_port_a;
+ task.status = 0;
+ /* Test Case: Host A acquires the reservation */
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen);
+
+ /* Test Case: Host B acquires the reservation, reservation
+ * conflict is expected.
+ */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xb, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen);
+
+	/* Test Case: Host A unregisters while holding the reservation */
+ task.initiator_port = &g_i_port_a;
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0xa, 0, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 1);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ /* Test Case: Host B acquires the reservation */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS,
+ 0xb, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 1);
+
+	/* Test Case: Host C tries to acquire the reservation with a conflicting type */
+ task.initiator_port = &g_i_port_c;
+ task.status = 0;
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xc, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 1);
+
+ /* Test Case: Host C acquires the reservation, all registrants type */
+ task.status = 0;
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS,
+ 0xc, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 1);
+
+ ut_deinit_reservation_test();
+}
+
+static void
+test_reservation_preempt_non_all_regs(void)
+{
+ struct spdk_scsi_pr_registrant *reg;
+ struct spdk_scsi_task task = {0};
+ uint32_t gen;
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+
+ ut_init_reservation_test();
+ test_build_registrants();
+
+ task.initiator_port = &g_i_port_a;
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ /* Host A acquires the reservation */
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY,
+ 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen);
+
+	/* Test Case: Host B preempts Host A; CHECK CONDITION is expected
+	 * for a zeroed service action reservation key */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY,
+ 0xb, 0);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+
+	/* Test Case: Host B preempts Host A; Host A's registrant is removed */
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xb, 0xa);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xb);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation > gen);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ /* Test Case: Host B preempts itself */
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xb, 0xb);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xb);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation > gen);
+
+	/* Test Case: Host B preempts itself and removes Host C's registrant */
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xb, 0xc);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xb);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_c, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation > gen);
+
+ ut_deinit_reservation_test();
+}
+
+static void
+test_reservation_preempt_all_regs(void)
+{
+ struct spdk_scsi_pr_registrant *reg;
+ struct spdk_scsi_task task = {0};
+ uint32_t gen;
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+
+ ut_init_reservation_test();
+ test_build_registrants();
+
+ /* Test Case: No reservation yet, Host B removes Host C's registrant */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY,
+ 0xb, 0xc);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_c, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation > gen);
+
+ task.initiator_port = &g_i_port_a;
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ /* Host A acquires the reservation */
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS,
+ 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen);
+
+	/* Test Case: Host B removes Host A's registrant and preempts the reservation */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS,
+ 0xb, 0x0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation > gen);
+
+ ut_deinit_reservation_test();
+}
+
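+/*
+ * Verify command-level reservation checking: under WRITE EXCLUSIVE,
+ * REGISTRANTS ONLY an unregistered initiator may read but not write;
+ * once the holder switches to EXCLUSIVE ACCESS, non-holders can do
+ * neither.
+ */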
+static void
+test_reservation_cmds_conflict(void)
+{
+ struct spdk_scsi_pr_registrant *reg;
+ struct spdk_scsi_task task = {0};
+ uint8_t cdb[32];
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+ task.cdb = cdb;
+
+ ut_init_reservation_test();
+ test_build_registrants();
+
+ /* Host A acquires the reservation */
+ task.initiator_port = &g_i_port_a;
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY,
+ 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+
+ /* Remove Host B registrant */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0xb, 0, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_b, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+	/* Test Case: unregistered Host B sends Read/Write commands;
+	 * the read succeeds, the write gets a reservation conflict.
+	 */
+ task.cdb[0] = SPDK_SBC_READ_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ task.cdb[0] = SPDK_SBC_WRITE_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+
+ /* Test Case: Host C sends Read/Write commands */
+ task.initiator_port = &g_i_port_c;
+ task.cdb[0] = SPDK_SBC_READ_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ task.cdb[0] = SPDK_SBC_WRITE_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Host A preempts itself with SPDK_SCSI_PR_EXCLUSIVE_ACCESS */
+ task.initiator_port = &g_i_port_a;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_EXCLUSIVE_ACCESS,
+ 0xa, 0xa);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_EXCLUSIVE_ACCESS);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+
+ /* Test Case: Host C sends Read/Write commands */
+ task.initiator_port = &g_i_port_c;
+ task.cdb[0] = SPDK_SBC_READ_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+ task.cdb[0] = SPDK_SBC_WRITE_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+
+ /* Test Case: Host B sends Read/Write commands */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SBC_READ_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+ task.cdb[0] = SPDK_SBC_WRITE_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+
+ ut_deinit_reservation_test();
+}
+
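+/*
+ * SPC-2 RESERVE(10)/RELEASE(10) semantics: the reservation is tied to the
+ * I_T nexus and blocks I/O from other initiators; note that in this
+ * implementation a RELEASE from a different initiator also clears it.
+ */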
+static void
+test_scsi2_reserve_release(void)
+{
+ struct spdk_scsi_task task = {0};
+ uint8_t cdb[32] = {};
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+ task.cdb = cdb;
+
+ ut_init_reservation_test();
+
+ /* Test Case: SPC2 RESERVE from Host A */
+ task.initiator_port = &g_i_port_a;
+ task.cdb[0] = SPDK_SPC2_RESERVE_10;
+ rc = scsi2_reserve(&task, task.cdb);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder != NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.flags == SCSI_SPC2_RESERVE);
+
+ /* Test Case: READ command from Host B */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SBC_READ_10;
+ task.status = 0;
+ rc = scsi2_reserve_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+
+ /* Test Case: SPDK_SPC2_RELEASE10 command from Host B */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SPC2_RELEASE_10;
+ task.status = 0;
+ rc = scsi2_reserve_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ rc = scsi2_release(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder == NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.flags == 0);
+
+ /* Test Case: SPC2 RESERVE from Host B */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SPC2_RESERVE_10;
+ rc = scsi2_reserve(&task, task.cdb);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder != NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.flags == SCSI_SPC2_RESERVE);
+
+ /* Test Case: READ command from Host B */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SBC_READ_10;
+ rc = scsi2_reserve_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Test Case: SPDK_SPC2_RELEASE10 command from Host A */
+ task.initiator_port = &g_i_port_a;
+ task.cdb[0] = SPDK_SPC2_RELEASE_10;
+
+ rc = scsi2_release(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder == NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.flags == 0);
+
+ ut_deinit_reservation_test();
+}
+
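+/*
+ * When a persistent reservation is active, SPC-2 RESERVE/RELEASE must not
+ * disturb it: both commands return good status while the PR holder and
+ * reservation type remain unchanged.
+ */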
+static void
+test_pr_with_scsi2_reserve_release(void)
+{
+ struct spdk_scsi_task task = {0};
+ uint8_t cdb[32] = {};
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+ task.cdb = cdb;
+
+ ut_init_reservation_test();
+ test_build_registrants();
+
+ task.initiator_port = &g_i_port_a;
+ task.status = 0;
+ /* Test Case: Host A acquires the reservation */
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY,
+ 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+
+ /* Test Case: SPDK_SPC2_RESERVE_10 command from Host B */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SPC2_RESERVE_10;
+	/* SPC-2 RESERVE/RELEASE passes the PR check and is handled by
+	 * scsi2_reserve()/scsi2_release() */
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+	/* SPC-2 commands leave the persistent reservation untouched
+	 * but still return good status */
+ rc = scsi2_reserve(&task, task.cdb);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder != NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY);
+
+ rc = scsi2_release(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder != NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY);
+
+ ut_deinit_reservation_test();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("reservation_suite", NULL, NULL);
+ CU_ADD_TEST(suite, test_reservation_register);
+ CU_ADD_TEST(suite, test_reservation_reserve);
+ CU_ADD_TEST(suite, test_reservation_preempt_non_all_regs);
+ CU_ADD_TEST(suite, test_reservation_preempt_all_regs);
+ CU_ADD_TEST(suite, test_reservation_cmds_conflict);
+ CU_ADD_TEST(suite, test_scsi2_reserve_release);
+ CU_ADD_TEST(suite, test_pr_with_scsi2_reserve_release);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/sock/Makefile b/src/spdk/test/unit/lib/sock/Makefile
new file mode 100644
index 000000000..310f544ed
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/Makefile
@@ -0,0 +1,48 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = sock.c posix.c
+
+ifeq ($(OS), Linux)
+DIRS-$(CONFIG_URING) += uring.c
+endif
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/sock/posix.c/.gitignore b/src/spdk/test/unit/lib/sock/posix.c/.gitignore
new file mode 100644
index 000000000..7d8243ef0
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/posix.c/.gitignore
@@ -0,0 +1 @@
+posix_ut
diff --git a/src/spdk/test/unit/lib/sock/posix.c/Makefile b/src/spdk/test/unit/lib/sock/posix.c/Makefile
new file mode 100644
index 000000000..e06a2adb1
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/posix.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = posix_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/sock/posix.c/posix_ut.c b/src/spdk/test/unit/lib/sock/posix.c/posix_ut.c
new file mode 100644
index 000000000..498a37628
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/posix.c/posix_ut.c
@@ -0,0 +1,174 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/util.h"
+
+#include "spdk_internal/mock.h"
+
+#include "spdk_cunit.h"
+
+#include "sock/posix/posix.c"
+
+DEFINE_STUB_V(spdk_net_impl_register, (struct spdk_net_impl *impl, int priority));
+DEFINE_STUB(spdk_sock_close, int, (struct spdk_sock **s), 0);
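+
+/* Note: posix.c is #included directly above, so the test can exercise the
+ * file-static _sock_flush() helper. sendmsg() is mocked by the unit test
+ * framework (its return value is set with MOCK_SET() below), so no real
+ * socket I/O takes place. */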
+
+static void
+_req_cb(void *cb_arg, int len)
+{
+ *(bool *)cb_arg = true;
+ CU_ASSERT(len == 0);
+}
+
+static void
+flush(void)
+{
+ struct spdk_posix_sock_group_impl group = {};
+ struct spdk_posix_sock psock = {};
+ struct spdk_sock *sock = &psock.base;
+ struct spdk_sock_request *req1, *req2;
+ bool cb_arg1, cb_arg2;
+ int rc;
+
+ /* Set up data structures */
+ TAILQ_INIT(&sock->queued_reqs);
+ TAILQ_INIT(&sock->pending_reqs);
+ sock->group_impl = &group.base;
+
+ req1 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req1 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_len = 32;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_len = 32;
+ req1->iovcnt = 2;
+ req1->cb_fn = _req_cb;
+ req1->cb_arg = &cb_arg1;
+
+ req2 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req2 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_len = 32;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_len = 32;
+ req2->iovcnt = 2;
+ req2->cb_fn = _req_cb;
+ req2->cb_arg = &cb_arg2;
+
+	/* Simple test - a request with a 2-element iovec
+	 * that gets submitted in a single sendmsg. */
+ spdk_sock_request_queue(sock, req1);
+ MOCK_SET(sendmsg, 64);
+ cb_arg1 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* Two requests, where both can fully send. */
+ spdk_sock_request_queue(sock, req1);
+ spdk_sock_request_queue(sock, req2);
+ MOCK_SET(sendmsg, 128);
+ cb_arg1 = false;
+ cb_arg2 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(cb_arg2 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* Two requests. Only first one can send */
+ spdk_sock_request_queue(sock, req1);
+ spdk_sock_request_queue(sock, req2);
+ MOCK_SET(sendmsg, 64);
+ cb_arg1 = false;
+ cb_arg2 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(cb_arg2 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req2);
+ TAILQ_REMOVE(&sock->queued_reqs, req2, internal.link);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* One request. Partial send. */
+ spdk_sock_request_queue(sock, req1);
+ MOCK_SET(sendmsg, 10);
+ cb_arg1 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
+
+ /* Do a second flush that partial sends again. */
+ MOCK_SET(sendmsg, 24);
+ cb_arg1 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
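+	/* 34 of the 64 bytes have now been sent (10 + 24); the remaining 30
+	 * bytes will complete the request. */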
+
+ /* Flush the rest of the data */
+ MOCK_SET(sendmsg, 30);
+ cb_arg1 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ free(req1);
+ free(req2);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("posix", NULL, NULL);
+
+ CU_ADD_TEST(suite, flush);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/sock/sock.c/.gitignore b/src/spdk/test/unit/lib/sock/sock.c/.gitignore
new file mode 100644
index 000000000..bd9bf8335
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/sock.c/.gitignore
@@ -0,0 +1 @@
+sock_ut
diff --git a/src/spdk/test/unit/lib/sock/sock.c/Makefile b/src/spdk/test/unit/lib/sock/sock.c/Makefile
new file mode 100644
index 000000000..1d907c097
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/sock.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = sock_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/sock/sock.c/sock_ut.c b/src/spdk/test/unit/lib/sock/sock.c/sock_ut.c
new file mode 100644
index 000000000..bbe4822d7
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/sock.c/sock_ut.c
@@ -0,0 +1,982 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/util.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/sock.h"
+
+#include "sock/sock.c"
+#include "sock/posix/posix.c"
+
+#define UT_IP "test_ip"
+#define UT_PORT 1234
+
+bool g_read_data_called;
+ssize_t g_bytes_read;
+char g_buf[256];
+struct spdk_sock *g_server_sock_read;
+int g_ut_accept_count;
+struct spdk_ut_sock *g_ut_listen_sock;
+struct spdk_ut_sock *g_ut_client_sock;
+
+struct spdk_ut_sock {
+ struct spdk_sock base;
+ struct spdk_ut_sock *peer;
+ size_t bytes_avail;
+ char buf[256];
+};
+
+struct spdk_ut_sock_group_impl {
+ struct spdk_sock_group_impl base;
+ struct spdk_ut_sock *sock;
+};
+
+#define __ut_sock(sock) ((struct spdk_ut_sock *)(sock))
+#define __ut_group(group) ((struct spdk_ut_sock_group_impl *)(group))
+
+static int
+spdk_ut_sock_getaddr(struct spdk_sock *_sock, char *saddr, int slen, uint16_t *sport,
+ char *caddr, int clen, uint16_t *cport)
+{
+ return 0;
+}
+
+static struct spdk_sock *
+spdk_ut_sock_listen(const char *ip, int port, struct spdk_sock_opts *opts)
+{
+ struct spdk_ut_sock *sock;
+
+ if (strcmp(ip, UT_IP) || port != UT_PORT) {
+ return NULL;
+ }
+
+ CU_ASSERT(g_ut_listen_sock == NULL);
+
+ sock = calloc(1, sizeof(*sock));
+ SPDK_CU_ASSERT_FATAL(sock != NULL);
+ g_ut_listen_sock = sock;
+
+ return &sock->base;
+}
+
+static struct spdk_sock *
+spdk_ut_sock_connect(const char *ip, int port, struct spdk_sock_opts *opts)
+{
+ struct spdk_ut_sock *sock;
+
+ if (strcmp(ip, UT_IP) || port != UT_PORT) {
+ return NULL;
+ }
+
+ sock = calloc(1, sizeof(*sock));
+ SPDK_CU_ASSERT_FATAL(sock != NULL);
+ g_ut_accept_count++;
+ CU_ASSERT(g_ut_client_sock == NULL);
+ g_ut_client_sock = sock;
+
+ return &sock->base;
+}
+
+static struct spdk_sock *
+spdk_ut_sock_accept(struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ struct spdk_ut_sock *new_sock;
+
+ CU_ASSERT(sock == g_ut_listen_sock);
+
+ if (g_ut_accept_count == 0) {
+ errno = EAGAIN;
+ return NULL;
+ }
+
+ g_ut_accept_count--;
+	new_sock = calloc(1, sizeof(*new_sock));
+ if (new_sock == NULL) {
+ SPDK_ERRLOG("sock allocation failed\n");
+ return NULL;
+ }
+
+ SPDK_CU_ASSERT_FATAL(g_ut_client_sock != NULL);
+ g_ut_client_sock->peer = new_sock;
+ new_sock->peer = g_ut_client_sock;
+
+ return &new_sock->base;
+}
+
+static int
+spdk_ut_sock_close(struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ if (sock == g_ut_listen_sock) {
+ g_ut_listen_sock = NULL;
+ }
+ if (sock == g_ut_client_sock) {
+ g_ut_client_sock = NULL;
+ }
+
+ if (sock->peer != NULL) {
+ sock->peer->peer = NULL;
+ }
+
+ free(_sock);
+
+ return 0;
+}
+
+static ssize_t
+spdk_ut_sock_recv(struct spdk_sock *_sock, void *buf, size_t len)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ char tmp[256];
+
+ len = spdk_min(len, sock->bytes_avail);
+
+ if (len == 0) {
+ errno = EAGAIN;
+ return -1;
+ }
+
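+	/* Copy out the requested bytes, then shift any remaining buffered data
+	 * to the front of the buffer through a temporary copy. */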
+ memcpy(buf, sock->buf, len);
+ memcpy(tmp, &sock->buf[len], sock->bytes_avail - len);
+ memcpy(sock->buf, tmp, sock->bytes_avail - len);
+ sock->bytes_avail -= len;
+
+ return len;
+}
+
+static ssize_t
+spdk_ut_sock_readv(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ size_t len;
+ char tmp[256];
+
+	/* The test implementation only supports a single iov for now. */
+ CU_ASSERT(iovcnt == 1);
+
+ len = spdk_min(iov[0].iov_len, sock->bytes_avail);
+
+ if (len == 0) {
+ errno = EAGAIN;
+ return -1;
+ }
+
+ memcpy(iov[0].iov_base, sock->buf, len);
+ memcpy(tmp, &sock->buf[len], sock->bytes_avail - len);
+ memcpy(sock->buf, tmp, sock->bytes_avail - len);
+ sock->bytes_avail -= len;
+
+ return len;
+}
+
+static ssize_t
+spdk_ut_sock_writev(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ struct spdk_ut_sock *peer;
+
+ SPDK_CU_ASSERT_FATAL(sock->peer != NULL);
+ peer = sock->peer;
+
+	/* The test implementation only supports a single iov for now. */
+ CU_ASSERT(iovcnt == 1);
+
+ memcpy(&peer->buf[peer->bytes_avail], iov[0].iov_base, iov[0].iov_len);
+ peer->bytes_avail += iov[0].iov_len;
+
+ return iov[0].iov_len;
+}
+
+static int
+spdk_ut_sock_set_recvlowat(struct spdk_sock *_sock, int nbytes)
+{
+ return 0;
+}
+
+static int
+spdk_ut_sock_set_recvbuf(struct spdk_sock *_sock, int sz)
+{
+ return 0;
+}
+
+static int
+spdk_ut_sock_set_sendbuf(struct spdk_sock *_sock, int sz)
+{
+ return 0;
+}
+
+static bool
+spdk_ut_sock_is_ipv6(struct spdk_sock *_sock)
+{
+ return false;
+}
+
+static bool
+spdk_ut_sock_is_ipv4(struct spdk_sock *_sock)
+{
+ return true;
+}
+
+static bool
+spdk_ut_sock_is_connected(struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ return (sock->peer != NULL);
+}
+
+static int
+spdk_ut_sock_get_placement_id(struct spdk_sock *_sock, int *placement_id)
+{
+ return -1;
+}
+
+static struct spdk_sock_group_impl *
+spdk_ut_sock_group_impl_create(void)
+{
+ struct spdk_ut_sock_group_impl *group_impl;
+
+ group_impl = calloc(1, sizeof(*group_impl));
+ SPDK_CU_ASSERT_FATAL(group_impl != NULL);
+
+ return &group_impl->base;
+}
+
+static int
+spdk_ut_sock_group_impl_add_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ group->sock = sock;
+
+ return 0;
+}
+
+static int
+spdk_ut_sock_group_impl_remove_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ CU_ASSERT(group->sock == sock);
+ group->sock = NULL;
+
+ return 0;
+}
+
+static int
+spdk_ut_sock_group_impl_poll(struct spdk_sock_group_impl *_group, int max_events,
+ struct spdk_sock **socks)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+
+ if (group->sock != NULL && group->sock->bytes_avail > 0) {
+ socks[0] = &group->sock->base;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+spdk_ut_sock_group_impl_close(struct spdk_sock_group_impl *_group)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+
+ CU_ASSERT(group->sock == NULL);
+ free(_group);
+
+ return 0;
+}
+
+static struct spdk_net_impl g_ut_net_impl = {
+ .name = "ut",
+ .getaddr = spdk_ut_sock_getaddr,
+ .connect = spdk_ut_sock_connect,
+ .listen = spdk_ut_sock_listen,
+ .accept = spdk_ut_sock_accept,
+ .close = spdk_ut_sock_close,
+ .recv = spdk_ut_sock_recv,
+ .readv = spdk_ut_sock_readv,
+ .writev = spdk_ut_sock_writev,
+ .set_recvlowat = spdk_ut_sock_set_recvlowat,
+ .set_recvbuf = spdk_ut_sock_set_recvbuf,
+ .set_sendbuf = spdk_ut_sock_set_sendbuf,
+ .is_ipv6 = spdk_ut_sock_is_ipv6,
+ .is_ipv4 = spdk_ut_sock_is_ipv4,
+ .is_connected = spdk_ut_sock_is_connected,
+ .get_placement_id = spdk_ut_sock_get_placement_id,
+ .group_impl_create = spdk_ut_sock_group_impl_create,
+ .group_impl_add_sock = spdk_ut_sock_group_impl_add_sock,
+ .group_impl_remove_sock = spdk_ut_sock_group_impl_remove_sock,
+ .group_impl_poll = spdk_ut_sock_group_impl_poll,
+ .group_impl_close = spdk_ut_sock_group_impl_close,
+};
+
+SPDK_NET_IMPL_REGISTER(ut, &g_ut_net_impl, DEFAULT_SOCK_PRIORITY + 2);
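+
+/* The "ut" implementation is registered alongside the built-in posix
+ * implementation; the tests below select an implementation explicitly by
+ * name ("posix" or "ut"). */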
+
+static void
+_sock(const char *ip, int port, char *impl_name)
+{
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock;
+ struct spdk_sock *client_sock;
+ char *test_string = "abcdef";
+ char buffer[64];
+ ssize_t bytes_read, bytes_written;
+ struct iovec iov;
+ int rc;
+
+ listen_sock = spdk_sock_listen(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(errno == EAGAIN || errno == EWOULDBLOCK);
+
+ client_sock = spdk_sock_connect(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(client_sock != NULL);
+
+ /*
+ * Delay a bit here before checking if server socket is
+ * ready.
+ */
+ usleep(1000);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock != NULL);
+ CU_ASSERT(spdk_sock_is_connected(client_sock) == true);
+ CU_ASSERT(spdk_sock_is_connected(server_sock) == true);
+
+ /* Test spdk_sock_recv */
+ iov.iov_base = test_string;
+ iov.iov_len = 7;
+ bytes_written = spdk_sock_writev(client_sock, &iov, 1);
+ CU_ASSERT(bytes_written == 7);
+
+ usleep(1000);
+
+ bytes_read = spdk_sock_recv(server_sock, buffer, 2);
+ CU_ASSERT(bytes_read == 2);
+
+ usleep(1000);
+
+ bytes_read += spdk_sock_recv(server_sock, buffer + 2, 5);
+ CU_ASSERT(bytes_read == 7);
+
+ CU_ASSERT(strncmp(test_string, buffer, 7) == 0);
+
+ /* Test spdk_sock_readv */
+ iov.iov_base = test_string;
+ iov.iov_len = 7;
+ bytes_written = spdk_sock_writev(client_sock, &iov, 1);
+ CU_ASSERT(bytes_written == 7);
+
+ usleep(1000);
+
+ iov.iov_base = buffer;
+ iov.iov_len = 2;
+ bytes_read = spdk_sock_readv(server_sock, &iov, 1);
+ CU_ASSERT(bytes_read == 2);
+
+ usleep(1000);
+
+ iov.iov_base = buffer + 2;
+ iov.iov_len = 5;
+ bytes_read += spdk_sock_readv(server_sock, &iov, 1);
+ CU_ASSERT(bytes_read == 7);
+
+ usleep(1000);
+
+ CU_ASSERT(strncmp(test_string, buffer, 7) == 0);
+
+ rc = spdk_sock_close(&client_sock);
+ CU_ASSERT(client_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+#if defined(__FreeBSD__)
+ /* On FreeBSD, it takes a small amount of time for a close to propagate to the
+ * other side, even in loopback. Introduce a small sleep. */
+ sleep(1);
+#endif
+ CU_ASSERT(spdk_sock_is_connected(server_sock) == false);
+
+ rc = spdk_sock_close(&server_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+posix_sock(void)
+{
+ _sock("127.0.0.1", UT_PORT, "posix");
+}
+
+static void
+ut_sock(void)
+{
+ _sock(UT_IP, UT_PORT, "ut");
+}
+
+static void
+read_data(void *cb_arg, struct spdk_sock_group *group, struct spdk_sock *sock)
+{
+ struct spdk_sock *server_sock = cb_arg;
+
+ CU_ASSERT(server_sock == sock);
+
+ g_read_data_called = true;
+ g_bytes_read += spdk_sock_recv(server_sock, g_buf + g_bytes_read, sizeof(g_buf) - g_bytes_read);
+}
+
+static void
+_sock_group(const char *ip, int port, char *impl_name)
+{
+ struct spdk_sock_group *group;
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock;
+ struct spdk_sock *client_sock;
+ char *test_string = "abcdef";
+ ssize_t bytes_written;
+ struct iovec iov;
+ int rc;
+
+ listen_sock = spdk_sock_listen(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(errno == EAGAIN || errno == EWOULDBLOCK);
+
+ client_sock = spdk_sock_connect(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(client_sock != NULL);
+
+ usleep(1000);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock != NULL);
+
+ group = spdk_sock_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ /* pass null cb_fn */
+ rc = spdk_sock_group_add_sock(group, server_sock, NULL, NULL);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+
+ rc = spdk_sock_group_add_sock(group, server_sock, read_data, server_sock);
+ CU_ASSERT(rc == 0);
+
+ /* try adding sock a second time */
+ rc = spdk_sock_group_add_sock(group, server_sock, read_data, server_sock);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EBUSY);
+
+ g_read_data_called = false;
+ g_bytes_read = 0;
+ rc = spdk_sock_group_poll(group);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_read_data_called == false);
+
+ iov.iov_base = test_string;
+ iov.iov_len = 7;
+ bytes_written = spdk_sock_writev(client_sock, &iov, 1);
+ CU_ASSERT(bytes_written == 7);
+
+ usleep(1000);
+
+ g_read_data_called = false;
+ g_bytes_read = 0;
+ rc = spdk_sock_group_poll(group);
+
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(g_read_data_called == true);
+ CU_ASSERT(g_bytes_read == 7);
+
+ CU_ASSERT(strncmp(test_string, g_buf, 7) == 0);
+
+ rc = spdk_sock_close(&client_sock);
+ CU_ASSERT(client_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ /* Try to close sock_group while it still has sockets. */
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EBUSY);
+
+ /* Try to close sock while it is still part of a sock_group. */
+ rc = spdk_sock_close(&server_sock);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EBUSY);
+
+ rc = spdk_sock_group_remove_sock(group, server_sock);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(group == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&server_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+posix_sock_group(void)
+{
+ _sock_group("127.0.0.1", UT_PORT, "posix");
+}
+
+static void
+ut_sock_group(void)
+{
+ _sock_group(UT_IP, UT_PORT, "ut");
+}
+
+static void
+read_data_fairness(void *cb_arg, struct spdk_sock_group *group, struct spdk_sock *sock)
+{
+ struct spdk_sock *server_sock = cb_arg;
+ ssize_t bytes_read;
+ char buf[1];
+
+ CU_ASSERT(g_server_sock_read == NULL);
+ CU_ASSERT(server_sock == sock);
+
+ g_server_sock_read = server_sock;
+ bytes_read = spdk_sock_recv(server_sock, buf, 1);
+ CU_ASSERT(bytes_read == 1);
+}
+
+static void
+posix_sock_group_fairness(void)
+{
+ struct spdk_sock_group *group;
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock[3];
+ struct spdk_sock *client_sock[3];
+ char test_char = 'a';
+ ssize_t bytes_written;
+ struct iovec iov;
+ int i, rc;
+
+ listen_sock = spdk_sock_listen("127.0.0.1", UT_PORT, "posix");
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ group = spdk_sock_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ for (i = 0; i < 3; i++) {
+ client_sock[i] = spdk_sock_connect("127.0.0.1", UT_PORT, "posix");
+ SPDK_CU_ASSERT_FATAL(client_sock[i] != NULL);
+
+ usleep(1000);
+
+ server_sock[i] = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock[i] != NULL);
+
+ rc = spdk_sock_group_add_sock(group, server_sock[i],
+ read_data_fairness, server_sock[i]);
+ CU_ASSERT(rc == 0);
+ }
+
+ iov.iov_base = &test_char;
+ iov.iov_len = 1;
+
+ for (i = 0; i < 3; i++) {
+ bytes_written = spdk_sock_writev(client_sock[i], &iov, 1);
+ CU_ASSERT(bytes_written == 1);
+ }
+
+ usleep(1000);
+
+ /*
+ * Poll for just one event - this should be server sock 0, since that
+ * is the peer of the first client sock that we wrote to.
+ */
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(g_server_sock_read == server_sock[0]);
+
+ /*
+ * Now write another byte to client sock 0. We want to ensure that
+ * the sock group does not unfairly process the event for this sock
+ * before the socks that were written to earlier.
+ */
+ bytes_written = spdk_sock_writev(client_sock[0], &iov, 1);
+ CU_ASSERT(bytes_written == 1);
+
+ usleep(1000);
+
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(g_server_sock_read == server_sock[1]);
+
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(g_server_sock_read == server_sock[2]);
+
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(g_server_sock_read == server_sock[0]);
+
+ for (i = 0; i < 3; i++) {
+ rc = spdk_sock_group_remove_sock(group, server_sock[i]);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&client_sock[i]);
+ CU_ASSERT(client_sock[i] == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&server_sock[i]);
+ CU_ASSERT(server_sock[i] == NULL);
+ CU_ASSERT(rc == 0);
+ }
+
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(group == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+}
+
+struct close_ctx {
+ struct spdk_sock_group *group;
+ struct spdk_sock *sock;
+ bool called;
+};
+
+static void
+_first_close_cb(void *cb_arg, int err)
+{
+ struct close_ctx *ctx = cb_arg;
+ int rc;
+
+ ctx->called = true;
+
+	/* Remove the sock from its group and close it from within the completion callback */
+ rc = spdk_sock_group_remove_sock(ctx->group, ctx->sock);
+ CU_ASSERT(rc == 0);
+ spdk_sock_close(&ctx->sock);
+
+ CU_ASSERT(err == 0);
+}
+
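+/* The second queued request is still pending when the first request's
+ * callback closes the socket, so it is expected to fail with -ECANCELED. */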
+static void
+_second_close_cb(void *cb_arg, int err)
+{
+ *(bool *)cb_arg = true;
+ CU_ASSERT(err == -ECANCELED);
+}
+
+static void
+_sock_close(const char *ip, int port, char *impl_name)
+{
+ struct spdk_sock_group *group;
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock;
+ struct spdk_sock *client_sock;
+ uint8_t data_buf[64] = {};
+ struct spdk_sock_request *req1, *req2;
+ struct close_ctx ctx = {};
+ bool cb_arg2 = false;
+ int rc;
+
+ listen_sock = spdk_sock_listen(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ client_sock = spdk_sock_connect(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(client_sock != NULL);
+
+ usleep(1000);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock != NULL);
+
+ group = spdk_sock_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ rc = spdk_sock_group_add_sock(group, server_sock, read_data, server_sock);
+ CU_ASSERT(rc == 0);
+
+ /* Submit multiple async writevs on the server sock */
+
+ req1 = calloc(1, sizeof(struct spdk_sock_request) + sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req1 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_base = data_buf;
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_len = 64;
+ ctx.group = group;
+ ctx.sock = server_sock;
+ ctx.called = false;
+ req1->iovcnt = 1;
+ req1->cb_fn = _first_close_cb;
+ req1->cb_arg = &ctx;
+ spdk_sock_writev_async(server_sock, req1);
+ CU_ASSERT(ctx.called == false);
+
+ req2 = calloc(1, sizeof(struct spdk_sock_request) + sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req2 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_base = data_buf;
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_len = 64;
+ req2->iovcnt = 1;
+ req2->cb_fn = _second_close_cb;
+ req2->cb_arg = &cb_arg2;
+ spdk_sock_writev_async(server_sock, req2);
+ CU_ASSERT(cb_arg2 == false);
+
+ /* Poll the socket so the writev_async's send. The first one's
+ * callback will close the socket. */
+ spdk_sock_group_poll(group);
+ if (ctx.called == false) {
+ /* Sometimes the zerocopy completion isn't posted immediately. Delay slightly
+ * and poll one more time. */
+ usleep(1000);
+ spdk_sock_group_poll(group);
+ }
+ CU_ASSERT(ctx.called == true);
+ CU_ASSERT(cb_arg2 == true);
+
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(group == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&client_sock);
+ CU_ASSERT(client_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ free(req1);
+ free(req2);
+}
+
+static void
+_posix_sock_close(void)
+{
+ _sock_close("127.0.0.1", UT_PORT, "posix");
+}
+
+static void
+sock_get_default_opts(void)
+{
+ struct spdk_sock_opts opts;
+
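+	/* spdk_sock_get_default_opts() only overwrites fields that fit entirely
+	 * within the caller-supplied opts_size, so an undersized opts_size must
+	 * leave the priority field untouched. */
+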
+ /* opts_size is 0 */
+ opts.opts_size = 0;
+ opts.priority = 3;
+ spdk_sock_get_default_opts(&opts);
+ CU_ASSERT(opts.priority == 3);
+ CU_ASSERT(opts.opts_size == 0);
+
+ /* opts_size is less than sizeof(opts) */
+ opts.opts_size = 4;
+ opts.priority = 3;
+ spdk_sock_get_default_opts(&opts);
+ CU_ASSERT(opts.priority == 3);
+ CU_ASSERT(opts.opts_size == 4);
+
+ /* opts_size is equal to sizeof(opts) */
+ opts.opts_size = sizeof(opts);
+ opts.priority = 3;
+ spdk_sock_get_default_opts(&opts);
+ CU_ASSERT(opts.priority == SPDK_SOCK_DEFAULT_PRIORITY);
+ CU_ASSERT(opts.opts_size == sizeof(opts));
+
+	/* opts_size is larger than sizeof(opts) */
+ opts.opts_size = sizeof(opts) + 1;
+ opts.priority = 3;
+ spdk_sock_get_default_opts(&opts);
+ CU_ASSERT(opts.priority == SPDK_SOCK_DEFAULT_PRIORITY);
+ CU_ASSERT(opts.opts_size == (sizeof(opts) + 1));
+}
+
+static void
+ut_sock_impl_get_set_opts(void)
+{
+ int rc;
+ size_t len = 0;
+ /* Use any pointer value for opts. It is never dereferenced in this test */
+ struct spdk_sock_impl_opts *opts = (struct spdk_sock_impl_opts *)0x123456789;
+
+ rc = spdk_sock_impl_get_opts("ut", NULL, &len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+ rc = spdk_sock_impl_get_opts("ut", opts, NULL);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+ rc = spdk_sock_impl_get_opts("ut", opts, &len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == ENOTSUP);
+
+ rc = spdk_sock_impl_set_opts("ut", NULL, len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+ rc = spdk_sock_impl_set_opts("ut", opts, len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == ENOTSUP);
+}
+
+static void
+posix_sock_impl_get_set_opts(void)
+{
+ int rc;
+ size_t len = 0;
+ struct spdk_sock_impl_opts opts = {};
+ struct spdk_sock_impl_opts long_opts[2];
+
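+	/* Impl opts are copied a whole field at a time: a field is only read or
+	 * written when it fits entirely within the supplied length, which the
+	 * partial-length cases below rely on. */
+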
+ rc = spdk_sock_impl_get_opts("posix", NULL, &len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+ rc = spdk_sock_impl_get_opts("posix", &opts, NULL);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+
+ /* Check default opts */
+ len = sizeof(opts);
+ rc = spdk_sock_impl_get_opts("posix", &opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(len == sizeof(opts));
+ CU_ASSERT(opts.recv_buf_size == MIN_SO_RCVBUF_SIZE);
+ CU_ASSERT(opts.send_buf_size == MIN_SO_SNDBUF_SIZE);
+
+ /* Try to request more opts */
+ len = sizeof(long_opts);
+ rc = spdk_sock_impl_get_opts("posix", long_opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(len == sizeof(opts));
+
+ /* Try to request zero opts */
+ len = 0;
+ rc = spdk_sock_impl_get_opts("posix", &opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(len == 0);
+
+ rc = spdk_sock_impl_set_opts("posix", NULL, len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+
+ opts.recv_buf_size = 16;
+ opts.send_buf_size = 4;
+ rc = spdk_sock_impl_set_opts("posix", &opts, sizeof(opts));
+ CU_ASSERT(rc == 0);
+ len = sizeof(opts);
+ memset(&opts, 0, sizeof(opts));
+ rc = spdk_sock_impl_get_opts("posix", &opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(opts.recv_buf_size == 16);
+ CU_ASSERT(opts.send_buf_size == 4);
+
+ /* Try to set more opts */
+ long_opts[0].recv_buf_size = 4;
+ long_opts[0].send_buf_size = 6;
+ long_opts[1].recv_buf_size = 0;
+ long_opts[1].send_buf_size = 0;
+ rc = spdk_sock_impl_set_opts("posix", long_opts, sizeof(long_opts));
+ CU_ASSERT(rc == 0);
+
+	/* Try to set fewer opts. Trailing fields should be left untouched */
+ opts.recv_buf_size = 5;
+ opts.send_buf_size = 10;
+ rc = spdk_sock_impl_set_opts("posix", &opts, sizeof(opts.recv_buf_size));
+ CU_ASSERT(rc == 0);
+ len = sizeof(opts);
+ memset(&opts, 0, sizeof(opts));
+ rc = spdk_sock_impl_get_opts("posix", &opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(opts.recv_buf_size == 5);
+ CU_ASSERT(opts.send_buf_size == 6);
+
+	/* Try to set a partial option. The field should not be changed */
+ opts.recv_buf_size = 1000;
+ rc = spdk_sock_impl_set_opts("posix", &opts, 1);
+ CU_ASSERT(rc == 0);
+ len = sizeof(opts);
+ memset(&opts, 0, sizeof(opts));
+ rc = spdk_sock_impl_get_opts("posix", &opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(opts.recv_buf_size == 5);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("sock", NULL, NULL);
+
+ CU_ADD_TEST(suite, posix_sock);
+ CU_ADD_TEST(suite, ut_sock);
+ CU_ADD_TEST(suite, posix_sock_group);
+ CU_ADD_TEST(suite, ut_sock_group);
+ CU_ADD_TEST(suite, posix_sock_group_fairness);
+ CU_ADD_TEST(suite, _posix_sock_close);
+ CU_ADD_TEST(suite, sock_get_default_opts);
+ CU_ADD_TEST(suite, ut_sock_impl_get_set_opts);
+ CU_ADD_TEST(suite, posix_sock_impl_get_set_opts);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/sock/uring.c/.gitignore b/src/spdk/test/unit/lib/sock/uring.c/.gitignore
new file mode 100644
index 000000000..ad7627b7b
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/uring.c/.gitignore
@@ -0,0 +1 @@
+uring_ut
diff --git a/src/spdk/test/unit/lib/sock/uring.c/Makefile b/src/spdk/test/unit/lib/sock/uring.c/Makefile
new file mode 100644
index 000000000..8b0da0181
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/uring.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = uring_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/sock/uring.c/uring_ut.c b/src/spdk/test/unit/lib/sock/uring.c/uring_ut.c
new file mode 100644
index 000000000..edad8e5da
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/uring.c/uring_ut.c
@@ -0,0 +1,272 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/util.h"
+
+#include "spdk_internal/mock.h"
+
+#include "spdk_cunit.h"
+
+#include "sock/uring/uring.c"
+
+DEFINE_STUB_V(spdk_net_impl_register, (struct spdk_net_impl *impl, int priority));
+DEFINE_STUB(spdk_sock_close, int, (struct spdk_sock **s), 0);
+DEFINE_STUB(__io_uring_get_cqe, int, (struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
+ unsigned submit,
+ unsigned wait_nr, sigset_t *sigmask), 0);
+DEFINE_STUB(io_uring_submit, int, (struct io_uring *ring), 0);
+DEFINE_STUB(io_uring_get_sqe, struct io_uring_sqe *, (struct io_uring *ring), 0);
+DEFINE_STUB(io_uring_queue_init, int, (unsigned entries, struct io_uring *ring, unsigned flags), 0);
+DEFINE_STUB_V(io_uring_queue_exit, (struct io_uring *ring));
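+
+/* The io_uring entry points are stubbed out above, so these tests exercise
+ * only the request batching and completion logic in uring.c; no ring is
+ * created and no system calls are issued. */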
+
+static void
+_req_cb(void *cb_arg, int len)
+{
+ *(bool *)cb_arg = true;
+ CU_ASSERT(len == 0);
+}
+
+static void
+flush_client(void)
+{
+ struct spdk_uring_sock_group_impl group = {};
+ struct spdk_uring_sock usock = {};
+ struct spdk_sock *sock = &usock.base;
+ struct spdk_sock_request *req1, *req2;
+ bool cb_arg1, cb_arg2;
+ int rc;
+
+ /* Set up data structures */
+ TAILQ_INIT(&sock->queued_reqs);
+ TAILQ_INIT(&sock->pending_reqs);
+ sock->group_impl = &group.base;
+
+ req1 = calloc(1, sizeof(struct spdk_sock_request) + 3 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req1 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_len = 64;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_len = 64;
+ SPDK_SOCK_REQUEST_IOV(req1, 2)->iov_base = (void *)300;
+ SPDK_SOCK_REQUEST_IOV(req1, 2)->iov_len = 64;
+ req1->iovcnt = 3;
+ req1->cb_fn = _req_cb;
+ req1->cb_arg = &cb_arg1;
+
+ req2 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req2 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_len = 32;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_len = 32;
+ req2->iovcnt = 2;
+ req2->cb_fn = _req_cb;
+ req2->cb_arg = &cb_arg2;
+
+	/* Simple test - a request with a 3-element iovec
+	 * that gets submitted in a single sendmsg. */
+ spdk_sock_request_queue(sock, req1);
+ MOCK_SET(sendmsg, 192);
+ cb_arg1 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* Two requests, where both can fully send. */
+ spdk_sock_request_queue(sock, req1);
+ spdk_sock_request_queue(sock, req2);
+ MOCK_SET(sendmsg, 256);
+ cb_arg1 = false;
+ cb_arg2 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(cb_arg2 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* Two requests. Only first one can send */
+ spdk_sock_request_queue(sock, req1);
+ spdk_sock_request_queue(sock, req2);
+ MOCK_SET(sendmsg, 192);
+ cb_arg1 = false;
+ cb_arg2 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(cb_arg2 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req2);
+ TAILQ_REMOVE(&sock->queued_reqs, req2, internal.link);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* One request. Partial send. */
+ spdk_sock_request_queue(sock, req1);
+ MOCK_SET(sendmsg, 10);
+ cb_arg1 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
+
+ /* Do a second flush that partial sends again. */
+ MOCK_SET(sendmsg, 52);
+ cb_arg1 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
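+	/* 62 of the 192 bytes have now been sent (10 + 52); the remaining 130
+	 * bytes will complete the request. */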
+
+ /* Flush the rest of the data */
+ MOCK_SET(sendmsg, 130);
+ cb_arg1 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ free(req1);
+ free(req2);
+}
+
+static void
+flush_server(void)
+{
+ struct spdk_uring_sock_group_impl group = {};
+ struct spdk_uring_sock usock = {};
+ struct spdk_sock *sock = &usock.base;
+ struct spdk_sock_request *req1, *req2;
+ bool cb_arg1, cb_arg2;
+ int rc;
+
+ /* Set up data structures */
+ TAILQ_INIT(&sock->queued_reqs);
+ TAILQ_INIT(&sock->pending_reqs);
+ sock->group_impl = &group.base;
+ usock.write_task.sock = &usock;
+ usock.group = &group;
+
+ req1 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req1 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_len = 64;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_len = 64;
+ req1->iovcnt = 2;
+ req1->cb_fn = _req_cb;
+ req1->cb_arg = &cb_arg1;
+
+ req2 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req2 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_len = 32;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_len = 32;
+ req2->iovcnt = 2;
+ req2->cb_fn = _req_cb;
+ req2->cb_arg = &cb_arg2;
+
+	/* We must not call _sock_flush() directly here, since it would end up
+	 * calling the liburing functions; drive the logic through
+	 * sock_prep_reqs() and sock_complete_reqs() instead. */
+
+	/* Simple test - a request with a 2-element iovec
+	 * that is fully completed. */
+ spdk_sock_request_queue(sock, req1);
+ cb_arg1 = false;
+ rc = sock_prep_reqs(sock, usock.write_task.iovs, 0, NULL);
+ CU_ASSERT(rc == 2);
+ sock_complete_reqs(sock, 128);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* Two requests, where both can be fully completed. */
+ spdk_sock_request_queue(sock, req1);
+ spdk_sock_request_queue(sock, req2);
+ cb_arg1 = false;
+ cb_arg2 = false;
+ rc = sock_prep_reqs(sock, usock.write_task.iovs, 0, NULL);
+ CU_ASSERT(rc == 4);
+ sock_complete_reqs(sock, 192);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(cb_arg2 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* One request that is partially sent. */
+ spdk_sock_request_queue(sock, req1);
+ cb_arg1 = false;
+ rc = sock_prep_reqs(sock, usock.write_task.iovs, 0, NULL);
+ CU_ASSERT(rc == 2);
+ sock_complete_reqs(sock, 92);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
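+	/* 92 of the 128 bytes have completed; the remaining 36 bytes arrive in
+	 * two further completions (10 + 26) below. */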
+
+	/* A second completion arrives that is still only a partial send. */
+ sock_complete_reqs(sock, 10);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
+
+ /* Data is finally sent. */
+ sock_complete_reqs(sock, 26);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ free(req1);
+ free(req2);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("uring", NULL, NULL);
+
+ CU_ADD_TEST(suite, flush_client);
+ CU_ADD_TEST(suite, flush_server);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/thread/Makefile b/src/spdk/test/unit/lib/thread/Makefile
new file mode 100644
index 000000000..d73816947
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = thread.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/thread/thread.c/.gitignore b/src/spdk/test/unit/lib/thread/thread.c/.gitignore
new file mode 100644
index 000000000..1a165acb8
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/thread.c/.gitignore
@@ -0,0 +1 @@
+thread_ut
diff --git a/src/spdk/test/unit/lib/thread/thread.c/Makefile b/src/spdk/test/unit/lib/thread/thread.c/Makefile
new file mode 100644
index 000000000..461cfcd22
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/thread.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = thread_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/thread/thread.c/thread_ut.c b/src/spdk/test/unit/lib/thread/thread.c/thread_ut.c
new file mode 100644
index 000000000..d577671b8
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/thread.c/thread_ut.c
@@ -0,0 +1,1270 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/thread.h"
+
+#include "thread/thread.c"
+#include "common/lib/ut_multithread.c"
+
+static int g_sched_rc = 0;
+
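+/* g_sched_rc controls the return value of the scheduling callbacks below,
+ * letting the tests simulate both successful and failed thread scheduling. */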
+static int
+_thread_schedule(struct spdk_thread *thread)
+{
+ return g_sched_rc;
+}
+
+static bool
+_thread_op_supported(enum spdk_thread_op op)
+{
+ switch (op) {
+ case SPDK_THREAD_OP_NEW:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int
+_thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
+{
+ switch (op) {
+ case SPDK_THREAD_OP_NEW:
+ return _thread_schedule(thread);
+ default:
+ return -ENOTSUP;
+ }
+}
+
+static void
+thread_alloc(void)
+{
+ struct spdk_thread *thread;
+
+ /* No schedule callback */
+ spdk_thread_lib_init(NULL, 0);
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
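+	/* spdk_thread_exit() does not tear the thread down synchronously; the
+	 * thread must be polled until it reports exited before it can be
+	 * destroyed. */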
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+ spdk_thread_lib_fini();
+
+ /* Schedule callback exists */
+ spdk_thread_lib_init(_thread_schedule, 0);
+
+ /* Scheduling succeeds */
+ g_sched_rc = 0;
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ /* Scheduling fails */
+ g_sched_rc = -1;
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread == NULL);
+
+ spdk_thread_lib_fini();
+
+ /* Scheduling callback exists with extended thread library initialization. */
+ spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0);
+
+ /* Scheduling succeeds */
+ g_sched_rc = 0;
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ /* Scheduling fails */
+ g_sched_rc = -1;
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread == NULL);
+
+ spdk_thread_lib_fini();
+}
+
+static void
+send_msg_cb(void *ctx)
+{
+ bool *done = ctx;
+
+ *done = true;
+}
+
+static void
+thread_send_msg(void)
+{
+ struct spdk_thread *thread0;
+ bool done = false;
+
+ allocate_threads(2);
+ set_thread(0);
+ thread0 = spdk_get_thread();
+
+ set_thread(1);
+ /* Simulate thread 1 sending a message to thread 0. */
+ spdk_thread_send_msg(thread0, send_msg_cb, &done);
+
+ /* We have not polled thread 0 yet, so done should be false. */
+ CU_ASSERT(!done);
+
+ /*
+ * Poll thread 1. The message was sent to thread 0, so this should be
+ * a nop and done should still be false.
+ */
+ poll_thread(1);
+ CU_ASSERT(!done);
+
+ /*
+ * Poll thread 0. This should execute the message and done should then
+ * be true.
+ */
+ poll_thread(0);
+ CU_ASSERT(done);
+
+ free_threads();
+}
+
+static int
+poller_run_done(void *ctx)
+{
+ bool *poller_run = ctx;
+
+ *poller_run = true;
+
+ return -1;
+}
+
+static void
+thread_poller(void)
+{
+ struct spdk_poller *poller = NULL;
+ bool poller_run = false;
+
+ allocate_threads(1);
+
+ set_thread(0);
+ MOCK_SET(spdk_get_ticks, 0);
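+	/* With spdk_get_ticks mocked to a fixed value, timed pollers only fire
+	 * when the test advances the clock explicitly through spdk_delay_us(),
+	 * a behavior supplied by the unit-test harness rather than real time.
+	 */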
+	/* Register a poller with no wait time and test that it runs */
+ poller = spdk_poller_register(poller_run_done, &poller_run, 0);
+ CU_ASSERT(poller != NULL);
+
+ poll_threads();
+ CU_ASSERT(poller_run == true);
+
+ spdk_poller_unregister(&poller);
+ CU_ASSERT(poller == NULL);
+
+	/* Register a poller with a 1000us period and test that it runs once per period */
+ poller_run = false;
+ poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
+ CU_ASSERT(poller != NULL);
+
+ poll_threads();
+ CU_ASSERT(poller_run == false);
+
+ spdk_delay_us(1000);
+ poll_threads();
+ CU_ASSERT(poller_run == true);
+
+ poller_run = false;
+ poll_threads();
+ CU_ASSERT(poller_run == false);
+
+ spdk_delay_us(1000);
+ poll_threads();
+ CU_ASSERT(poller_run == true);
+
+ spdk_poller_unregister(&poller);
+ CU_ASSERT(poller == NULL);
+
+ free_threads();
+}
+
+struct poller_ctx {
+ struct spdk_poller *poller;
+ bool run;
+};
+
+static int
+poller_run_pause(void *ctx)
+{
+ struct poller_ctx *poller_ctx = ctx;
+
+ poller_ctx->run = true;
+ spdk_poller_pause(poller_ctx->poller);
+
+ return 0;
+}
+
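+/* Note: a poller may pause itself from inside its own callback, as
+ * poller_run_pause() above does; it then stays paused until explicitly
+ * resumed or unregistered, which poller_pause() below verifies.
+ */
+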
+static void
+poller_msg_pause_cb(void *ctx)
+{
+ struct spdk_poller *poller = ctx;
+
+ spdk_poller_pause(poller);
+}
+
+static void
+poller_msg_resume_cb(void *ctx)
+{
+ struct spdk_poller *poller = ctx;
+
+ spdk_poller_resume(poller);
+}
+
+static void
+poller_pause(void)
+{
+ struct poller_ctx poller_ctx = {};
+ unsigned int delay[] = { 0, 1000 };
+ unsigned int i;
+
+ allocate_threads(1);
+ set_thread(0);
+
+ /* Register a poller that pauses itself */
+ poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
+ CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_poller_unregister(&poller_ctx.poller);
+ CU_ASSERT_PTR_NULL(poller_ctx.poller);
+
+ /* Verify that resuming an unpaused poller doesn't do anything */
+ poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
+ CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);
+
+ spdk_poller_resume(poller_ctx.poller);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ /* Verify that pausing the same poller twice works too */
+ spdk_poller_pause(poller_ctx.poller);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_poller_pause(poller_ctx.poller);
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_poller_resume(poller_ctx.poller);
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ /* Verify that a poller is run when it's resumed immediately after pausing */
+ poller_ctx.run = false;
+ spdk_poller_pause(poller_ctx.poller);
+ spdk_poller_resume(poller_ctx.poller);
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ spdk_poller_unregister(&poller_ctx.poller);
+ CU_ASSERT_PTR_NULL(poller_ctx.poller);
+
+ /* Poll the thread to make sure the previous poller gets unregistered */
+ poll_threads();
+ CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);
+
+ /* Verify that it's possible to unregister a paused poller */
+ poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
+ CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ spdk_poller_pause(poller_ctx.poller);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_poller_unregister(&poller_ctx.poller);
+
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+ CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);
+
+ /* Register pollers with 0 and 1000us wait time and pause/resume them */
+ for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
+ poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
+ CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);
+
+ spdk_delay_us(delay[i]);
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ spdk_poller_pause(poller_ctx.poller);
+
+ spdk_delay_us(delay[i]);
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_poller_resume(poller_ctx.poller);
+
+ spdk_delay_us(delay[i]);
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ /* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
+ spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);
+
+ spdk_delay_us(delay[i]);
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);
+
+ poll_threads();
+ if (delay[i] > 0) {
+ spdk_delay_us(delay[i]);
+ poll_threads();
+ }
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ spdk_poller_unregister(&poller_ctx.poller);
+ CU_ASSERT_PTR_NULL(poller_ctx.poller);
+ }
+
+ free_threads();
+}
+
+static void
+for_each_cb(void *ctx)
+{
+ int *count = ctx;
+
+ (*count)++;
+}
+
+static void
+thread_for_each(void)
+{
+ int count = 0;
+ int i;
+
+ allocate_threads(3);
+ set_thread(0);
+
+ spdk_for_each_thread(for_each_cb, &count, for_each_cb);
+
+ /* We have not polled thread 0 yet, so count should be 0 */
+ CU_ASSERT(count == 0);
+
+ /* Poll each thread to verify the message is passed to each */
+ for (i = 0; i < 3; i++) {
+ poll_thread(i);
+ CU_ASSERT(count == (i + 1));
+ }
+
+	/*
+	 * After the message has run on each thread, the completion callback
+	 * runs once more on the calling thread, so the count increments again.
+	 */
+ poll_thread(0);
+ CU_ASSERT(count == 4);
+
+ free_threads();
+}
+
+static int
+channel_create(void *io_device, void *ctx_buf)
+{
+ int *ch_count = io_device;
+
+ (*ch_count)++;
+ return 0;
+}
+
+static void
+channel_destroy(void *io_device, void *ctx_buf)
+{
+ int *ch_count = io_device;
+
+ (*ch_count)--;
+}
+
+static void
+channel_msg(struct spdk_io_channel_iter *i)
+{
+ int *msg_count = spdk_io_channel_iter_get_ctx(i);
+
+ (*msg_count)++;
+ spdk_for_each_channel_continue(i, 0);
+}
+
+static void
+channel_cpl(struct spdk_io_channel_iter *i, int status)
+{
+ int *msg_count = spdk_io_channel_iter_get_ctx(i);
+
+ (*msg_count)++;
+}
+
+static void
+for_each_channel_remove(void)
+{
+ struct spdk_io_channel *ch0, *ch1, *ch2;
+ int ch_count = 0;
+ int msg_count = 0;
+
+ allocate_threads(3);
+ set_thread(0);
+ spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
+ ch0 = spdk_get_io_channel(&ch_count);
+ set_thread(1);
+ ch1 = spdk_get_io_channel(&ch_count);
+ set_thread(2);
+ ch2 = spdk_get_io_channel(&ch_count);
+ CU_ASSERT(ch_count == 3);
+
+ /*
+ * Test that io_channel handles the case where we start to iterate through
+ * the channels, and during the iteration, one of the channels is deleted.
+	 * This is done in several different and sometimes non-intuitive orders, because
+ * some operations are deferred and won't execute until their threads are
+ * polled.
+ *
+ * Case #1: Put the I/O channel before spdk_for_each_channel.
+ */
+ set_thread(0);
+ spdk_put_io_channel(ch0);
+ CU_ASSERT(ch_count == 3);
+ poll_threads();
+ CU_ASSERT(ch_count == 2);
+ spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
+ CU_ASSERT(msg_count == 0);
+ poll_threads();
+ CU_ASSERT(msg_count == 3);
+
+ msg_count = 0;
+
+ /*
+ * Case #2: Put the I/O channel after spdk_for_each_channel, but before
+ * thread 0 is polled.
+ */
+ ch0 = spdk_get_io_channel(&ch_count);
+ CU_ASSERT(ch_count == 3);
+ spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
+ spdk_put_io_channel(ch0);
+ CU_ASSERT(ch_count == 3);
+
+ poll_threads();
+ CU_ASSERT(ch_count == 2);
+ CU_ASSERT(msg_count == 4);
+ set_thread(1);
+ spdk_put_io_channel(ch1);
+ CU_ASSERT(ch_count == 2);
+ set_thread(2);
+ spdk_put_io_channel(ch2);
+ CU_ASSERT(ch_count == 2);
+ poll_threads();
+ CU_ASSERT(ch_count == 0);
+
+ spdk_io_device_unregister(&ch_count, NULL);
+ poll_threads();
+
+ free_threads();
+}
+
+struct unreg_ctx {
+ bool ch_done;
+ bool foreach_done;
+};
+
+static void
+unreg_ch_done(struct spdk_io_channel_iter *i)
+{
+ struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
+
+ ctx->ch_done = true;
+
+ SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
+ spdk_for_each_channel_continue(i, 0);
+}
+
+static void
+unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
+{
+ struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
+
+ ctx->foreach_done = true;
+}
+
+static void
+for_each_channel_unreg(void)
+{
+ struct spdk_io_channel *ch0;
+ struct io_device *dev;
+ struct unreg_ctx ctx = {};
+ int io_target = 0;
+
+ allocate_threads(1);
+ set_thread(0);
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+ spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
+ dev = TAILQ_FIRST(&g_io_devices);
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+ CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
+ ch0 = spdk_get_io_channel(&io_target);
+ spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);
+
+ spdk_io_device_unregister(&io_target, NULL);
+ /*
+ * There is an outstanding foreach call on the io_device, so the unregister should not
+ * have removed the device.
+ */
+ CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
+ spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
+ /*
+ * There is already a device registered at &io_target, so a new io_device should not
+ * have been added to g_io_devices.
+ */
+ CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
+ CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
+
+ poll_thread(0);
+ CU_ASSERT(ctx.ch_done == true);
+ CU_ASSERT(ctx.foreach_done == true);
+ /*
+ * There are no more foreach operations outstanding, so we can unregister the device,
+ * even though a channel still exists for the device.
+ */
+ spdk_io_device_unregister(&io_target, NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+
+ set_thread(0);
+ spdk_put_io_channel(ch0);
+
+ poll_threads();
+
+ free_threads();
+}
+
+static void
+thread_name(void)
+{
+ struct spdk_thread *thread;
+ const char *name;
+
+ spdk_thread_lib_init(NULL, 0);
+
+	/* Create a thread with no name; one is generated automatically */
+ thread = spdk_thread_create(NULL, NULL);
+ spdk_set_thread(thread);
+ thread = spdk_get_thread();
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ name = spdk_thread_get_name(thread);
+ CU_ASSERT(name != NULL);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ /* Create thread named "test_thread" */
+ thread = spdk_thread_create("test_thread", NULL);
+ spdk_set_thread(thread);
+ thread = spdk_get_thread();
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ name = spdk_thread_get_name(thread);
+ SPDK_CU_ASSERT_FATAL(name != NULL);
+ CU_ASSERT(strcmp(name, "test_thread") == 0);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ spdk_thread_lib_fini();
+}
+
+static uint64_t g_device1;
+static uint64_t g_device2;
+static uint64_t g_device3;
+
+static uint64_t g_ctx1 = 0x1111;
+static uint64_t g_ctx2 = 0x2222;
+
+static int g_create_cb_calls = 0;
+static int g_destroy_cb_calls = 0;
+
+static int
+create_cb_1(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &g_device1);
+ *(uint64_t *)ctx_buf = g_ctx1;
+ g_create_cb_calls++;
+ return 0;
+}
+
+static void
+destroy_cb_1(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &g_device1);
+ CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
+ g_destroy_cb_calls++;
+}
+
+static int
+create_cb_2(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &g_device2);
+ *(uint64_t *)ctx_buf = g_ctx2;
+ g_create_cb_calls++;
+ return 0;
+}
+
+static void
+destroy_cb_2(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &g_device2);
+ CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
+ g_destroy_cb_calls++;
+}
+
+static void
+channel(void)
+{
+ struct spdk_io_channel *ch1, *ch2;
+ void *ctx;
+
+ allocate_threads(1);
+ set_thread(0);
+
+ spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
+ spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);
+
+ g_create_cb_calls = 0;
+ ch1 = spdk_get_io_channel(&g_device1);
+ CU_ASSERT(g_create_cb_calls == 1);
+ SPDK_CU_ASSERT_FATAL(ch1 != NULL);
+
+ g_create_cb_calls = 0;
+ ch2 = spdk_get_io_channel(&g_device1);
+ CU_ASSERT(g_create_cb_calls == 0);
+ CU_ASSERT(ch1 == ch2);
+ SPDK_CU_ASSERT_FATAL(ch2 != NULL);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch2);
+ poll_threads();
+ CU_ASSERT(g_destroy_cb_calls == 0);
+
+ g_create_cb_calls = 0;
+ ch2 = spdk_get_io_channel(&g_device2);
+ CU_ASSERT(g_create_cb_calls == 1);
+ CU_ASSERT(ch1 != ch2);
+ SPDK_CU_ASSERT_FATAL(ch2 != NULL);
+
+ ctx = spdk_io_channel_get_ctx(ch2);
+ CU_ASSERT(*(uint64_t *)ctx == g_ctx2);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch1);
+ poll_threads();
+ CU_ASSERT(g_destroy_cb_calls == 1);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch2);
+ poll_threads();
+ CU_ASSERT(g_destroy_cb_calls == 1);
+
+ ch1 = spdk_get_io_channel(&g_device3);
+ CU_ASSERT(ch1 == NULL);
+
+ spdk_io_device_unregister(&g_device1, NULL);
+ poll_threads();
+ spdk_io_device_unregister(&g_device2, NULL);
+ poll_threads();
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+ free_threads();
+ CU_ASSERT(TAILQ_EMPTY(&g_threads));
+}
+
+static int
+create_cb(void *io_device, void *ctx_buf)
+{
+ uint64_t *refcnt = (uint64_t *)ctx_buf;
+
+ CU_ASSERT(*refcnt == 0);
+ *refcnt = 1;
+
+ return 0;
+}
+
+static void
+destroy_cb(void *io_device, void *ctx_buf)
+{
+ uint64_t *refcnt = (uint64_t *)ctx_buf;
+
+ CU_ASSERT(*refcnt == 1);
+ *refcnt = 0;
+}
+
+/**
+ * This test checks that a get, put, get, put sequence, without allowing the
+ * deferred put operation to complete in between, doesn't release the memory
+ * for the channel twice.
+ */
+static void
+channel_destroy_races(void)
+{
+ uint64_t device;
+ struct spdk_io_channel *ch;
+
+ allocate_threads(1);
+ set_thread(0);
+
+ spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);
+
+ ch = spdk_get_io_channel(&device);
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ spdk_put_io_channel(ch);
+
+ ch = spdk_get_io_channel(&device);
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ spdk_put_io_channel(ch);
+ poll_threads();
+
+ spdk_io_device_unregister(&device, NULL);
+ poll_threads();
+
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+ free_threads();
+ CU_ASSERT(TAILQ_EMPTY(&g_threads));
+}
+
+static void
+thread_exit_test(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_io_channel *ch;
+ struct spdk_poller *poller1, *poller2;
+ void *ctx;
+ bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
+ int rc __attribute__((unused));
+
+ MOCK_SET(spdk_get_ticks, 10);
+ MOCK_SET(spdk_get_ticks_hz, 1);
+
+ allocate_threads(4);
+
+	/* Test that all pending messages are reaped for the exiting thread, and
+	 * that the thread moves to the exited state.
+	 */
+ set_thread(0);
+ thread = spdk_get_thread();
+
+	/* A message sent to thread 0 will be accepted. */
+ rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!done1);
+
+ /* Move thread 0 to the exiting state. */
+ spdk_thread_exit(thread);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+	/* A message sent to thread 0 will still be accepted. */
+ rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
+ CU_ASSERT(rc == 0);
+
+ /* Thread 0 will reap pending messages. */
+ poll_thread(0);
+ CU_ASSERT(done1 == true);
+ CU_ASSERT(done2 == true);
+
+ /* Thread 0 will move to the exited state. */
+ CU_ASSERT(spdk_thread_is_exited(thread) == true);
+
+	/* Test that a released I/O channel is reaped even after the thread
+	 * moves to the exiting state.
+	 */
+ set_thread(1);
+
+ spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
+
+ g_create_cb_calls = 0;
+ ch = spdk_get_io_channel(&g_device1);
+ CU_ASSERT(g_create_cb_calls == 1);
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ ctx = spdk_io_channel_get_ctx(ch);
+ CU_ASSERT(*(uint64_t *)ctx == g_ctx1);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch);
+
+ thread = spdk_get_thread();
+ spdk_thread_exit(thread);
+
+	/* Thread 1 will not move to the exited state yet because the I/O channel
+	 * release has not completed yet.
+	 */
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+	/* Thread 1 is still able to get another reference to the I/O channel
+	 * even after it moves to the exiting state.
+	 */
+ g_create_cb_calls = 0;
+ ch = spdk_get_io_channel(&g_device1);
+
+ CU_ASSERT(g_create_cb_calls == 0);
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ ctx = spdk_io_channel_get_ctx(ch);
+ CU_ASSERT(*(uint64_t *)ctx == g_ctx1);
+
+ spdk_put_io_channel(ch);
+
+ poll_threads();
+ CU_ASSERT(g_destroy_cb_calls == 1);
+
+	/* Thread 1 will move to the exited state after the I/O channel
+	 * is released.
+	 */
+ CU_ASSERT(spdk_thread_is_exited(thread) == true);
+
+ spdk_io_device_unregister(&g_device1, NULL);
+ poll_threads();
+
+	/* Test that an unregistered poller is reaped for the exiting thread, and
+	 * that the thread moves to the exited state.
+	 */
+ set_thread(2);
+ thread = spdk_get_thread();
+
+ poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
+ CU_ASSERT(poller1 != NULL);
+
+ spdk_poller_unregister(&poller1);
+
+ spdk_thread_exit(thread);
+
+ poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);
+
+ poll_threads();
+
+ CU_ASSERT(poller1_run == false);
+ CU_ASSERT(poller2_run == true);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+ spdk_poller_unregister(&poller2);
+
+ poll_threads();
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == true);
+
+	/* Test that the exiting thread is forced to the exited state after a timeout. */
+ set_thread(3);
+ thread = spdk_get_thread();
+
+ poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
+ CU_ASSERT(poller1 != NULL);
+
+ spdk_thread_exit(thread);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
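+
+	/* The exit deadline is armed when spdk_thread_exit() runs at tick 10.
+	 * With spdk_get_ticks_hz mocked to 1, one tick is one second; the checks
+	 * below (still exiting at tick 11, exited by tick 15) imply a timeout in
+	 * the 1-5 second range, presumably the library's exit timeout constant.
+	 */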
+
+ MOCK_SET(spdk_get_ticks, 11);
+
+ poll_threads();
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+	/* Advance the clock past the exit timeout. */
+ MOCK_SET(spdk_get_ticks, 15);
+
+ poll_threads();
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == true);
+
+ spdk_poller_unregister(&poller1);
+
+ poll_threads();
+
+ MOCK_CLEAR(spdk_get_ticks);
+ MOCK_CLEAR(spdk_get_ticks_hz);
+
+ free_threads();
+}
+
+static int
+poller_run_idle(void *ctx)
+{
+ uint64_t delay_us = (uint64_t)ctx;
+
+ spdk_delay_us(delay_us);
+
+ return 0;
+}
+
+static int
+poller_run_busy(void *ctx)
+{
+ uint64_t delay_us = (uint64_t)ctx;
+
+ spdk_delay_us(delay_us);
+
+ return 1;
+}
+
+static void
+thread_update_stats_test(void)
+{
+ struct spdk_poller *poller;
+ struct spdk_thread *thread;
+
+ MOCK_SET(spdk_get_ticks, 10);
+
+ allocate_threads(1);
+
+ set_thread(0);
+ thread = spdk_get_thread();
+
+ CU_ASSERT(thread->tsc_last == 10);
+ CU_ASSERT(thread->stats.idle_tsc == 0);
+ CU_ASSERT(thread->stats.busy_tsc == 0);
+
+	/* Test that idle_tsc is updated as expected. */
+ poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
+ CU_ASSERT(poller != NULL);
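+
+	/* A sketch of the expected arithmetic: spdk_thread_poll() samples the
+	 * clock when it starts, so the 100 ticks that elapse before the poll are
+	 * not accounted to either bucket. The poller itself burns 1000 ticks and
+	 * returns 0 (idle), so after one poll tsc_last == 110 + 1000 == 1110 and
+	 * idle_tsc == 1000. (Inferred from the assertions below, not from
+	 * thread.c internals.)
+	 */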
+
+ spdk_delay_us(100);
+
+ poll_thread_times(0, 1);
+
+ CU_ASSERT(thread->tsc_last == 1110);
+ CU_ASSERT(thread->stats.idle_tsc == 1000);
+ CU_ASSERT(thread->stats.busy_tsc == 0);
+
+ spdk_delay_us(100);
+
+ poll_thread_times(0, 1);
+
+ CU_ASSERT(thread->tsc_last == 2210);
+ CU_ASSERT(thread->stats.idle_tsc == 2000);
+ CU_ASSERT(thread->stats.busy_tsc == 0);
+
+ spdk_poller_unregister(&poller);
+
+	/* Test that busy_tsc is updated as expected. */
+ poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
+ CU_ASSERT(poller != NULL);
+
+ spdk_delay_us(10000);
+
+ poll_thread_times(0, 1);
+
+ CU_ASSERT(thread->tsc_last == 112210);
+ CU_ASSERT(thread->stats.idle_tsc == 2000);
+ CU_ASSERT(thread->stats.busy_tsc == 100000);
+
+ spdk_delay_us(10000);
+
+ poll_thread_times(0, 1);
+
+ CU_ASSERT(thread->tsc_last == 222210);
+ CU_ASSERT(thread->stats.idle_tsc == 2000);
+ CU_ASSERT(thread->stats.busy_tsc == 200000);
+
+ spdk_poller_unregister(&poller);
+
+ MOCK_CLEAR(spdk_get_ticks);
+
+ free_threads();
+}
+
+struct ut_nested_ch {
+ struct spdk_io_channel *child;
+ struct spdk_poller *poller;
+};
+
+struct ut_nested_dev {
+ struct ut_nested_dev *child;
+};
+
+static struct io_device *
+ut_get_io_device(void *dev)
+{
+ struct io_device *tmp;
+
+ TAILQ_FOREACH(tmp, &g_io_devices, tailq) {
+ if (tmp->io_device == dev) {
+ return tmp;
+ }
+ }
+
+ return NULL;
+}
+
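+/* Scanning g_io_devices directly is possible because this test #includes
+ * thread/thread.c itself, which exposes the library's internal device list.
+ */
+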
+static int
+ut_null_poll(void *ctx)
+{
+ return -1;
+}
+
+static int
+ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
+{
+ struct ut_nested_ch *_ch = ctx_buf;
+ struct ut_nested_dev *_dev = io_device;
+ struct ut_nested_dev *_child;
+
+ _child = _dev->child;
+
+ if (_child != NULL) {
+ _ch->child = spdk_get_io_channel(_child);
+ SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
+ } else {
+ _ch->child = NULL;
+ }
+
+ _ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);
+
+ return 0;
+}
+
+static void
+ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
+{
+ struct ut_nested_ch *_ch = ctx_buf;
+ struct spdk_io_channel *child;
+
+ child = _ch->child;
+ if (child != NULL) {
+ spdk_put_io_channel(child);
+ }
+
+ spdk_poller_unregister(&_ch->poller);
+}
+
+static void
+ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
+{
+ CU_ASSERT(ch->ref == 1);
+ CU_ASSERT(ch->dev == dev);
+ CU_ASSERT(dev->refcnt == 1);
+}
+
+static void
+ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
+{
+ CU_ASSERT(ch->ref == 0);
+ CU_ASSERT(ch->destroy_ref == 1);
+ CU_ASSERT(dev->refcnt == 1);
+}
+
+static void
+ut_check_nested_ch_destroy_post(struct io_device *dev)
+{
+ CU_ASSERT(dev->refcnt == 0);
+}
+
+static void
+ut_check_nested_poller_register(struct spdk_poller *poller)
+{
+ SPDK_CU_ASSERT_FATAL(poller != NULL);
+}
+
+static void
+nested_channel(void)
+{
+ struct ut_nested_dev _dev1, _dev2, _dev3;
+ struct ut_nested_ch *_ch1, *_ch2, *_ch3;
+ struct io_device *dev1, *dev2, *dev3;
+ struct spdk_io_channel *ch1, *ch2, *ch3;
+ struct spdk_poller *poller;
+ struct spdk_thread *thread;
+
+ allocate_threads(1);
+ set_thread(0);
+
+ thread = spdk_get_thread();
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+
+ _dev1.child = &_dev2;
+ _dev2.child = &_dev3;
+ _dev3.child = NULL;
+
+ spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
+ sizeof(struct ut_nested_ch), "dev1");
+ spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
+ sizeof(struct ut_nested_ch), "dev2");
+ spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
+ sizeof(struct ut_nested_ch), "dev3");
+
+ dev1 = ut_get_io_device(&_dev1);
+ SPDK_CU_ASSERT_FATAL(dev1 != NULL);
+ dev2 = ut_get_io_device(&_dev2);
+ SPDK_CU_ASSERT_FATAL(dev2 != NULL);
+ dev3 = ut_get_io_device(&_dev3);
+ SPDK_CU_ASSERT_FATAL(dev3 != NULL);
+
+	/* A single spdk_get_io_channel() call on dev1 also creates the channels
+	 * to dev2 and dev3 in a chain. Their pollers are registered along the way.
+	 */
+ ch1 = spdk_get_io_channel(&_dev1);
+ SPDK_CU_ASSERT_FATAL(ch1 != NULL);
+
+ _ch1 = spdk_io_channel_get_ctx(ch1);
+ ch2 = _ch1->child;
+ SPDK_CU_ASSERT_FATAL(ch2 != NULL);
+
+ _ch2 = spdk_io_channel_get_ctx(ch2);
+ ch3 = _ch2->child;
+ SPDK_CU_ASSERT_FATAL(ch3 != NULL);
+
+ _ch3 = spdk_io_channel_get_ctx(ch3);
+ CU_ASSERT(_ch3->child == NULL);
+
+ ut_check_nested_ch_create(ch1, dev1);
+ ut_check_nested_ch_create(ch2, dev2);
+ ut_check_nested_ch_create(ch3, dev3);
+
+ poller = spdk_poller_register(ut_null_poll, NULL, 0);
+
+ ut_check_nested_poller_register(poller);
+ ut_check_nested_poller_register(_ch1->poller);
+ ut_check_nested_poller_register(_ch2->poller);
+ ut_check_nested_poller_register(_ch3->poller);
+
+ spdk_poller_unregister(&poller);
+ poll_thread_times(0, 1);
+
+	/* A single spdk_put_io_channel() call on ch1 also destroys the channels
+	 * to dev2 and dev3 in a chain. Their pollers are unregistered along the way.
+	 */
+ spdk_put_io_channel(ch1);
+
+	/* Start exiting the current thread now that the outermost I/O channel
+	 * has been put and the non-nested poller has been unregistered.
+	 */
+ spdk_thread_exit(thread);
+
+ ut_check_nested_ch_destroy_pre(ch1, dev1);
+ poll_thread_times(0, 1);
+ ut_check_nested_ch_destroy_post(dev1);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+ ut_check_nested_ch_destroy_pre(ch2, dev2);
+ poll_thread_times(0, 1);
+ ut_check_nested_ch_destroy_post(dev2);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+ ut_check_nested_ch_destroy_pre(ch3, dev3);
+ poll_thread_times(0, 1);
+ ut_check_nested_ch_destroy_post(dev3);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == true);
+
+ spdk_io_device_unregister(&_dev1, NULL);
+ spdk_io_device_unregister(&_dev2, NULL);
+ spdk_io_device_unregister(&_dev3, NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+
+ free_threads();
+ CU_ASSERT(TAILQ_EMPTY(&g_threads));
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("io_channel", NULL, NULL);
+
+ CU_ADD_TEST(suite, thread_alloc);
+ CU_ADD_TEST(suite, thread_send_msg);
+ CU_ADD_TEST(suite, thread_poller);
+ CU_ADD_TEST(suite, poller_pause);
+ CU_ADD_TEST(suite, thread_for_each);
+ CU_ADD_TEST(suite, for_each_channel_remove);
+ CU_ADD_TEST(suite, for_each_channel_unreg);
+ CU_ADD_TEST(suite, thread_name);
+ CU_ADD_TEST(suite, channel);
+ CU_ADD_TEST(suite, channel_destroy_races);
+ CU_ADD_TEST(suite, thread_exit_test);
+ CU_ADD_TEST(suite, thread_update_stats_test);
+ CU_ADD_TEST(suite, nested_channel);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/Makefile b/src/spdk/test/unit/lib/util/Makefile
new file mode 100644
index 000000000..221715725
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/Makefile
@@ -0,0 +1,45 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = base64.c bit_array.c cpuset.c crc16.c crc32_ieee.c crc32c.c dif.c \
+ iov.c math.c pipe.c string.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/util/base64.c/.gitignore b/src/spdk/test/unit/lib/util/base64.c/.gitignore
new file mode 100644
index 000000000..a5b175236
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/base64.c/.gitignore
@@ -0,0 +1 @@
+base64_ut
diff --git a/src/spdk/test/unit/lib/util/base64.c/Makefile b/src/spdk/test/unit/lib/util/base64.c/Makefile
new file mode 100644
index 000000000..c0d91c076
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/base64.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = base64_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/base64.c/base64_ut.c b/src/spdk/test/unit/lib/util/base64.c/base64_ut.c
new file mode 100644
index 000000000..b1f70561c
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/base64.c/base64_ut.c
@@ -0,0 +1,381 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/base64.c"
+
+char text_A[] = "FZB3";
+uint8_t raw_A[] = {0x15, 0x90, 0x77};
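+/* Worked example for the pair above: 0x15 0x90 0x77 is the 24-bit string
+ * 000101 011001 000001 110111, i.e. alphabet indices 5, 25, 1, 55, which map
+ * to 'F', 'Z', 'B', '3' in the standard base64 alphabet.
+ */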
+char text_B[] = "AbC/1+c=";
+char text_urlsafe_B[] = "AbC_1-c=";
+uint8_t raw_B[] = {0x01, 0xB0, 0xBF, 0xD7, 0xE7};
+char text_C[] = "AbC/1+cC";
+char text_urlsafe_C[] = "AbC_1-cC";
+uint8_t raw_C[] = {0x01, 0xB0, 0xBF, 0xD7, 0xE7, 0x02};
+char text_D[] = "AbC/1w==";
+char text_urlsafe_D[] = "AbC_1w==";
+uint8_t raw_D[] = {0x01, 0xB0, 0xBF, 0xD7};
+char text_E[] = "AbC12===";
+char text_F[] = "AbCd112";
+char text_G[] = "AbCd12";
+char text_H[] = "AbC12";
+char text_I[] = "AQu/1+cCCBUnOBFWv+HzoL3BOVUBItP2mRDdqhnxAtIT4hD1wbQ30Ylm8R+7khPS"; /* 64 chars */
+char text_urlsafe_I[] =
+	"AQu_1-cCCBUnOBFWv-HzoL3BOVUBItP2mRDdqhnxAtIT4hD1wbQ30Ylm8R-7khPS"; /* 64 chars */
+uint8_t raw_I[] = {0x01, 0x0B, 0xBF, 0xD7, 0xE7, 0x02, 0x08, 0x15, 0x27, 0x38, 0x11, 0x56, 0xBF, 0xE1, 0xF3, 0xA0,
+ 0xBD, 0xC1, 0x39, 0x55, 0x01, 0x22, 0xD3, 0xF6, 0x99, 0x10, 0xDD, 0xAA, 0x19, 0xF1, 0x02, 0xD2,
+ 0x13, 0xE2, 0x10, 0xF5, 0xC1, 0xB4, 0x37, 0xD1, 0x89, 0x66, 0xF1, 0x1F, 0xBB, 0x92, 0x13, 0xD2
+ };
+char text_J[] =
+ "AQu/1+cCCBUnOBFWv+HzoL3BOVUBItP2mRDdqhnxAtIT4hD1wbQ30Ylm8R+7khPSvcE5VecCCBUZ8QLiEPVm8b3BOVUBItP2GfEC4hD1ZvE5VQEi0/aJZvEfu5LiEPUTvcE5VQEi0/YZEQ==";
+char text_urlsafe_J[] =
+ "AQu_1-cCCBUnOBFWv-HzoL3BOVUBItP2mRDdqhnxAtIT4hD1wbQ30Ylm8R-7khPSvcE5VecCCBUZ8QLiEPVm8b3BOVUBItP2GfEC4hD1ZvE5VQEi0_aJZvEfu5LiEPUTvcE5VQEi0_YZEQ==";
+uint8_t raw_J[] = {0x01, 0x0B, 0xBF, 0xD7, 0xE7, 0x02, 0x08, 0x15, 0x27, 0x38, 0x11, 0x56, 0xBF, 0xE1, 0xF3, 0xA0,
+ 0xBD, 0xC1, 0x39, 0x55, 0x01, 0x22, 0xD3, 0xF6, 0x99, 0x10, 0xDD, 0xAA, 0x19, 0xF1, 0x02, 0xD2,
+ 0x13, 0xE2, 0x10, 0xF5, 0xC1, 0xB4, 0x37, 0xD1, 0x89, 0x66, 0xF1, 0x1F, 0xBB, 0x92, 0x13, 0xD2,
+ 0xBD, 0xC1, 0x39, 0x55, 0xE7, 0x02, 0x08, 0x15, 0x19, 0xF1, 0x02, 0xE2, 0x10, 0xF5, 0x66, 0xF1,
+ 0xBD, 0xC1, 0x39, 0x55, 0x01, 0x22, 0xD3, 0xF6, 0x19, 0xF1, 0x02, 0xE2, 0x10, 0xF5, 0x66, 0xF1,
+ 0x39, 0x55, 0x01, 0x22, 0xD3, 0xF6, 0x89, 0x66, 0xF1, 0x1F, 0xBB, 0x92, 0xE2, 0x10, 0xF5, 0x13,
+ 0xBD, 0xC1, 0x39, 0x55, 0x01, 0x22, 0xD3, 0xF6, 0x19, 0x11
+ };
+
+static void
+test_base64_get_encoded_strlen(void)
+{
+ uint32_t raw_lens[4] = {8, 9, 10, 11};
+ uint32_t text_strlens[4] = {12, 12, 16, 16};
+ uint32_t text_strlen;
+ int i;
+
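+	/* Expected relation, inferred from the vectors above rather than from
+	 * the implementation: padded base64 emits 4 characters per started
+	 * 3-byte group, so strlen == 4 * ceil(raw_len / 3).
+	 */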
+ for (i = 0; i < 4; i++) {
+ text_strlen = spdk_base64_get_encoded_strlen(raw_lens[i]);
+ CU_ASSERT_EQUAL(text_strlen, text_strlens[i]);
+ }
+}
+
+static void
+test_base64_get_decoded_len(void)
+{
+ uint32_t text_strlens[4] = {8, 10, 11, 12};
+ uint32_t raw_lens[4] = {6, 7, 8, 9};
+ uint32_t bin_len;
+ int i;
+
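+	/* Expected relation, again inferred from the vectors: every 4 text
+	 * characters decode to 3 bytes, and a trailing group of 2 or 3
+	 * characters adds 1 or 2 bytes, i.e. len == (3 * strlen) / 4 rounded
+	 * down.
+	 */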
+ for (i = 0; i < 4; i++) {
+ bin_len = spdk_base64_get_decoded_len(text_strlens[i]);
+ CU_ASSERT_EQUAL(bin_len, raw_lens[i]);
+ }
+}
+
+static void
+test_base64_encode(void)
+{
+ char text[200];
+ int ret;
+
+ ret = spdk_base64_encode(text, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_A) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_A));
+
+ ret = spdk_base64_encode(text, raw_B, sizeof(raw_B));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_B) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_B));
+
+ ret = spdk_base64_encode(text, raw_C, sizeof(raw_C));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_C) == 0);
+
+ ret = spdk_base64_encode(text, raw_D, sizeof(raw_D));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_D) == 0);
+
+ ret = spdk_base64_encode(text, raw_I, sizeof(raw_I));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_I) == 0);
+
+ ret = spdk_base64_encode(text, raw_J, sizeof(raw_J));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_J) == 0);
+
+ ret = spdk_base64_encode(NULL, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_encode(text, NULL, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_encode(text, raw_A, 0);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+static void
+test_base64_decode(void)
+{
+ char raw_buf[200];
+ void *raw = (void *)raw_buf;
+ size_t raw_len;
+ int ret;
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_A);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_A));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_A);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_A));
+ CU_ASSERT(memcmp(raw, raw_A, sizeof(raw_A)) == 0);
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_B);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_B));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_B);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_B));
+ CU_ASSERT(memcmp(raw, raw_B, sizeof(raw_B)) == 0);
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_C);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_C));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_C);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_C));
+ CU_ASSERT(memcmp(raw, raw_C, sizeof(raw_C)) == 0);
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_D);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_D));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_D);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_D));
+ CU_ASSERT(memcmp(raw, raw_D, sizeof(raw_D)) == 0);
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_I);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_I));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_I);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_I));
+ CU_ASSERT(memcmp(raw, raw_I, sizeof(raw_I)) == 0);
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_J);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_J));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_J);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_J));
+ CU_ASSERT(memcmp(raw, raw_J, sizeof(raw_J)) == 0);
+
+ ret = spdk_base64_decode(raw, &raw_len, text_E);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, text_F);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, text_G);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, text_H);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, NULL);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+static void
+test_base64_urlsafe_encode(void)
+{
+ char text[200];
+ int ret;
+
+ ret = spdk_base64_urlsafe_encode(text, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_A) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_A));
+
+ ret = spdk_base64_urlsafe_encode(text, raw_B, sizeof(raw_B));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_B) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_urlsafe_B));
+
+ ret = spdk_base64_urlsafe_encode(text, raw_C, sizeof(raw_C));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_C) == 0);
+
+ ret = spdk_base64_urlsafe_encode(text, raw_D, sizeof(raw_D));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_D) == 0);
+
+ ret = spdk_base64_urlsafe_encode(text, raw_I, sizeof(raw_I));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_I) == 0);
+
+ ret = spdk_base64_urlsafe_encode(text, raw_J, sizeof(raw_J));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_J) == 0);
+
+ ret = spdk_base64_urlsafe_encode(NULL, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_encode(text, NULL, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_encode(text, raw_A, 0);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+static void
+test_base64_urlsafe_decode(void)
+{
+ char raw_buf[200];
+ void *raw = (void *)raw_buf;
+ size_t raw_len = 0;
+ int ret;
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_A);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_A));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_A);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_A));
+ CU_ASSERT(memcmp(raw, raw_A, sizeof(raw_A)) == 0);
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_urlsafe_B);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_B));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_B);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_B));
+ CU_ASSERT(memcmp(raw, raw_B, sizeof(raw_B)) == 0);
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_urlsafe_C);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_C));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_C);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_C));
+ CU_ASSERT(memcmp(raw, raw_C, sizeof(raw_C)) == 0);
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_urlsafe_D);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_D));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_D);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_D));
+ CU_ASSERT(memcmp(raw, raw_D, sizeof(raw_D)) == 0);
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_urlsafe_I);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_I));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_I);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_I));
+ CU_ASSERT(memcmp(raw, raw_I, sizeof(raw_I)) == 0);
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_urlsafe_J);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_J));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_J);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_J));
+ CU_ASSERT(memcmp(raw, raw_J, sizeof(raw_J)) == 0);
+
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_E);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_F);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_G);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_H);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, NULL);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("base64", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_base64_get_encoded_strlen);
+ CU_ADD_TEST(suite, test_base64_get_decoded_len);
+ CU_ADD_TEST(suite, test_base64_encode);
+ CU_ADD_TEST(suite, test_base64_decode);
+ CU_ADD_TEST(suite, test_base64_urlsafe_encode);
+ CU_ADD_TEST(suite, test_base64_urlsafe_decode);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/bit_array.c/.gitignore b/src/spdk/test/unit/lib/util/bit_array.c/.gitignore
new file mode 100644
index 000000000..24300cdb3
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/bit_array.c/.gitignore
@@ -0,0 +1 @@
+bit_array_ut
diff --git a/src/spdk/test/unit/lib/util/bit_array.c/Makefile b/src/spdk/test/unit/lib/util/bit_array.c/Makefile
new file mode 100644
index 000000000..281001af8
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/bit_array.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = bit_array_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c b/src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c
new file mode 100644
index 000000000..5b19b409b
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c
@@ -0,0 +1,376 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/bit_array.c"
+#include "common/lib/test_env.c"
+
+static void
+test_1bit(void)
+{
+ struct spdk_bit_array *ba;
+
+ ba = spdk_bit_array_create(1);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 1);
+
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ /* Set bit 0 */
+ CU_ASSERT(spdk_bit_array_set(ba, 0) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 0);
+
+ /* Clear bit 0 */
+ spdk_bit_array_clear(ba, 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ spdk_bit_array_free(&ba);
+ CU_ASSERT(ba == NULL);
+}
+
+static void
+test_64bit(void)
+{
+ struct spdk_bit_array *ba;
+
+ ba = spdk_bit_array_create(64);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 64);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 63) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 64) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1000) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ /* Set bit 1 */
+ CU_ASSERT(spdk_bit_array_set(ba, 1) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == true);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 1);
+
+ /* Set bit 63 (1 still set) */
+ CU_ASSERT(spdk_bit_array_set(ba, 63) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == true);
+ CU_ASSERT(spdk_bit_array_get(ba, 63) == true);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 1);
+
+ /* Clear bit 1 (63 still set) */
+ spdk_bit_array_clear(ba, 1);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 63);
+
+ /* Clear bit 63 (no bits set) */
+ spdk_bit_array_clear(ba, 63);
+ CU_ASSERT(spdk_bit_array_get(ba, 63) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_find(void)
+{
+ struct spdk_bit_array *ba;
+ uint32_t i;
+
+ ba = spdk_bit_array_create(256);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 256);
+
+ /* Set all bits */
+ for (i = 0; i < 256; i++) {
+ CU_ASSERT(spdk_bit_array_set(ba, i) == 0);
+ }
+
+ /* Verify that find_first_set and find_first_clear work for each starting position */
+ for (i = 0; i < 256; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == i);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == UINT32_MAX);
+ }
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 256) == UINT32_MAX);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, 256) == UINT32_MAX);
+
+ /* Clear bits 0 through 31 */
+ for (i = 0; i < 32; i++) {
+ spdk_bit_array_clear(ba, i);
+ }
+
+ for (i = 0; i < 32; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == 32);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == i);
+ }
+
+ for (i = 32; i < 256; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == i);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == UINT32_MAX);
+ }
+
+ /* Clear bit 255 */
+ spdk_bit_array_clear(ba, 255);
+
+ for (i = 0; i < 32; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == 32);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == i);
+ }
+
+ for (i = 32; i < 255; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == i);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == 255);
+ }
+
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, 256) == UINT32_MAX);
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_resize(void)
+{
+ struct spdk_bit_array *ba;
+
+ /* Start with a 0 bit array */
+ ba = spdk_bit_array_create(0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 0) == -EINVAL);
+ spdk_bit_array_clear(ba, 0);
+
+ /* Increase size to 1 bit */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 1) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 1);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 0) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+
+ /* Increase size to 2 bits */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 2) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 2);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 1) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == true);
+
+ /* Shrink size back to 1 bit */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 1) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 1);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+
+ /* Increase size to 65 bits */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 65) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 65);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 64) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 64) == true);
+
+ /* Shrink size back to 0 bits */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 0) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_errors(void)
+{
+ /* Passing NULL to resize should fail. */
+ CU_ASSERT(spdk_bit_array_resize(NULL, 0) == -EINVAL);
+
+ /* Passing NULL to free is a no-op. */
+ spdk_bit_array_free(NULL);
+}
+
+static void
+test_count(void)
+{
+ struct spdk_bit_array *ba;
+ uint32_t i;
+
+ /* 0-bit array should have 0 bits set and 0 bits clear */
+ ba = spdk_bit_array_create(0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 0);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 0);
+ spdk_bit_array_free(&ba);
+
+ /* 1-bit array */
+ ba = spdk_bit_array_create(1);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 0);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 1);
+ spdk_bit_array_set(ba, 0);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 1);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 0);
+ spdk_bit_array_free(&ba);
+
+ /* 65-bit array */
+ ba = spdk_bit_array_create(65);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 0);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 65);
+ spdk_bit_array_set(ba, 0);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 1);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 64);
+ spdk_bit_array_set(ba, 5);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 2);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 63);
+ spdk_bit_array_set(ba, 13);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 3);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 62);
+ spdk_bit_array_clear(ba, 0);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 2);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 63);
+ for (i = 0; i < 65; i++) {
+ spdk_bit_array_set(ba, i);
+ }
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 65);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 0);
+ for (i = 0; i < 65; i++) {
+ spdk_bit_array_clear(ba, i);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 65 - i - 1);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == i + 1);
+ }
+ spdk_bit_array_free(&ba);
+}
+
+#define TEST_MASK_SIZE 128
+#define TEST_BITS_NUM (TEST_MASK_SIZE * 8 - 3)
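+/* TEST_BITS_NUM is deliberately 3 bits short of a whole number of bytes, so
+ * the mask store/load paths must handle a final, partially used byte.
+ */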
+static void
+test_mask_store_load(void)
+{
+ struct spdk_bit_array *ba;
+ uint8_t mask[TEST_MASK_SIZE] = { 0 };
+ uint32_t i;
+
+	ba = spdk_bit_array_create(TEST_BITS_NUM);
+	SPDK_CU_ASSERT_FATAL(ba != NULL);
+
+	/* Check that the stored mask is consistent with the bit array contents */
+ spdk_bit_array_set(ba, 0);
+ spdk_bit_array_set(ba, TEST_BITS_NUM / 2);
+ spdk_bit_array_set(ba, TEST_BITS_NUM - 1);
+
+ spdk_bit_array_store_mask(ba, mask);
+
+ for (i = 0; i < TEST_BITS_NUM; i++) {
+ if (i == 0 || i == TEST_BITS_NUM / 2 || i == TEST_BITS_NUM - 1) {
+ CU_ASSERT((mask[i / 8] & (1U << (i % 8))));
+ } else {
+ CU_ASSERT(!(mask[i / 8] & (1U << (i % 8))));
+ }
+ }
+
+	/* Check that the loaded mask is consistent with the bit array contents */
+ memset(mask, 0, TEST_MASK_SIZE);
+ mask[0] = 1;
+ mask[TEST_MASK_SIZE - 1] = 1U << 4;
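+	/* Bit TEST_BITS_NUM - 1 == 1020 maps to mask[1020 / 8] == mask[127] at
+	 * bit position 1020 % 8 == 4, hence the 1U << 4.
+	 */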
+
+ spdk_bit_array_load_mask(ba, mask);
+
+ CU_ASSERT(spdk_bit_array_get(ba, 0));
+ CU_ASSERT(spdk_bit_array_get(ba, TEST_BITS_NUM - 1));
+
+ spdk_bit_array_clear(ba, 0);
+ spdk_bit_array_clear(ba, TEST_BITS_NUM - 1);
+
+ for (i = 0; i < TEST_BITS_NUM; i++) {
+ CU_ASSERT(!spdk_bit_array_get(ba, i));
+ }
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_mask_clear(void)
+{
+ struct spdk_bit_array *ba;
+ uint32_t i;
+
+	ba = spdk_bit_array_create(TEST_BITS_NUM);
+	SPDK_CU_ASSERT_FATAL(ba != NULL);
+
+ for (i = 0; i < TEST_BITS_NUM; i++) {
+ spdk_bit_array_set(ba, i);
+ }
+
+ spdk_bit_array_clear_mask(ba);
+
+ for (i = 0; i < TEST_BITS_NUM; i++) {
+ CU_ASSERT(!spdk_bit_array_get(ba, i));
+ }
+
+ spdk_bit_array_free(&ba);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("bit_array", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_1bit);
+ CU_ADD_TEST(suite, test_64bit);
+ CU_ADD_TEST(suite, test_find);
+ CU_ADD_TEST(suite, test_resize);
+ CU_ADD_TEST(suite, test_errors);
+ CU_ADD_TEST(suite, test_count);
+ CU_ADD_TEST(suite, test_mask_store_load);
+ CU_ADD_TEST(suite, test_mask_clear);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/cpuset.c/.gitignore b/src/spdk/test/unit/lib/util/cpuset.c/.gitignore
new file mode 100644
index 000000000..2ca1a2d36
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/cpuset.c/.gitignore
@@ -0,0 +1 @@
+cpuset_ut
diff --git a/src/spdk/test/unit/lib/util/cpuset.c/Makefile b/src/spdk/test/unit/lib/util/cpuset.c/Makefile
new file mode 100644
index 000000000..6b2374935
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/cpuset.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = cpuset_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c b/src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c
new file mode 100644
index 000000000..3630c5cbd
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c
@@ -0,0 +1,262 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/cpuset.h"
+
+#include "spdk_cunit.h"
+
+#include "util/cpuset.c"
+
+static int
+cpuset_check_range(struct spdk_cpuset *core_mask, uint32_t min, uint32_t max, bool isset)
+{
+ uint32_t core;
+ for (core = min; core <= max; core++) {
+ if (isset != spdk_cpuset_get_cpu(core_mask, core)) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void
+test_cpuset(void)
+{
+ uint32_t cpu;
+ struct spdk_cpuset *set = spdk_cpuset_alloc();
+
+ SPDK_CU_ASSERT_FATAL(set != NULL);
+ CU_ASSERT(spdk_cpuset_count(set) == 0);
+
+ /* Set cpu 0 */
+ spdk_cpuset_set_cpu(set, 0, true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, 0) == true);
+ CU_ASSERT(cpuset_check_range(set, 1, SPDK_CPUSET_SIZE - 1, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 1);
+
+ /* Set last cpu (cpu 0 already set) */
+ spdk_cpuset_set_cpu(set, SPDK_CPUSET_SIZE - 1, true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, 0) == true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, SPDK_CPUSET_SIZE - 1) == true);
+ CU_ASSERT(cpuset_check_range(set, 1, SPDK_CPUSET_SIZE - 2, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 2);
+
+ /* Clear cpu 0 (last cpu already set) */
+ spdk_cpuset_set_cpu(set, 0, false);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, 0) == false);
+ CU_ASSERT(cpuset_check_range(set, 1, SPDK_CPUSET_SIZE - 2, false) == 0);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, SPDK_CPUSET_SIZE - 1) == true);
+ CU_ASSERT(spdk_cpuset_count(set) == 1);
+
+ /* Set middle cpu (last cpu already set) */
+ cpu = (SPDK_CPUSET_SIZE - 1) / 2;
+ spdk_cpuset_set_cpu(set, cpu, true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, cpu) == true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, SPDK_CPUSET_SIZE - 1) == true);
+ CU_ASSERT(cpuset_check_range(set, 1, cpu - 1, false) == 0);
+ CU_ASSERT(cpuset_check_range(set, cpu + 1, SPDK_CPUSET_SIZE - 2, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 2);
+
+ /* Set all cpus */
+ for (cpu = 0; cpu < SPDK_CPUSET_SIZE; cpu++) {
+ spdk_cpuset_set_cpu(set, cpu, true);
+ }
+ CU_ASSERT(cpuset_check_range(set, 0, SPDK_CPUSET_SIZE - 1, true) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == SPDK_CPUSET_SIZE);
+
+ /* Clear all cpus */
+ spdk_cpuset_zero(set);
+ CU_ASSERT(cpuset_check_range(set, 0, SPDK_CPUSET_SIZE - 1, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 0);
+
+ spdk_cpuset_free(set);
+}
+
+static void
+test_cpuset_parse(void)
+{
+ int rc;
+ struct spdk_cpuset *core_mask;
+ char buf[1024];
+
+ core_mask = spdk_cpuset_alloc();
+ SPDK_CU_ASSERT_FATAL(core_mask != NULL);
+
+ /* Only core 0 should be set */
+ rc = spdk_cpuset_parse(core_mask, "0x1");
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, 0, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 1, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ /* Only core 1 should be set */
+ rc = spdk_cpuset_parse(core_mask, "[1]");
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, 0, false) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 1, 1, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 2, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ /* Set cores 0-10,12,128-254 */
+ rc = spdk_cpuset_parse(core_mask, "[0-10,12,128-254]");
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, 10, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 11, 11, false) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 12, 12, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 13, 127, false) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 128, 254, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 255, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ /* Set all cores */
+ snprintf(buf, sizeof(buf), "[0-%d]", SPDK_CPUSET_SIZE - 1);
+ rc = spdk_cpuset_parse(core_mask, buf);
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, SPDK_CPUSET_SIZE - 1, true) == 0);
+
+	/* NULL parameters are not allowed */
+ rc = spdk_cpuset_parse(core_mask, NULL);
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(NULL, "[1]");
+ CU_ASSERT(rc < 0);
+
+	/* Incorrectly formatted core lists */
+ rc = spdk_cpuset_parse(core_mask, "");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[10--11]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[11-10]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[10-11,]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[,10-11]");
+ CU_ASSERT(rc < 0);
+
+ /* Out of range value */
+ snprintf(buf, sizeof(buf), "[%d]", SPDK_CPUSET_SIZE + 1);
+ rc = spdk_cpuset_parse(core_mask, buf);
+ CU_ASSERT(rc < 0);
+
+ /* Overflow value (UINT64_MAX * 10) */
+ rc = spdk_cpuset_parse(core_mask, "[184467440737095516150]");
+ CU_ASSERT(rc < 0);
+
+ spdk_cpuset_free(core_mask);
+}
+
+static void
+test_cpuset_fmt(void)
+{
+ int i;
+ uint32_t lcore;
+	struct spdk_cpuset *core_mask = spdk_cpuset_alloc();
+	const char *hex_mask;
+	char hex_mask_ref[SPDK_CPUSET_SIZE / 4 + 1];
+
+	SPDK_CU_ASSERT_FATAL(core_mask != NULL);
+
+ /* Clear coremask. hex_mask should be "0" */
+ spdk_cpuset_zero(core_mask);
+ hex_mask = spdk_cpuset_fmt(core_mask);
+ SPDK_CU_ASSERT_FATAL(hex_mask != NULL);
+ CU_ASSERT(strcmp("0", hex_mask) == 0);
+
+ /* Set coremask 0x51234. Result should be "51234" */
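+	/* Bits 2, 4, 5, 9, 12, 16 and 18: 0x4 + 0x10 + 0x20 + 0x200 + 0x1000 +
+	 * 0x10000 + 0x40000 == 0x51234.
+	 */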
+ spdk_cpuset_zero(core_mask);
+ spdk_cpuset_set_cpu(core_mask, 2, true);
+ spdk_cpuset_set_cpu(core_mask, 4, true);
+ spdk_cpuset_set_cpu(core_mask, 5, true);
+ spdk_cpuset_set_cpu(core_mask, 9, true);
+ spdk_cpuset_set_cpu(core_mask, 12, true);
+ spdk_cpuset_set_cpu(core_mask, 16, true);
+ spdk_cpuset_set_cpu(core_mask, 18, true);
+ hex_mask = spdk_cpuset_fmt(core_mask);
+ SPDK_CU_ASSERT_FATAL(hex_mask != NULL);
+ CU_ASSERT(strcmp("51234", hex_mask) == 0);
+
+ /* Set all cores */
+ spdk_cpuset_zero(core_mask);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ for (lcore = 0; lcore < SPDK_CPUSET_SIZE; lcore++) {
+ spdk_cpuset_set_cpu(core_mask, lcore, true);
+ }
+ for (i = 0; i < SPDK_CPUSET_SIZE / 4; i++) {
+ hex_mask_ref[i] = 'f';
+ }
+ hex_mask_ref[SPDK_CPUSET_SIZE / 4] = '\0';
+
+ /* Check data before format */
+ CU_ASSERT(cpuset_check_range(core_mask, 0, SPDK_CPUSET_SIZE - 1, true) == 0);
+
+ hex_mask = spdk_cpuset_fmt(core_mask);
+ SPDK_CU_ASSERT_FATAL(hex_mask != NULL);
+ CU_ASSERT(strcmp(hex_mask_ref, hex_mask) == 0);
+
+ /* Check data integrity after format */
+ CU_ASSERT(cpuset_check_range(core_mask, 0, SPDK_CPUSET_SIZE - 1, true) == 0);
+
+ spdk_cpuset_free(core_mask);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("cpuset", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_cpuset);
+ CU_ADD_TEST(suite, test_cpuset_parse);
+ CU_ADD_TEST(suite, test_cpuset_fmt);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/crc16.c/.gitignore b/src/spdk/test/unit/lib/util/crc16.c/.gitignore
new file mode 100644
index 000000000..d026adf09
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc16.c/.gitignore
@@ -0,0 +1 @@
+crc16_ut
diff --git a/src/spdk/test/unit/lib/util/crc16.c/Makefile b/src/spdk/test/unit/lib/util/crc16.c/Makefile
new file mode 100644
index 000000000..339146be5
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc16.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = crc16_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c b/src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c
new file mode 100644
index 000000000..03e6c65cd
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c
@@ -0,0 +1,104 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/crc16.c"
+
+static void
+test_crc16_t10dif(void)
+{
+ uint16_t crc;
+ char buf[] = "123456789";
+
+ crc = spdk_crc16_t10dif(0, buf, strlen(buf));
+ CU_ASSERT(crc == 0xd0db);
+}
+
+static void
+test_crc16_t10dif_seed(void)
+{
+ uint16_t crc = 0;
+ char buf1[] = "1234";
+ char buf2[] = "56789";
+
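+	/* Feeding the CRC of "1234" back in as the seed for "56789" must give
+	 * the same value as a single pass over "123456789" (0xd0db, as in
+	 * test_crc16_t10dif above).
+	 */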
+ crc = spdk_crc16_t10dif(crc, buf1, strlen(buf1));
+ crc = spdk_crc16_t10dif(crc, buf2, strlen(buf2));
+ CU_ASSERT(crc == 0xd0db);
+}
+
+static void
+test_crc16_t10dif_copy(void)
+{
+ uint16_t crc1 = 0, crc2;
+ char buf1[] = "1234";
+ char buf2[] = "56789";
+ char *buf3 = calloc(1, strlen(buf1) + strlen(buf2) + 1);
+ SPDK_CU_ASSERT_FATAL(buf3 != NULL);
+
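+	/* spdk_crc16_t10dif_copy() copies the source into buf3 while updating
+	 * the running CRC, so the chained result and a one-shot CRC over the
+	 * assembled buffer must agree.
+	 */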
+ crc1 = spdk_crc16_t10dif_copy(crc1, buf3, buf1, strlen(buf1));
+ crc1 = spdk_crc16_t10dif_copy(crc1, buf3 + strlen(buf1), buf2, strlen(buf2));
+ CU_ASSERT(crc1 == 0xd0db);
+
+ crc2 = spdk_crc16_t10dif(0, buf3, strlen(buf3));
+ CU_ASSERT(crc2 == 0xd0db);
+
+ free(buf3);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("crc16", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_crc16_t10dif);
+ CU_ADD_TEST(suite, test_crc16_t10dif_seed);
+ CU_ADD_TEST(suite, test_crc16_t10dif_copy);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore b/src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore
new file mode 100644
index 000000000..40a85a93f
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore
@@ -0,0 +1 @@
+crc32_ieee_ut
diff --git a/src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile b/src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile
new file mode 100644
index 000000000..6b976721c
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = crc32_ieee_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c b/src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c
new file mode 100644
index 000000000..2187438bf
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/crc32.c"
+#include "util/crc32_ieee.c"
+
+static void
+test_crc32_ieee(void)
+{
+ uint32_t crc;
+ char buf[] = "Hello world!";
+
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32_ieee_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x1b851995);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("crc32_ieee", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_crc32_ieee);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/crc32c.c/.gitignore b/src/spdk/test/unit/lib/util/crc32c.c/.gitignore
new file mode 100644
index 000000000..55bedec7f
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32c.c/.gitignore
@@ -0,0 +1 @@
+crc32c_ut
diff --git a/src/spdk/test/unit/lib/util/crc32c.c/Makefile b/src/spdk/test/unit/lib/util/crc32c.c/Makefile
new file mode 100644
index 000000000..4f1cc0e4b
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32c.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = crc32c_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c b/src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c
new file mode 100644
index 000000000..6313d7bf6
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c
@@ -0,0 +1,145 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/crc32.c"
+#include "util/crc32c.c"
+
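+/*
+ * spdk_crc32c_update() takes and returns the raw CRC remainder; each case
+ * below applies the standard CRC-32C convention of seeding with ~0 and
+ * inverting the final value inline. An equivalent one-shot wrapper (a sketch
+ * only, not part of the SPDK API) would be:
+ *
+ *	static uint32_t
+ *	ut_crc32c(const void *buf, size_t len)
+ *	{
+ *		return spdk_crc32c_update(buf, len, 0xFFFFFFFFu) ^ 0xFFFFFFFFu;
+ *	}
+ */
+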
+static void
+test_crc32c(void)
+{
+ uint32_t crc;
+ char buf[1024];
+
+ /* Verify a string's CRC32-C value against the known correct result. */
+ snprintf(buf, sizeof(buf), "%s", "Hello world!");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x7b98e751);
+
+ /*
+ * The main loop of the optimized CRC32-C implementation processes data in 8-byte blocks,
+ * followed by a loop to handle the 0-7 trailing bytes.
+ * Test all buffer sizes from 0 to 7 in order to hit all possible trailing byte counts.
+ */
+
+ /* 0-byte buffer should not modify CRC at all, so final result should be ~0 ^ ~0 == 0 */
+ snprintf(buf, sizeof(buf), "%s", "");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0);
+
+ /* 1-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "1");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x90F599E3);
+
+ /* 2-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "12");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x7355C460);
+
+ /* 3-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "123");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x107B2FB2);
+
+ /* 4-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "1234");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0xF63AF4EE);
+
+ /* 5-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "12345");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x18D12335);
+
+ /* 6-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "123456");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x41357186);
+
+ /* 7-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "1234567");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x124297EA);
+
+ /* Test a buffer of exactly 8 bytes (one block in the main CRC32-C loop). */
+ snprintf(buf, sizeof(buf), "%s", "12345678");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x6087809A);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("crc32c", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_crc32c);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/dif.c/.gitignore b/src/spdk/test/unit/lib/util/dif.c/.gitignore
new file mode 100644
index 000000000..040b296b7
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/dif.c/.gitignore
@@ -0,0 +1 @@
+dif_ut
diff --git a/src/spdk/test/unit/lib/util/dif.c/Makefile b/src/spdk/test/unit/lib/util/dif.c/Makefile
new file mode 100644
index 000000000..714928236
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/dif.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = dif_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/dif.c/dif_ut.c b/src/spdk/test/unit/lib/util/dif.c/dif_ut.c
new file mode 100644
index 000000000..0b069b189
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/dif.c/dif_ut.c
@@ -0,0 +1,2669 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/dif.c"
+
+#define DATA_PATTERN(offset) ((uint8_t)(0xAB + (offset)))
+#define GUARD_SEED 0xCD
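+/* DATA_PATTERN fills each data byte with a position-dependent value so that
+ * byte-level corruption is detectable, and GUARD_SEED gives the guard CRC a
+ * nonzero starting remainder (exercised by dif_guard_seed_test below).
+ */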
+
+static int
+ut_data_pattern_generate(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks)
+{
+ struct _dif_sgl sgl;
+ uint32_t offset_blocks, offset_in_block, buf_len, data_offset, i;
+ uint8_t *buf;
+
+ _dif_sgl_init(&sgl, iovs, iovcnt);
+
+ if (!_dif_sgl_is_valid(&sgl, block_size * num_blocks)) {
+ return -1;
+ }
+
+ offset_blocks = 0;
+ data_offset = 0;
+
+ while (offset_blocks < num_blocks) {
+ offset_in_block = 0;
+ while (offset_in_block < block_size) {
+ _dif_sgl_get_buf(&sgl, (void *)&buf, &buf_len);
+ if (offset_in_block < block_size - md_size) {
+ buf_len = spdk_min(buf_len,
+ block_size - md_size - offset_in_block);
+ for (i = 0; i < buf_len; i++) {
+ buf[i] = DATA_PATTERN(data_offset + i);
+ }
+ data_offset += buf_len;
+ } else {
+ buf_len = spdk_min(buf_len, block_size - offset_in_block);
+ memset(buf, 0, buf_len);
+ }
+ _dif_sgl_advance(&sgl, buf_len);
+ offset_in_block += buf_len;
+ }
+ offset_blocks++;
+ }
+
+ return 0;
+}
+
+static int
+ut_data_pattern_verify(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks)
+{
+ struct _dif_sgl sgl;
+ uint32_t offset_blocks, offset_in_block, buf_len, data_offset, i;
+ uint8_t *buf;
+
+ _dif_sgl_init(&sgl, iovs, iovcnt);
+
+ if (!_dif_sgl_is_valid(&sgl, block_size * num_blocks)) {
+ return -1;
+ }
+
+ offset_blocks = 0;
+ data_offset = 0;
+
+ while (offset_blocks < num_blocks) {
+ offset_in_block = 0;
+ while (offset_in_block < block_size) {
+ _dif_sgl_get_buf(&sgl, (void *)&buf, &buf_len);
+
+ if (offset_in_block < block_size - md_size) {
+ buf_len = spdk_min(buf_len,
+ block_size - md_size - offset_in_block);
+ for (i = 0; i < buf_len; i++) {
+ if (buf[i] != DATA_PATTERN(data_offset + i)) {
+ return -1;
+ }
+ }
+ data_offset += buf_len;
+ } else {
+ buf_len = spdk_min(buf_len, block_size - offset_in_block);
+ }
+ _dif_sgl_advance(&sgl, buf_len);
+ offset_in_block += buf_len;
+ }
+ offset_blocks++;
+ }
+
+ return 0;
+}
+
+static void
+_iov_alloc_buf(struct iovec *iov, uint32_t len)
+{
+ iov->iov_base = calloc(1, len);
+ iov->iov_len = len;
+ SPDK_CU_ASSERT_FATAL(iov->iov_base != NULL);
+}
+
+static void
+_iov_free_buf(struct iovec *iov)
+{
+ free(iov->iov_base);
+}
+
+static void
+_iov_set_buf(struct iovec *iov, uint8_t *buf, uint32_t buf_len)
+{
+ iov->iov_base = buf;
+ iov->iov_len = buf_len;
+}
+
+static bool
+_iov_check(struct iovec *iov, void *iov_base, uint32_t iov_len)
+{
+ return (iov->iov_base == iov_base && iov->iov_len == iov_len);
+}
+
+static void
+_dif_generate_and_verify(struct iovec *iov,
+ uint32_t block_size, uint32_t md_size, bool dif_loc,
+ enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t ref_tag, uint32_t e_ref_tag,
+ uint16_t app_tag, uint16_t apptag_mask, uint16_t e_app_tag,
+ bool expect_pass)
+{
+ struct spdk_dif_ctx ctx = {};
+ uint32_t guard_interval;
+ uint16_t guard = 0;
+ int rc;
+
+ rc = ut_data_pattern_generate(iov, 1, block_size, md_size, 1);
+ CU_ASSERT(rc == 0);
+
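+	/* _get_guard_interval() returns the byte offset of the 8-byte DIF field
+	 * within the block: the start of the metadata area when dif_loc is true
+	 * (DIF in the first 8 bytes of metadata), or block size minus 8 when it
+	 * is false (DIF in the last 8 bytes).
+	 */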
+ guard_interval = _get_guard_interval(block_size, md_size, dif_loc, true);
+
+ ctx.dif_type = dif_type;
+ ctx.dif_flags = dif_flags;
+ ctx.init_ref_tag = ref_tag;
+ ctx.app_tag = app_tag;
+
+ if (dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
+ guard = spdk_crc16_t10dif(0, iov->iov_base, guard_interval);
+ }
+
+ _dif_generate(iov->iov_base + guard_interval, guard, 0, &ctx);
+
+ ctx.init_ref_tag = e_ref_tag;
+ ctx.apptag_mask = apptag_mask;
+ ctx.app_tag = e_app_tag;
+
+ rc = _dif_verify(iov->iov_base + guard_interval, guard, 0, &ctx, NULL);
+ CU_ASSERT((expect_pass && rc == 0) || (!expect_pass && rc != 0));
+
+ rc = ut_data_pattern_verify(iov, 1, block_size, md_size, 1);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dif_generate_and_verify_test(void)
+{
+ struct iovec iov;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iov, 4096 + 128);
+
+ /* Positive cases */
+
+ /* The case that DIF is contained in the first 8 bytes of metadata. */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, true,
+ SPDK_DIF_TYPE1, dif_flags,
+ 22, 22,
+ 0x22, 0xFFFF, 0x22,
+ true);
+
+ /* The case that DIF is contained in the last 8 bytes of metadata. */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE1, dif_flags,
+ 22, 22,
+ 0x22, 0xFFFF, 0x22,
+ true);
+
+ /* Negative cases */
+
+ /* Reference tag doesn't match. */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE1, dif_flags,
+ 22, 23,
+ 0x22, 0xFFFF, 0x22,
+ false);
+
+ /* Application tag doesn't match. */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE1, dif_flags,
+ 22, 22,
+ 0x22, 0xFFFF, 0x23,
+ false);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+dif_disable_check_test(void)
+{
+ struct iovec iov;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iov, 4096 + 128);
+
+	/* For Type 1, DIF checking is disabled when the Application Tag is
+	 * 0xFFFF, so verification is expected to pass.
+	 */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE1, dif_flags,
+ 22, 22,
+ 0xFFFF, 0xFFFF, 0x22,
+ true);
+
+	/* For Type 3, DIF checking stays enabled when the Application Tag is
+	 * 0xFFFF but the Reference Tag is not 0xFFFFFFFF, so verification is
+	 * expected to fail.
+	 */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE3, dif_flags,
+ 22, 22,
+ 0xFFFF, 0xFFFF, 0x22,
+ false);
+
+	/* For Type 3, DIF checking is disabled when the Application Tag is
+	 * 0xFFFF and the Reference Tag is 0xFFFFFFFF, so verification is
+	 * expected to pass.
+	 */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE3, dif_flags,
+ 0xFFFFFFFF, 22,
+ 0xFFFF, 0xFFFF, 0x22,
+ true);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+dif_sec_512_md_0_error_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ int rc;
+
+ /* Metadata size is 0. */
+ rc = spdk_dif_ctx_init(&ctx, 512, 0, true, false, SPDK_DIF_TYPE1, 0, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc != 0);
+}
+
+static void
+dif_guard_seed_test(void)
+{
+ struct iovec iov;
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct spdk_dif *dif;
+ uint16_t guard;
+ int rc;
+
+ _iov_alloc_buf(&iov, 512 + 8);
+
+ memset(iov.iov_base, 0, 512 + 8);
+
+ dif = (struct spdk_dif *)(iov.iov_base + 512);
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(&iov, 1, 1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* Guard should be zero if the block is all zero and seed is not added. */
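+	/* (A CRC over all-zero data starting from a zero remainder stays zero,
+	 * so only a seed can make the guard nonzero.)
+	 */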
+ guard = from_be16(&dif->guard);
+ CU_ASSERT(guard == 0);
+
+ rc = spdk_dif_verify(&iov, 1, 1, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK, 0, 0, 0, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(&iov, 1, 1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* Guard should not be zero if the block is all zero but seed is added. */
+ guard = from_be16(&dif->guard);
+ CU_ASSERT(guard != 0);
+
+ rc = spdk_dif_verify(&iov, 1, 1, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+dif_generate_and_verify(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag)
+{
+ struct spdk_dif_ctx ctx = {};
+ int rc;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc, dif_type, dif_flags,
+ init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(iovs, iovcnt, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(iovs, iovcnt, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dif_disable_sec_512_md_8_single_iov_test(void)
+{
+ struct iovec iov;
+
+ _iov_alloc_buf(&iov, 512 + 8);
+
+ dif_generate_and_verify(&iov, 1, 512 + 8, 8, 1, false, SPDK_DIF_DISABLE, 0, 0, 0, 0);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+dif_sec_512_md_8_prchk_0_single_iov_test(void)
+{
+ struct iovec iov;
+
+ _iov_alloc_buf(&iov, 512 + 8);
+
+ dif_generate_and_verify(&iov, 1, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1, 0, 0, 0, 0);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+dif_sec_512_md_8_prchk_0_1_2_4_multi_iovs_test(void)
+{
+ struct iovec iovs[4];
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], (512 + 8) * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ dif_generate_and_verify(iovs, 4, 512 + 8, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ 0, 22, 0xFFFF, 0x22);
+
+ dif_generate_and_verify(iovs, 4, 512 + 8, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK, 22, 0xFFFF, 0x22);
+
+ dif_generate_and_verify(iovs, 4, 512 + 8, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_APPTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ dif_generate_and_verify(iovs, 4, 512 + 8, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_REFTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dif_sec_4096_md_128_prchk_7_multi_iovs_test(void)
+{
+ struct iovec iovs[4];
+ int i, num_blocks;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], (4096 + 128) * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ dif_generate_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ dif_generate_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, true, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_split_data_and_md_test(void)
+{
+ struct iovec iovs[2];
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 512);
+ _iov_alloc_buf(&iovs[1], 8);
+
+ dif_generate_and_verify(iovs, 2, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_split_data_test(void)
+{
+ struct iovec iovs[2];
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 256);
+ _iov_alloc_buf(&iovs[1], 264);
+
+ dif_generate_and_verify(iovs, 2, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_split_guard_test(void)
+{
+ struct iovec iovs[2];
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 513);
+ _iov_alloc_buf(&iovs[1], 7);
+
+ dif_generate_and_verify(iovs, 2, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_split_apptag_test(void)
+{
+ struct iovec iovs[2];
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 515);
+ _iov_alloc_buf(&iovs[1], 5);
+
+ dif_generate_and_verify(iovs, 2, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_split_reftag_test(void)
+{
+ struct iovec iovs[2];
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 518);
+ _iov_alloc_buf(&iovs[1], 2);
+
+ dif_generate_and_verify(iovs, 2, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_complex_splits_test(void)
+{
+ struct iovec iovs[9];
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
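+	/* Notation: field[block][byte range], e.g. data[0][255:0] is bytes
+	 * 0-255 of block 0's data area. The iovec boundaries below deliberately
+	 * fall inside the guard, app tag, and ref tag fields.
+	 */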
+ /* data[0][255:0] */
+ _iov_alloc_buf(&iovs[0], 256);
+
+ /* data[0][511:256], guard[0][0] */
+ _iov_alloc_buf(&iovs[1], 256 + 1);
+
+ /* guard[0][1], apptag[0][0] */
+ _iov_alloc_buf(&iovs[2], 1 + 1);
+
+ /* apptag[0][1], reftag[0][0] */
+ _iov_alloc_buf(&iovs[3], 1 + 1);
+
+ /* reftag[0][3:1], data[1][255:0] */
+ _iov_alloc_buf(&iovs[4], 3 + 256);
+
+ /* data[1][511:256], guard[1][0] */
+ _iov_alloc_buf(&iovs[5], 256 + 1);
+
+ /* guard[1][1], apptag[1][0] */
+ _iov_alloc_buf(&iovs[6], 1 + 1);
+
+ /* apptag[1][1], reftag[1][0] */
+ _iov_alloc_buf(&iovs[7], 1 + 1);
+
+ /* reftag[1][3:1] */
+ _iov_alloc_buf(&iovs[8], 3);
+
+ dif_generate_and_verify(iovs, 9, 512 + 8, 8, 2, false, SPDK_DIF_TYPE1, dif_flags,
+ 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 9; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dif_sec_4096_md_128_prchk_7_multi_iovs_complex_splits_test(void)
+{
+ struct iovec iovs[11];
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+	/* data[0][999:0] */
+ _iov_alloc_buf(&iovs[0], 1000);
+
+	/* data[0][4095:1000], guard[0][0] */
+ _iov_alloc_buf(&iovs[1], 3096 + 1);
+
+ /* guard[0][1], apptag[0][0] */
+ _iov_alloc_buf(&iovs[2], 1 + 1);
+
+ /* apptag[0][1], reftag[0][0] */
+ _iov_alloc_buf(&iovs[3], 1 + 1);
+
+ /* reftag[0][3:1], ignore[0][59:0] */
+ _iov_alloc_buf(&iovs[4], 3 + 60);
+
+	/* ignore[0][119:60], data[1][3050:0] */
+ _iov_alloc_buf(&iovs[5], 60 + 3051);
+
+	/* data[1][4095:3051], guard[1][0] */
+ _iov_alloc_buf(&iovs[6], 1045 + 1);
+
+ /* guard[1][1], apptag[1][0] */
+ _iov_alloc_buf(&iovs[7], 1 + 1);
+
+ /* apptag[1][1], reftag[1][0] */
+ _iov_alloc_buf(&iovs[8], 1 + 1);
+
+ /* reftag[1][3:1], ignore[1][9:0] */
+ _iov_alloc_buf(&iovs[9], 3 + 10);
+
+	/* ignore[1][127:10] */
+ _iov_alloc_buf(&iovs[10], 118);
+
+ dif_generate_and_verify(iovs, 11, 4096 + 128, 128, 2, false, SPDK_DIF_TYPE1, dif_flags,
+ 22, 0xFFFF, 0x22);
+ dif_generate_and_verify(iovs, 11, 4096 + 128, 128, 2, true, SPDK_DIF_TYPE1, dif_flags,
+ 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 11; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+_dif_inject_error_and_verify(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags, bool dif_loc)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ uint32_t inject_offset = 0, dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc,
+ SPDK_DIF_TYPE1, dif_flags, 88, 0xFFFF, 0x88, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(iovs, iovcnt, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_inject_error(iovs, iovcnt, num_blocks, &ctx, inject_flags, &inject_offset);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(iovs, iovcnt, num_blocks, &ctx, &err_blk);
+ CU_ASSERT(rc != 0);
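+	/* A corrupted data byte is detected through the guard CRC, so data
+	 * error injection is expected to be reported as a guard error.
+	 */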
+ if (inject_flags == SPDK_DIF_DATA_ERROR) {
+ CU_ASSERT(SPDK_DIF_GUARD_ERROR == err_blk.err_type);
+ } else {
+ CU_ASSERT(inject_flags == err_blk.err_type);
+ }
+ CU_ASSERT(inject_offset == err_blk.err_offset);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT((rc == 0 && (inject_flags != SPDK_DIF_DATA_ERROR)) ||
+ (rc != 0 && (inject_flags == SPDK_DIF_DATA_ERROR)));
+}
+
+static void
+dif_inject_error_and_verify(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags)
+{
+ /* The case that DIF is contained in the first 8 bytes of metadata. */
+ _dif_inject_error_and_verify(iovs, iovcnt, block_size, md_size, num_blocks,
+ inject_flags, true);
+
+ /* The case that DIF is contained in the last 8 bytes of metadata. */
+ _dif_inject_error_and_verify(iovs, iovcnt, block_size, md_size, num_blocks,
+ inject_flags, false);
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test(void)
+{
+ struct iovec iovs[4];
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], (4096 + 128) * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ dif_inject_error_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, SPDK_DIF_DATA_ERROR);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_data_and_md_test(void)
+{
+ struct iovec iovs[2];
+
+ _iov_alloc_buf(&iovs[0], 4096);
+ _iov_alloc_buf(&iovs[1], 128);
+
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_DATA_ERROR);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_data_test(void)
+{
+ struct iovec iovs[2];
+
+ _iov_alloc_buf(&iovs[0], 2048);
+ _iov_alloc_buf(&iovs[1], 2048 + 128);
+
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_DATA_ERROR);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_guard_test(void)
+{
+ struct iovec iovs[2];
+
+ _iov_alloc_buf(&iovs[0], 4096 + 1);
+ _iov_alloc_buf(&iovs[1], 127);
+
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_DATA_ERROR);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_apptag_test(void)
+{
+ struct iovec iovs[2];
+
+ _iov_alloc_buf(&iovs[0], 4096 + 3);
+ _iov_alloc_buf(&iovs[1], 125);
+
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_DATA_ERROR);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_reftag_test(void)
+{
+ struct iovec iovs[2];
+
+ _iov_alloc_buf(&iovs[0], 4096 + 6);
+ _iov_alloc_buf(&iovs[1], 122);
+
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_DATA_ERROR);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_copy_gen_and_verify(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag)
+{
+ struct spdk_dif_ctx ctx = {};
+ int rc;
+
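+	/* The data iovecs carry only block_size - md_size bytes per block;
+	 * spdk_dif_generate_copy() interleaves DIF metadata while copying into
+	 * bounce_iov, and spdk_dif_verify_copy() verifies the bounce buffer
+	 * while copying the data back out (the read-path counterpart).
+	 */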
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size - md_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc, dif_type, dif_flags,
+ init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_copy(iovs, iovcnt, bounce_iov, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify_copy(iovs, iovcnt, bounce_iov, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size - md_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dif_copy_sec_512_md_8_prchk_0_single_iov(void)
+{
+ struct iovec iov, bounce_iov;
+
+ _iov_alloc_buf(&iov, 512 * 4);
+ _iov_alloc_buf(&bounce_iov, (512 + 8) * 4);
+
+ dif_copy_gen_and_verify(&iov, 1, &bounce_iov, 512 + 8, 8, 4,
+ false, SPDK_DIF_TYPE1, 0, 0, 0, 0);
+ dif_copy_gen_and_verify(&iov, 1, &bounce_iov, 512 + 8, 8, 4,
+ true, SPDK_DIF_TYPE1, 0, 0, 0, 0);
+
+ _iov_free_buf(&iov);
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dif_copy_sec_512_md_8_prchk_0_1_2_4_multi_iovs(void)
+{
+ struct iovec iovs[4], bounce_iov;
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 512 * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ _iov_alloc_buf(&bounce_iov, (512 + 8) * num_blocks);
+
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 512 + 8, 8, num_blocks,
+ false, SPDK_DIF_TYPE1, 0, 22, 0xFFFF, 0x22);
+
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 512 + 8, 8, num_blocks,
+ false, SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK, 22, 0xFFFF, 0x22);
+
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 512 + 8, 8, num_blocks,
+ false, SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_APPTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 512 + 8, 8, num_blocks,
+ false, SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_REFTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dif_copy_sec_4096_md_128_prchk_7_multi_iovs(void)
+{
+ struct iovec iovs[4], bounce_iov;
+ uint32_t dif_flags;
+ int i, num_blocks;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 4096 * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ _iov_alloc_buf(&bounce_iov, (4096 + 128) * num_blocks);
+
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128, num_blocks,
+ false, SPDK_DIF_TYPE1, dif_flags, 22, 0xFFFF, 0x22);
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128, num_blocks,
+ true, SPDK_DIF_TYPE1, dif_flags, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dif_copy_sec_512_md_8_prchk_7_multi_iovs_split_data(void)
+{
+ struct iovec iovs[2], bounce_iov;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 256);
+ _iov_alloc_buf(&iovs[1], 256);
+
+ _iov_alloc_buf(&bounce_iov, 512 + 8);
+
+ dif_copy_gen_and_verify(iovs, 2, &bounce_iov, 512 + 8, 8, 1,
+ false, SPDK_DIF_TYPE1, dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dif_copy_sec_512_md_8_prchk_7_multi_iovs_complex_splits(void)
+{
+ struct iovec iovs[6], bounce_iov;
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ /* data[0][255:0] */
+ _iov_alloc_buf(&iovs[0], 256);
+
+ /* data[0][511:256], data[1][255:0] */
+ _iov_alloc_buf(&iovs[1], 256 + 256);
+
+ /* data[1][383:256] */
+ _iov_alloc_buf(&iovs[2], 128);
+
+ /* data[1][384] */
+ _iov_alloc_buf(&iovs[3], 1);
+
+ /* data[1][510:385] */
+ _iov_alloc_buf(&iovs[4], 126);
+
+ /* data[1][511], data[2][511:0], data[3][511:0] */
+ _iov_alloc_buf(&iovs[5], 1 + 512 * 2);
+
+ _iov_alloc_buf(&bounce_iov, (512 + 8) * 4);
+
+ dif_copy_gen_and_verify(iovs, 6, &bounce_iov, 512 + 8, 8, 4,
+ true, SPDK_DIF_TYPE1, dif_flags, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 6; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&bounce_iov);
+}
+
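+/*
+ * Generate a valid DIF-interleaved bounce buffer, inject the requested
+ * error into it, and check that spdk_dif_verify_copy() reports the
+ * expected error type and offset. An injected data error corrupts the
+ * block contents, so it is detected as a guard (CRC) error.
+ */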
+static void
+_dif_copy_inject_error_and_verify(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags, bool dif_loc)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ uint32_t inject_offset = 0, dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size - md_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc, SPDK_DIF_TYPE1, dif_flags,
+ 88, 0xFFFF, 0x88, 0, GUARD_SEED);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ rc = spdk_dif_generate_copy(iovs, iovcnt, bounce_iov, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_inject_error(bounce_iov, 1, num_blocks, &ctx, inject_flags, &inject_offset);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify_copy(iovs, iovcnt, bounce_iov, num_blocks, &ctx, &err_blk);
+ CU_ASSERT(rc != 0);
+ if (inject_flags == SPDK_DIF_DATA_ERROR) {
+ CU_ASSERT(SPDK_DIF_GUARD_ERROR == err_blk.err_type);
+ } else {
+ CU_ASSERT(inject_flags == err_blk.err_type);
+ }
+ CU_ASSERT(inject_offset == err_blk.err_offset);
+}
+
+static void
+dif_copy_inject_error_and_verify(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags)
+{
+ /* The case where the DIF is in the first 8 bytes of the metadata. */
+ _dif_copy_inject_error_and_verify(iovs, iovcnt, bounce_iov,
+ block_size, md_size, num_blocks,
+ inject_flags, true);
+
+ /* The case where the DIF is in the last 8 bytes of the metadata. */
+ _dif_copy_inject_error_and_verify(iovs, iovcnt, bounce_iov,
+ block_size, md_size, num_blocks,
+ inject_flags, false);
+}
+
+static void
+dif_copy_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test(void)
+{
+ struct iovec iovs[4], bounce_iov;
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 4096 * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ _iov_alloc_buf(&bounce_iov, (4096 + 128) * num_blocks);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ num_blocks, SPDK_DIF_GUARD_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ num_blocks, SPDK_DIF_APPTAG_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ num_blocks, SPDK_DIF_REFTAG_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ num_blocks, SPDK_DIF_DATA_ERROR);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dif_copy_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_test(void)
+{
+ struct iovec iovs[4], bounce_iov;
+ int i;
+
+ _iov_alloc_buf(&iovs[0], 2048);
+ _iov_alloc_buf(&iovs[1], 2048);
+ _iov_alloc_buf(&iovs[2], 1);
+ _iov_alloc_buf(&iovs[3], 4095);
+
+ _iov_alloc_buf(&bounce_iov, (4096 + 128) * 2);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ 2, SPDK_DIF_GUARD_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ 2, SPDK_DIF_APPTAG_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ 2, SPDK_DIF_REFTAG_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ 2, SPDK_DIF_DATA_ERROR);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dix_sec_512_md_0_error(void)
+{
+ struct spdk_dif_ctx ctx;
+ int rc;
+
+ rc = spdk_dif_ctx_init(&ctx, 512, 0, false, false, SPDK_DIF_TYPE1, 0, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc != 0);
+}
+
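+/*
+ * Helper for the DIX tests: the protection information lives in a
+ * separate metadata buffer (md_iov) rather than being interleaved, so
+ * generation and verification take the data and metadata iovecs
+ * independently.
+ */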
+static void
+dix_generate_and_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag)
+{
+ struct spdk_dif_ctx ctx;
+ int rc;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, false, dif_loc, dif_type, dif_flags,
+ init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_generate(iovs, iovcnt, md_iov, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_verify(iovs, iovcnt, md_iov, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dix_sec_512_md_8_prchk_0_single_iov(void)
+{
+ struct iovec iov, md_iov;
+
+ _iov_alloc_buf(&iov, 512 * 4);
+ _iov_alloc_buf(&md_iov, 8 * 4);
+
+ dix_generate_and_verify(&iov, 1, &md_iov, 512, 8, 4, false, SPDK_DIF_TYPE1, 0, 0, 0, 0);
+ dix_generate_and_verify(&iov, 1, &md_iov, 512, 8, 4, true, SPDK_DIF_TYPE1, 0, 0, 0, 0);
+
+ _iov_free_buf(&iov);
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_512_md_8_prchk_0_1_2_4_multi_iovs(void)
+{
+ struct iovec iovs[4], md_iov;
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 512 * (i + 1));
+ num_blocks += i + 1;
+ }
+ _iov_alloc_buf(&md_iov, 8 * num_blocks);
+
+ dix_generate_and_verify(iovs, 4, &md_iov, 512, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ 0, 22, 0xFFFF, 0x22);
+
+ dix_generate_and_verify(iovs, 4, &md_iov, 512, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK, 22, 0xFFFF, 0x22);
+
+ dix_generate_and_verify(iovs, 4, &md_iov, 512, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_APPTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ dix_generate_and_verify(iovs, 4, &md_iov, 512, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_REFTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_4096_md_128_prchk_7_multi_iovs(void)
+{
+ struct iovec iovs[4], md_iov;
+ uint32_t dif_flags;
+ int i, num_blocks;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 4096 * (i + 1));
+ num_blocks += i + 1;
+ }
+ _iov_alloc_buf(&md_iov, 128 * num_blocks);
+
+ dix_generate_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+ dix_generate_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, true, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_512_md_8_prchk_7_multi_iovs_split_data(void)
+{
+ struct iovec iovs[2], md_iov;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 256);
+ _iov_alloc_buf(&iovs[1], 256);
+ _iov_alloc_buf(&md_iov, 8);
+
+ dix_generate_and_verify(iovs, 2, &md_iov, 512, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_512_md_8_prchk_7_multi_iovs_complex_splits(void)
+{
+ struct iovec iovs[6], md_iov;
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ /* data[0][255:0] */
+ _iov_alloc_buf(&iovs[0], 256);
+
+ /* data[0][511:256], data[1][255:0] */
+ _iov_alloc_buf(&iovs[1], 256 + 256);
+
+ /* data[1][383:256] */
+ _iov_alloc_buf(&iovs[2], 128);
+
+ /* data[1][384] */
+ _iov_alloc_buf(&iovs[3], 1);
+
+ /* data[1][510:385] */
+ _iov_alloc_buf(&iovs[4], 126);
+
+ /* data[1][511], data[2][511:0], data[3][511:0] */
+ _iov_alloc_buf(&iovs[5], 1 + 512 * 2);
+
+ _iov_alloc_buf(&md_iov, 8 * 4);
+
+ dix_generate_and_verify(iovs, 6, &md_iov, 512, 8, 4, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 6; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
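+/*
+ * Same flow as the DIF injection helper, but with separate metadata:
+ * generate valid protection information into md_iov, inject an error,
+ * and check that spdk_dix_verify() reports the expected error type and
+ * offset (a data error again surfaces as a guard error).
+ */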
+static void
+_dix_inject_error_and_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags, bool dif_loc)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ uint32_t inject_offset = 0, dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, false, dif_loc, SPDK_DIF_TYPE1, dif_flags,
+ 88, 0xFFFF, 0x88, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_generate(iovs, iovcnt, md_iov, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_inject_error(iovs, iovcnt, md_iov, num_blocks, &ctx, inject_flags, &inject_offset);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_verify(iovs, iovcnt, md_iov, num_blocks, &ctx, &err_blk);
+ CU_ASSERT(rc != 0);
+
+ if (inject_flags == SPDK_DIF_DATA_ERROR) {
+ CU_ASSERT(SPDK_DIF_GUARD_ERROR == err_blk.err_type);
+ } else {
+ CU_ASSERT(inject_flags == err_blk.err_type);
+ }
+ CU_ASSERT(inject_offset == err_blk.err_offset);
+}
+
+static void
+dix_inject_error_and_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags)
+{
+ /* The case where the DIF is in the first 8 bytes of the metadata. */
+ _dix_inject_error_and_verify(iovs, iovcnt, md_iov, block_size, md_size, num_blocks,
+ inject_flags, true);
+
+ /* The case where the DIF is in the last 8 bytes of the metadata. */
+ _dix_inject_error_and_verify(iovs, iovcnt, md_iov, block_size, md_size, num_blocks,
+ inject_flags, false);
+}
+
+static void
+dix_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test(void)
+{
+ struct iovec iovs[4], md_iov;
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 4096 * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ _iov_alloc_buf(&md_iov, 128 * num_blocks);
+
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, SPDK_DIF_GUARD_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, SPDK_DIF_APPTAG_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, SPDK_DIF_REFTAG_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, SPDK_DIF_DATA_ERROR);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_test(void)
+{
+ struct iovec iovs[4], md_iov;
+ int i;
+
+ _iov_alloc_buf(&iovs[0], 2048);
+ _iov_alloc_buf(&iovs[1], 2048);
+ _iov_alloc_buf(&iovs[2], 1);
+ _iov_alloc_buf(&iovs[3], 4095);
+
+ _iov_alloc_buf(&md_iov, 128 * 2);
+
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, 2, SPDK_DIF_GUARD_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, 2, SPDK_DIF_APPTAG_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, 2, SPDK_DIF_REFTAG_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, 2, SPDK_DIF_DATA_ERROR);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
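+/*
+ * Simulate a vectored read: fill the iovec array with the data pattern,
+ * starting at stream offset read_base and stopping after read_len bytes
+ * (or when the iovecs are exhausted), and return the number of bytes
+ * written.
+ */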
+static int
+ut_readv(uint32_t read_base, uint32_t read_len, struct iovec *iovs, int iovcnt)
+{
+ int i;
+ uint32_t j, offset;
+ uint8_t *buf;
+
+ offset = 0;
+ for (i = 0; i < iovcnt; i++) {
+ buf = iovs[i].iov_base;
+ for (j = 0; j < iovs[i].iov_len; j++, offset++) {
+ if (offset >= read_len) {
+ return offset;
+ }
+ buf[j] = DATA_PATTERN(read_base + offset);
+ }
+ }
+
+ return offset;
+}
+
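+/*
+ * Exercise the DIF streaming flow: spdk_dif_set_md_interleave_iovs()
+ * maps only the data portions of a metadata-interleaved buffer into an
+ * iovec array, a simulated read fills those data regions, and
+ * spdk_dif_generate_stream() then generates DIF for each block once it
+ * is fully populated. The result must match a buffer produced by a
+ * plain spdk_dif_generate().
+ */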
+static void
+set_md_interleave_iovs_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct iovec iov1, iov2, dif_iovs[4] = {};
+ uint32_t dif_check_flags, data_len, read_len, data_offset, mapped_len = 0;
+ uint8_t *buf1, *buf2;
+ int rc;
+
+ dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_check_flags, 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ /* The first data buffer:
+ * - Create an iovec array that leaves a space for the metadata of each block.
+ * - The vectored read is split, so the iovec array is re-created before
+ *   every vectored read.
+ */
+ buf1 = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf1 != NULL);
+ _iov_set_buf(&iov1, buf1, (4096 + 128) * 4);
+
+ data_offset = 0;
+ data_len = 4096 * 4;
+
+ /* 1st read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 4096 * 4);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], buf1 + 4096 + 128, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], buf1 + (4096 + 128) * 2, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], buf1 + (4096 + 128) * 3, 4096) == true);
+
+ read_len = ut_readv(data_offset, 1024, dif_iovs, 4);
+ CU_ASSERT(read_len == 1024);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 2nd read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 3072 + 4096 * 3);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + 1024, 3072) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], buf1 + 4096 + 128, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], buf1 + (4096 + 128) * 2, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], buf1 + (4096 + 128) * 3, 4096) == true);
+
+ read_len = ut_readv(data_offset, 3071, dif_iovs, 4);
+ CU_ASSERT(read_len == 3071);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 3rd read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 1 + 4096 * 3);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + 4095, 1) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], buf1 + 4096 + 128, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], buf1 + (4096 + 128) * 2, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], buf1 + (4096 + 128) * 3, 4096) == true);
+
+ read_len = ut_readv(data_offset, 1 + 4096 * 2 + 512, dif_iovs, 4);
+ CU_ASSERT(read_len == 1 + 4096 * 2 + 512);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 4th read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(mapped_len == 3584);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + (4096 + 128) * 3 + 512, 3584) == true);
+
+ read_len = ut_readv(data_offset, 3584, dif_iovs, 1);
+ CU_ASSERT(read_len == 3584);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ CU_ASSERT(data_offset == 4096 * 4);
+ data_len -= read_len;
+ CU_ASSERT(data_len == 0);
+
+ /* The second data buffer:
+ * - Set the data pattern, leaving a space for the metadata of each block.
+ */
+ buf2 = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf2 != NULL);
+ _iov_set_buf(&iov2, buf2, (4096 + 128) * 4);
+
+ rc = ut_data_pattern_generate(&iov2, 1, 4096 + 128, 128, 4);
+ CU_ASSERT(rc == 0);
+ rc = spdk_dif_generate(&iov2, 1, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(&iov1, 1, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(&iov2, 1, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* Compare the first and the second data buffer by byte. */
+ rc = memcmp(buf1, buf2, (4096 + 128) * 4);
+ CU_ASSERT(rc == 0);
+
+ free(buf1);
+ free(buf2);
+}
+
+static void
+set_md_interleave_iovs_split_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct iovec iovs1[7], iovs2[7], dif_iovs[8] = {};
+ uint32_t dif_check_flags, data_len, read_len, data_offset, mapped_len = 0;
+ int rc, i;
+
+ dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1,
+ dif_check_flags, 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ /* The first SGL data buffer:
+ * - Create an iovec array that leaves a space for the metadata of each block.
+ * - The vectored read is split, so the iovec array is re-created before
+ *   every vectored read.
+ */
+ _iov_alloc_buf(&iovs1[0], 512 + 8 + 128);
+ _iov_alloc_buf(&iovs1[1], 128);
+ _iov_alloc_buf(&iovs1[2], 256 + 8);
+ _iov_alloc_buf(&iovs1[3], 100);
+ _iov_alloc_buf(&iovs1[4], 412 + 5);
+ _iov_alloc_buf(&iovs1[5], 3 + 300);
+ _iov_alloc_buf(&iovs1[6], 212 + 8);
+
+ data_offset = 0;
+ data_len = 512 * 4;
+
+ /* 1st read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 8, iovs1, 7,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 8);
+ CU_ASSERT(mapped_len == 512 * 4);
+ CU_ASSERT(_iov_check(&dif_iovs[0], iovs1[0].iov_base, 512) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], iovs1[0].iov_base + 512 + 8, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], iovs1[1].iov_base, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], iovs1[2].iov_base, 256) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[4], iovs1[3].iov_base, 100) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[5], iovs1[4].iov_base, 412) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[6], iovs1[5].iov_base + 3, 300) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[7], iovs1[6].iov_base, 212) == true);
+
+ read_len = ut_readv(data_offset, 128, dif_iovs, 8);
+ CU_ASSERT(read_len == 128);
+
+ rc = spdk_dif_generate_stream(iovs1, 7, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 2nd read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 8, iovs1, 7,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 8);
+ CU_ASSERT(mapped_len == 384 + 512 * 3);
+ CU_ASSERT(_iov_check(&dif_iovs[0], iovs1[0].iov_base + 128, 384) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], iovs1[0].iov_base + 512 + 8, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], iovs1[1].iov_base, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], iovs1[2].iov_base, 256) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[4], iovs1[3].iov_base, 100) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[5], iovs1[4].iov_base, 412) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[6], iovs1[5].iov_base + 3, 300) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[7], iovs1[6].iov_base, 212) == true);
+
+ read_len = ut_readv(data_offset, 383, dif_iovs, 8);
+ CU_ASSERT(read_len == 383);
+
+ rc = spdk_dif_generate_stream(iovs1, 7, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 3rd read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 8, iovs1, 7,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 8);
+ CU_ASSERT(mapped_len == 1 + 512 * 3);
+ CU_ASSERT(_iov_check(&dif_iovs[0], iovs1[0].iov_base + 511, 1) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], iovs1[0].iov_base + 512 + 8, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], iovs1[1].iov_base, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], iovs1[2].iov_base, 256) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[4], iovs1[3].iov_base, 100) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[5], iovs1[4].iov_base, 412) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[6], iovs1[5].iov_base + 3, 300) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[7], iovs1[6].iov_base, 212) == true);
+
+ read_len = ut_readv(data_offset, 1 + 512 * 2 + 128, dif_iovs, 8);
+ CU_ASSERT(read_len == 1 + 512 * 2 + 128);
+
+ rc = spdk_dif_generate_stream(iovs1, 7, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 4th read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 8, iovs1, 7,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(mapped_len == 384);
+ CU_ASSERT(_iov_check(&dif_iovs[0], iovs1[5].iov_base + 3 + 128, 172) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], iovs1[6].iov_base, 212) == true);
+
+ read_len = ut_readv(data_offset, 384, dif_iovs, 8);
+ CU_ASSERT(read_len == 384);
+
+ rc = spdk_dif_generate_stream(iovs1, 7, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ CU_ASSERT(data_offset == 512 * 4);
+ data_len -= read_len;
+ CU_ASSERT(data_len == 0);
+
+ /* The second SGL data buffer:
+ * - Set the data pattern, leaving a space for the metadata of each block.
+ */
+ _iov_alloc_buf(&iovs2[0], 512 + 8 + 128);
+ _iov_alloc_buf(&iovs2[1], 128);
+ _iov_alloc_buf(&iovs2[2], 256 + 8);
+ _iov_alloc_buf(&iovs2[3], 100);
+ _iov_alloc_buf(&iovs2[4], 412 + 5);
+ _iov_alloc_buf(&iovs2[5], 3 + 300);
+ _iov_alloc_buf(&iovs2[6], 212 + 8);
+
+ rc = ut_data_pattern_generate(iovs2, 7, 512 + 8, 8, 4);
+ CU_ASSERT(rc == 0);
+ rc = spdk_dif_generate(iovs2, 7, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(iovs1, 7, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(iovs2, 7, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* Compare the first and the second SGL data buffer by byte. */
+ for (i = 0; i < 7; i++) {
+ rc = memcmp(iovs1[i].iov_base, iovs2[i].iov_base,
+ iovs1[i].iov_len);
+ CU_ASSERT(rc == 0);
+ }
+
+ for (i = 0; i < 7; i++) {
+ _iov_free_buf(&iovs1[i]);
+ _iov_free_buf(&iovs2[i]);
+ }
+}
+
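+/*
+ * Generate DIF over a sequence of arbitrary-length appends and confirm
+ * that a request reaching past the end of the buffer fails with
+ * -ERANGE, while the fully written buffer still verifies cleanly.
+ */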
+static void
+dif_generate_stream_test(void)
+{
+ struct iovec iov;
+ struct spdk_dif_ctx ctx;
+ struct spdk_dif_error err_blk;
+ uint32_t dif_flags;
+ int rc;
+
+ _iov_alloc_buf(&iov, (512 + 8) * 5);
+
+ rc = ut_data_pattern_generate(&iov, 1, 512 + 8, 8, 5);
+ CU_ASSERT(rc == 0);
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1, dif_flags,
+ 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 0, 511, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 511, 1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 512, 256, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 768, 512, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 1280, 1024, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 2304, 256, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 2560, 512, &ctx);
+ CU_ASSERT(rc == -ERANGE);
+
+ rc = spdk_dif_verify(&iov, 1, 5, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(&iov, 1, 512 + 8, 8, 5);
+ CU_ASSERT(rc == 0);
+
+ _iov_free_buf(&iov);
+}
+
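+/*
+ * Only the offset/length arithmetic is checked here, so the iovecs use
+ * fake pointer values (0xDEADBEEF etc.) that are never dereferenced.
+ */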
+static void
+set_md_interleave_iovs_alignment_test(void)
+{
+ struct iovec iovs[3], dif_iovs[5] = {};
+ uint32_t mapped_len = 0;
+ int rc;
+ struct spdk_dif_ctx ctx;
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1,
+ 0, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ /* The case where the buffer size is smaller than necessary. */
+ _iov_set_buf(&iovs[0], (uint8_t *)0xDEADBEEF, 1024);
+ _iov_set_buf(&iovs[1], (uint8_t *)0xFEEDBEEF, 1024);
+ _iov_set_buf(&iovs[2], (uint8_t *)0xC0FFEE, 24);
+
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 0, 2048, &mapped_len, &ctx);
+ CU_ASSERT(rc == -ERANGE);
+
+ /* The following are normal cases. */
+ _iov_set_buf(&iovs[2], (uint8_t *)0xC0FFEE, 32);
+
+ /* The data length is less than one data block. */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 0, 500, &mapped_len, &ctx);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(mapped_len == 500);
+ CU_ASSERT(_iov_check(&dif_iovs[0], (void *)0xDEADBEEF, 500) == true);
+
+ /* Pass a sufficient number of iovecs. */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 500, 1000, &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 1000);
+ CU_ASSERT(_iov_check(&dif_iovs[0], (void *)(0xDEADBEEF + 500), 12) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], (void *)(0xDEADBEEF + 520), 504) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], (void *)0xFEEDBEEF, 8) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], (void *)(0xFEEDBEEF + 16), 476) == true);
+
+ /* Pass fewer iovecs than necessary. */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 3, iovs, 3, 500, 1000, &mapped_len, &ctx);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(mapped_len == 524);
+ CU_ASSERT(_iov_check(&dif_iovs[0], (void *)(0xDEADBEEF + 500), 12) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], (void *)(0xDEADBEEF + 520), 504) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], (void *)0xFEEDBEEF, 8) == true);
+
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 1500, 500, &mapped_len, &ctx);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(mapped_len == 500);
+ CU_ASSERT(_iov_check(&dif_iovs[0], (void *)(0xFEEDBEEF + 492), 36) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], (void *)(0xFEEDBEEF + 536), 464) == true);
+
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 2000, 48, &mapped_len, &ctx);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(mapped_len == 48);
+ CU_ASSERT(_iov_check(&dif_iovs[0], (void *)0xFEEDBEEF + 1000, 24) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], (void *)0xC0FFEE, 24) == true);
+}
+
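+/*
+ * Drive the internal _dif_generate_split() helper directly: process one
+ * block in three partial ranges, carrying the running guard between
+ * calls, and check that each intermediate guard equals the CRC of the
+ * data bytes consumed so far.
+ */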
+static void
+_dif_generate_split_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct iovec iov;
+ uint8_t *buf1, *buf2;
+ struct _dif_sgl sgl;
+ uint16_t guard = 0, prev_guard;
+ uint32_t dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 0, 0, 0, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ buf1 = calloc(1, 4096 + 128);
+ SPDK_CU_ASSERT_FATAL(buf1 != NULL);
+ _iov_set_buf(&iov, buf1, 4096 + 128);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ guard = GUARD_SEED;
+ prev_guard = GUARD_SEED;
+
+ guard = _dif_generate_split(&sgl, 0, 1000, guard, 0, &ctx);
+ CU_ASSERT(sgl.iov_offset == 1000);
+ CU_ASSERT(guard == spdk_crc16_t10dif(prev_guard, buf1, 1000));
+
+ prev_guard = guard;
+
+ guard = _dif_generate_split(&sgl, 1000, 3000, guard, 0, &ctx);
+ CU_ASSERT(sgl.iov_offset == 4000);
+ CU_ASSERT(guard == spdk_crc16_t10dif(prev_guard, buf1 + 1000, 3000));
+
+ guard = _dif_generate_split(&sgl, 4000, 96 + 128, guard, 0, &ctx);
+ CU_ASSERT(guard == GUARD_SEED);
+ CU_ASSERT(sgl.iov_offset == 0);
+ CU_ASSERT(sgl.iovcnt == 0);
+
+ rc = ut_data_pattern_verify(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ rc = dif_verify(&sgl, 1, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ buf2 = calloc(1, 4096 + 128);
+ SPDK_CU_ASSERT_FATAL(buf2 != NULL);
+ _iov_set_buf(&iov, buf2, 4096 + 128);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ dif_generate(&sgl, 1, &ctx);
+
+ rc = ut_data_pattern_verify(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ rc = dif_verify(&sgl, 1, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = memcmp(buf1, buf2, 4096 + 128);
+ CU_ASSERT(rc == 0);
+
+ free(buf1);
+ free(buf2);
+}
+
+static void
+set_md_interleave_iovs_multi_segments_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct iovec iov1 = {}, iov2 = {}, dif_iovs[4] = {};
+ uint32_t dif_check_flags, data_len, read_len, data_offset, read_offset, mapped_len = 0;
+ uint8_t *buf1, *buf2;
+ int rc;
+
+ dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_check_flags, 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ /* The first data buffer:
+ * - The data buffer is split into multiple data segments.
+ * - For each data segment:
+ *   - Create an iovec array that leaves a space for the metadata of each block.
+ *   - The vectored read is split, so the iovec array is re-created before
+ *     every vectored read.
+ */
+ buf1 = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf1 != NULL);
+ _iov_set_buf(&iov1, buf1, (4096 + 128) * 4);
+
+ /* 1st data segment */
+ data_offset = 0;
+ data_len = 1024;
+
+ spdk_dif_ctx_set_data_offset(&ctx, data_offset);
+
+ read_offset = 0;
+
+ /* 1st read in 1st data segment */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ read_offset, data_len - read_offset,
+ &mapped_len, &ctx);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(mapped_len == 1024);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1, 1024) == true);
+
+ read_len = ut_readv(data_offset + read_offset, 1024, dif_iovs, 4);
+ CU_ASSERT(read_len == 1024);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, read_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ read_offset += read_len;
+ CU_ASSERT(read_offset == data_len);
+
+ /* 2nd data segment */
+ data_offset += data_len;
+ data_len = 3072 + 4096 * 2 + 512;
+
+ spdk_dif_ctx_set_data_offset(&ctx, data_offset);
+ _iov_set_buf(&iov1, buf1 + 1024, 3072 + 128 + (4096 + 128) * 3 + 512);
+
+ read_offset = 0;
+
+ /* 1st read in 2nd data segment */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ read_offset, data_len - read_offset,
+ &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 3072 + 4096 * 2 + 512);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + 1024, 3072) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], buf1 + 4096 + 128, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], buf1 + (4096 + 128) * 2, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], buf1 + (4096 + 128) * 3, 512) == true);
+
+ read_len = ut_readv(data_offset + read_offset, 3071, dif_iovs, 4);
+ CU_ASSERT(read_len == 3071);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, read_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ read_offset += read_len;
+
+ /* 2nd read in 2nd data segment */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ read_offset, data_len - read_offset,
+ &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 1 + 4096 * 2 + 512);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + 4095, 1) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], buf1 + 4096 + 128, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], buf1 + (4096 + 128) * 2, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], buf1 + (4096 + 128) * 3, 512) == true);
+
+ read_len = ut_readv(data_offset + read_offset, 1 + 4096 * 2 + 512, dif_iovs, 4);
+ CU_ASSERT(read_len == 1 + 4096 * 2 + 512);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, read_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ read_offset += read_len;
+ CU_ASSERT(read_offset == data_len);
+
+ /* 3rd data segment */
+ data_offset += data_len;
+ data_len = 3584;
+
+ spdk_dif_ctx_set_data_offset(&ctx, data_offset);
+ _iov_set_buf(&iov1, buf1 + (4096 + 128) * 3 + 512, 3584 + 128);
+
+ read_offset = 0;
+
+ /* 1st read in 3rd data segment */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ read_offset, data_len - read_offset,
+ &mapped_len, &ctx);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(mapped_len == 3584);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + (4096 + 128) * 3 + 512, 3584) == true);
+
+ read_len = ut_readv(data_offset + read_offset, 3584, dif_iovs, 1);
+ CU_ASSERT(read_len == 3584);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, read_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ read_offset += read_len;
+ CU_ASSERT(read_offset == data_len);
+ data_offset += data_len;
+ CU_ASSERT(data_offset == 4096 * 4);
+
+ spdk_dif_ctx_set_data_offset(&ctx, 0);
+ _iov_set_buf(&iov1, buf1, (4096 + 128) * 4);
+
+ /* The second data buffer:
+ * - Set the data pattern, leaving a space for the metadata of each block.
+ */
+ buf2 = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf2 != NULL);
+ _iov_set_buf(&iov2, buf2, (4096 + 128) * 4);
+
+ rc = ut_data_pattern_generate(&iov2, 1, 4096 + 128, 128, 4);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(&iov2, 1, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(&iov1, 1, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(&iov2, 1, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* Compare the first and the second data buffer by byte. */
+ rc = memcmp(buf1, buf2, (4096 + 128) * 4);
+ CU_ASSERT(rc == 0);
+
+ free(buf1);
+ free(buf2);
+}
+
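+/*
+ * Counterpart of _dif_generate_split_test(): verify one block in three
+ * partial ranges with _dif_verify_split(), checking the running guard
+ * and the SGL offsets after each call.
+ */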
+static void
+_dif_verify_split_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct iovec iov;
+ uint8_t *buf;
+ struct _dif_sgl sgl;
+ uint16_t guard = 0, prev_guard = 0;
+ uint32_t dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 0, 0, 0, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ buf = calloc(1, 4096 + 128);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ _iov_set_buf(&iov, buf, 4096 + 128);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ dif_generate(&sgl, 1, &ctx);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ guard = GUARD_SEED;
+ prev_guard = GUARD_SEED;
+
+ rc = _dif_verify_split(&sgl, 0, 1000, &guard, 0, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(guard == spdk_crc16_t10dif(prev_guard, buf, 1000));
+ CU_ASSERT(sgl.iov_offset == 1000);
+
+ prev_guard = guard;
+
+ rc = _dif_verify_split(&sgl, 1000, 3000, &guard, 0, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(guard == spdk_crc16_t10dif(prev_guard, buf + 1000, 3000));
+ CU_ASSERT(sgl.iov_offset == 4000);
+
+ rc = _dif_verify_split(&sgl, 4000, 96 + 128, &guard, 0, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(guard == GUARD_SEED);
+ CU_ASSERT(sgl.iov_offset == 0);
+ CU_ASSERT(sgl.iovcnt == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ rc = dif_verify(&sgl, 1, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ free(buf);
+}
+
+static void
+dif_verify_stream_multi_segments_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct iovec iov = {};
+ uint8_t *buf;
+ uint32_t dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ buf = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ _iov_set_buf(&iov, buf, (4096 + 128) * 4);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 4);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(&iov, 1, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* 1st data segment */
+ _iov_set_buf(&iov, buf, 1024);
+ spdk_dif_ctx_set_data_offset(&ctx, 0);
+
+ rc = spdk_dif_verify_stream(&iov, 1, 0, 1024, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* 2nd data segment */
+ _iov_set_buf(&iov, buf + 1024, (3072 + 128) + (4096 + 128) * 2 + 512);
+ spdk_dif_ctx_set_data_offset(&ctx, 1024);
+
+ rc = spdk_dif_verify_stream(&iov, 1, 0, 3072 + 4096 * 2 + 512, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* 3rd data segment */
+ _iov_set_buf(&iov, buf + (4096 + 128) * 3 + 512, 3584 + 128);
+ spdk_dif_ctx_set_data_offset(&ctx, 4096 * 3);
+
+ rc = spdk_dif_verify_stream(&iov, 1, 0, 3584, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* Verify all data segments in a single call. */
+ _iov_set_buf(&iov, buf, (4096 + 128) * 4);
+ spdk_dif_ctx_set_data_offset(&ctx, 0);
+
+ rc = spdk_dif_verify(&iov, 1, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(&iov, 1, 4096 + 128, 128, 4);
+ CU_ASSERT(rc == 0);
+
+ free(buf);
+}
+
+#define UT_CRC32C_XOR 0xffffffffUL
+
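+/*
+ * spdk_dif_update_crc32c() computes CRC-32C over the data bytes only,
+ * skipping the interleaved metadata. The CRC must therefore be
+ * unaffected by DIF generation and by how the buffer is split into
+ * iovecs.
+ */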
+static void
+update_crc32c_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct iovec iovs[7];
+ uint32_t crc32c1, crc32c2, crc32c3, crc32c4;
+ uint32_t dif_flags;
+ int i, rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ /* data[0][255:0] */
+ _iov_alloc_buf(&iovs[0], 256);
+
+ /* data[0][511:256], md[0][0] */
+ _iov_alloc_buf(&iovs[1], 256 + 1);
+
+ /* md[0][4:1] */
+ _iov_alloc_buf(&iovs[2], 4);
+
+ /* md[0][7:5], data[1][122:0] */
+ _iov_alloc_buf(&iovs[3], 3 + 123);
+
+ /* data[1][511:123], md[1][5:0] */
+ _iov_alloc_buf(&iovs[4], 389 + 6);
+
+ /* md[1][7:6], data[2][511:0], md[2][7:0], data[3][431:0] */
+ _iov_alloc_buf(&iovs[5], 2 + 512 + 8 + 432);
+
+ /* data[3][511:432], md[3][7:0] */
+ _iov_alloc_buf(&iovs[6], 80 + 8);
+
+ rc = ut_data_pattern_generate(iovs, 7, 512 + 8, 8, 4);
+ CU_ASSERT(rc == 0);
+
+ crc32c1 = UT_CRC32C_XOR;
+
+ rc = spdk_dif_update_crc32c(iovs, 7, 4, &crc32c1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* Test that generating DIF does not affect the CRC for the split case. */
+ rc = spdk_dif_generate(iovs, 7, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ crc32c2 = UT_CRC32C_XOR;
+
+ rc = spdk_dif_update_crc32c(iovs, 7, 4, &crc32c2, &ctx);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(crc32c1 == crc32c2);
+
+ for (i = 0; i < 7; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+
+ /* Test that the CRC is the same regardless of splitting. */
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 512 + 8);
+ }
+
+ rc = ut_data_pattern_generate(iovs, 4, 512 + 8, 8, 4);
+ CU_ASSERT(rc == 0);
+
+ crc32c3 = UT_CRC32C_XOR;
+
+ rc = spdk_dif_update_crc32c(iovs, 4, 4, &crc32c3, &ctx);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(crc32c1 == crc32c3);
+
+ /* Test that generating DIF does not affect the CRC for the non-split case. */
+ rc = spdk_dif_generate(iovs, 4, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ crc32c4 = UT_CRC32C_XOR;
+
+ rc = spdk_dif_update_crc32c(iovs, 4, 4, &crc32c4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(crc32c1 == crc32c4);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
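+/*
+ * Drive the internal _dif_update_crc32c_split() helper directly over
+ * three partial ranges of one block and check that the running CRC
+ * matches spdk_crc32c_update() over the corresponding data bytes.
+ */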
+static void
+_dif_update_crc32c_split_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct iovec iov;
+ uint8_t *buf;
+ struct _dif_sgl sgl;
+ uint32_t dif_flags, crc32c, prev_crc32c;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 0, 0, 0, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ buf = calloc(1, 4096 + 128);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ _iov_set_buf(&iov, buf, 4096 + 128);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ dif_generate(&sgl, 1, &ctx);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ crc32c = _dif_update_crc32c_split(&sgl, 0, 1000, UT_CRC32C_XOR, &ctx);
+ CU_ASSERT(crc32c == spdk_crc32c_update(buf, 1000, UT_CRC32C_XOR));
+
+ prev_crc32c = crc32c;
+
+ crc32c = _dif_update_crc32c_split(&sgl, 1000, 3000, prev_crc32c, &ctx);
+ CU_ASSERT(crc32c == spdk_crc32c_update(buf + 1000, 3000, prev_crc32c));
+
+ prev_crc32c = crc32c;
+
+ crc32c = _dif_update_crc32c_split(&sgl, 4000, 96 + 128, prev_crc32c, &ctx);
+ CU_ASSERT(crc32c == spdk_crc32c_update(buf + 4000, 96, prev_crc32c));
+
+ CU_ASSERT(crc32c == spdk_crc32c_update(buf, 4096, UT_CRC32C_XOR));
+
+ free(buf);
+}
+
+static void
+dif_update_crc32c_stream_multi_segments_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct iovec iov = {};
+ uint8_t *buf;
+ uint32_t dif_flags, crc32c1, crc32c2;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ buf = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ _iov_set_buf(&iov, buf, (4096 + 128) * 4);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 4);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(&iov, 1, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ crc32c1 = UT_CRC32C_XOR;
+ crc32c2 = UT_CRC32C_XOR;
+
+ /* 1st data segment */
+ _iov_set_buf(&iov, buf, 1024);
+ spdk_dif_ctx_set_data_offset(&ctx, 0);
+
+ rc = spdk_dif_update_crc32c_stream(&iov, 1, 0, 1024, &crc32c1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* 2nd data segment */
+ _iov_set_buf(&iov, buf + 1024, (3072 + 128) + (4096 + 128) * 2 + 512);
+ spdk_dif_ctx_set_data_offset(&ctx, 1024);
+
+ rc = spdk_dif_update_crc32c_stream(&iov, 1, 0, 3072 + 4096 * 2 + 512, &crc32c1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* 3rd data segment */
+ _iov_set_buf(&iov, buf + (4096 + 128) * 3 + 512, 3584 + 128);
+ spdk_dif_ctx_set_data_offset(&ctx, 4096 * 3);
+
+ rc = spdk_dif_update_crc32c_stream(&iov, 1, 0, 3584, &crc32c1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* Update the CRC32C over all data segments in a single call. */
+ _iov_set_buf(&iov, buf, (4096 + 128) * 4);
+ spdk_dif_ctx_set_data_offset(&ctx, 0);
+
+ rc = spdk_dif_update_crc32c(&iov, 1, 4, &crc32c2, &ctx);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(crc32c1 == crc32c2);
+
+ free(buf);
+}
+
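+/*
+ * spdk_dif_get_range_with_md() translates a data byte range into the
+ * corresponding range of the metadata-interleaved buffer by adding
+ * 128 bytes of metadata for every 4096-byte data block crossed, e.g.
+ * data offset 10240 (= 2.5 blocks) maps to buffer offset 10240 + 256.
+ */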
+static void
+get_range_with_md_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ uint32_t buf_offset, buf_len;
+ int rc;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, 0, 0, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ spdk_dif_get_range_with_md(0, 2048, &buf_offset, &buf_len, &ctx);
+ CU_ASSERT(buf_offset == 0);
+ CU_ASSERT(buf_len == 2048);
+
+ spdk_dif_get_range_with_md(2048, 4096, &buf_offset, &buf_len, &ctx);
+ CU_ASSERT(buf_offset == 2048);
+ CU_ASSERT(buf_len == 4096 + 128);
+
+ spdk_dif_get_range_with_md(4096, 10240, &buf_offset, &buf_len, &ctx);
+ CU_ASSERT(buf_offset == 4096 + 128);
+ CU_ASSERT(buf_len == 10240 + 256);
+
+ spdk_dif_get_range_with_md(10240, 2048, &buf_offset, &buf_len, &ctx);
+ CU_ASSERT(buf_offset == 10240 + 256);
+ CU_ASSERT(buf_len == 2048 + 128);
+
+ buf_len = spdk_dif_get_length_with_md(6144, &ctx);
+ CU_ASSERT(buf_len == 6144 + 128);
+}
+
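+/*
+ * Generate DIF with one initial reference tag, remap the reference tags
+ * in place with spdk_dif_remap_ref_tag(), and confirm the buffer then
+ * verifies against a context initialized with the remapped tag.
+ */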
+static void
+dif_generate_remap_and_verify(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint32_t remapped_init_ref_tag,
+ uint16_t apptag_mask, uint16_t app_tag)
+{
+ struct spdk_dif_ctx ctx = {};
+ int rc;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc, dif_type, dif_flags,
+ init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(iovs, iovcnt, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ spdk_dif_ctx_set_remapped_init_ref_tag(&ctx, remapped_init_ref_tag);
+
+ rc = spdk_dif_remap_ref_tag(iovs, iovcnt, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc, dif_type, dif_flags,
+ remapped_init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(iovs, iovcnt, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dif_sec_4096_md_128_prchk_7_multi_iovs_remap_test(void)
+{
+ struct iovec iovs[4];
+ int i, num_blocks;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], (512 + 8) * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ dif_generate_remap_and_verify(iovs, 4, 512 + 8, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 99, 0xFFFF, 0x22);
+
+ dif_generate_remap_and_verify(iovs, 4, 512 + 8, 8, num_blocks, true, SPDK_DIF_TYPE1,
+ dif_flags, 22, 99, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dif_sec_4096_md_128_prchk_7_multi_iovs_complex_splits_remap_test(void)
+{
+ struct iovec iovs[11];
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ /* data[0][999:0] */
+ _iov_alloc_buf(&iovs[0], 1000);
+
+ /* data[0][4095:1000], guard[0][0] */
+ _iov_alloc_buf(&iovs[1], 3096 + 1);
+
+ /* guard[0][1], apptag[0][0] */
+ _iov_alloc_buf(&iovs[2], 1 + 1);
+
+ /* apptag[0][1], reftag[0][0] */
+ _iov_alloc_buf(&iovs[3], 1 + 1);
+
+ /* reftag[0][3:1], ignore[0][59:0] */
+ _iov_alloc_buf(&iovs[4], 3 + 60);
+
+ /* ignore[0][119:60], data[1][3050:0] */
+ _iov_alloc_buf(&iovs[5], 60 + 3051);
+
+ /* data[1][4095:3051], guard[1][0] */
+ _iov_alloc_buf(&iovs[6], 1045 + 1);
+
+ /* guard[1][1], apptag[1][0] */
+ _iov_alloc_buf(&iovs[7], 1 + 1);
+
+ /* apptag[1][1], reftag[1][0] */
+ _iov_alloc_buf(&iovs[8], 1 + 1);
+
+ /* reftag[1][3:1], ignore[1][9:0] */
+ _iov_alloc_buf(&iovs[9], 3 + 10);
+
+ /* ignore[1][119:10] (plus 8 trailing bytes that are unused) */
+ _iov_alloc_buf(&iovs[10], 118);
+
+ dif_generate_remap_and_verify(iovs, 11, 4096 + 128, 128, 2, false, SPDK_DIF_TYPE1, dif_flags,
+ 22, 99, 0xFFFF, 0x22);
+ dif_generate_remap_and_verify(iovs, 11, 4096 + 128, 128, 2, true, SPDK_DIF_TYPE1, dif_flags,
+ 22, 99, 0xFFFF, 0x22);
+
+ for (i = 0; i < 11; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dix_generate_remap_and_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint32_t remapped_init_ref_tag,
+ uint16_t apptag_mask, uint16_t app_tag)
+{
+ struct spdk_dif_ctx ctx;
+ int rc;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, false, dif_loc, dif_type, dif_flags,
+ init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_generate(iovs, iovcnt, md_iov, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ spdk_dif_ctx_set_remapped_init_ref_tag(&ctx, remapped_init_ref_tag);
+
+ rc = spdk_dix_remap_ref_tag(md_iov, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, false, dif_loc, dif_type, dif_flags,
+ remapped_init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_verify(iovs, iovcnt, md_iov, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dix_sec_4096_md_128_prchk_7_multi_iovs_remap(void)
+{
+ struct iovec iovs[4], md_iov;
+ uint32_t dif_flags;
+ int i, num_blocks;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 4096 * (i + 1));
+ num_blocks += i + 1;
+ }
+ _iov_alloc_buf(&md_iov, 128 * num_blocks);
+
+ dix_generate_remap_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 99, 0xFFFF, 0x22);
+ dix_generate_remap_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, true, SPDK_DIF_TYPE1,
+ dif_flags, 22, 99, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_512_md_8_prchk_7_multi_iovs_complex_splits_remap(void)
+{
+ struct iovec iovs[6], md_iov;
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ /* data[0][255:0] */
+ _iov_alloc_buf(&iovs[0], 256);
+
+ /* data[0][511:256], data[1][255:0] */
+ _iov_alloc_buf(&iovs[1], 256 + 256);
+
+ /* data[1][383:256] */
+ _iov_alloc_buf(&iovs[2], 128);
+
+ /* data[1][384] */
+ _iov_alloc_buf(&iovs[3], 1);
+
+ /* data[1][510:385] */
+ _iov_alloc_buf(&iovs[4], 126);
+
+ /* data[1][511], data[2][511:0], data[3][511:0] */
+ _iov_alloc_buf(&iovs[5], 1 + 512 * 2);
+
+ _iov_alloc_buf(&md_iov, 8 * 4);
+
+ dix_generate_remap_and_verify(iovs, 6, &md_iov, 512, 8, 4, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 99, 0xFFFF, 0x22);
+
+ for (i = 0; i < 6; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("dif", NULL, NULL);
+
+ CU_ADD_TEST(suite, dif_generate_and_verify_test);
+ CU_ADD_TEST(suite, dif_disable_check_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_0_error_test);
+ CU_ADD_TEST(suite, dif_guard_seed_test);
+ CU_ADD_TEST(suite, dif_disable_sec_512_md_8_single_iov_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_0_single_iov_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_0_1_2_4_multi_iovs_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_prchk_7_multi_iovs_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_split_data_and_md_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_split_data_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_split_guard_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_split_apptag_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_split_reftag_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_complex_splits_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_prchk_7_multi_iovs_complex_splits_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_data_and_md_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_data_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_guard_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_apptag_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_reftag_test);
+ CU_ADD_TEST(suite, dif_copy_sec_512_md_8_prchk_0_single_iov);
+ CU_ADD_TEST(suite, dif_copy_sec_512_md_8_prchk_0_1_2_4_multi_iovs);
+ CU_ADD_TEST(suite, dif_copy_sec_4096_md_128_prchk_7_multi_iovs);
+ CU_ADD_TEST(suite, dif_copy_sec_512_md_8_prchk_7_multi_iovs_split_data);
+ CU_ADD_TEST(suite, dif_copy_sec_512_md_8_prchk_7_multi_iovs_complex_splits);
+ CU_ADD_TEST(suite, dif_copy_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test);
+ CU_ADD_TEST(suite, dif_copy_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_test);
+ CU_ADD_TEST(suite, dix_sec_512_md_0_error);
+ CU_ADD_TEST(suite, dix_sec_512_md_8_prchk_0_single_iov);
+ CU_ADD_TEST(suite, dix_sec_512_md_8_prchk_0_1_2_4_multi_iovs);
+ CU_ADD_TEST(suite, dix_sec_4096_md_128_prchk_7_multi_iovs);
+ CU_ADD_TEST(suite, dix_sec_512_md_8_prchk_7_multi_iovs_split_data);
+ CU_ADD_TEST(suite, dix_sec_512_md_8_prchk_7_multi_iovs_complex_splits);
+ CU_ADD_TEST(suite, dix_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test);
+ CU_ADD_TEST(suite, dix_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_test);
+ CU_ADD_TEST(suite, set_md_interleave_iovs_test);
+ CU_ADD_TEST(suite, set_md_interleave_iovs_split_test);
+ CU_ADD_TEST(suite, dif_generate_stream_test);
+ CU_ADD_TEST(suite, set_md_interleave_iovs_alignment_test);
+ CU_ADD_TEST(suite, _dif_generate_split_test);
+ CU_ADD_TEST(suite, set_md_interleave_iovs_multi_segments_test);
+ CU_ADD_TEST(suite, _dif_verify_split_test);
+ CU_ADD_TEST(suite, dif_verify_stream_multi_segments_test);
+ CU_ADD_TEST(suite, update_crc32c_test);
+ CU_ADD_TEST(suite, _dif_update_crc32c_split_test);
+ CU_ADD_TEST(suite, dif_update_crc32c_stream_multi_segments_test);
+ CU_ADD_TEST(suite, get_range_with_md_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_prchk_7_multi_iovs_remap_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_prchk_7_multi_iovs_complex_splits_remap_test);
+ CU_ADD_TEST(suite, dix_sec_4096_md_128_prchk_7_multi_iovs_remap);
+ CU_ADD_TEST(suite, dix_sec_512_md_8_prchk_7_multi_iovs_complex_splits_remap);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/iov.c/.gitignore b/src/spdk/test/unit/lib/util/iov.c/.gitignore
new file mode 100644
index 000000000..94d8d9621
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/iov.c/.gitignore
@@ -0,0 +1 @@
+iov_ut
diff --git a/src/spdk/test/unit/lib/util/iov.c/Makefile b/src/spdk/test/unit/lib/util/iov.c/Makefile
new file mode 100644
index 000000000..c7b4ccd5a
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/iov.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = iov_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/iov.c/iov_ut.c b/src/spdk/test/unit/lib/util/iov.c/iov_ut.c
new file mode 100644
index 000000000..248ab91ff
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/iov.c/iov_ut.c
@@ -0,0 +1,249 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
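+/*
+ * The implementation under test is #included directly (rather than
+ * linked) so this test binary gets its own copy of the code, including
+ * any file-static helpers. This is the convention used throughout these
+ * unit tests.
+ */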
+#include "util/iov.c"
+
+static int
+_check_val(void *buf, size_t len, uint8_t val)
+{
+ size_t i;
+ uint8_t *data = buf;
+
+ for (i = 0; i < len; i++) {
+ if (data[i] != val) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void
+test_single_iov(void)
+{
+ struct iovec siov[1];
+ struct iovec diov[1];
+ uint8_t sdata[64];
+ uint8_t ddata[64];
+ ssize_t rc;
+
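+	/*
+	 * spdk_iovcpy() returns the number of bytes copied: the smaller of
+	 * the total source and destination lengths, as the asserts below show.
+	 */
+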
+	/* Simplest case: one element in each iovec. */
+
+ /* Same size. */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ siov[0].iov_base = sdata;
+ siov[0].iov_len = sizeof(sdata);
+ diov[0].iov_base = ddata;
+ diov[0].iov_len = sizeof(ddata);
+
+ rc = spdk_iovcpy(siov, 1, diov, 1);
+ CU_ASSERT(rc == sizeof(sdata));
+ CU_ASSERT(_check_val(ddata, 64, 1) == 0);
+
+ /* Source smaller than dest */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ siov[0].iov_base = sdata;
+ siov[0].iov_len = 48;
+ diov[0].iov_base = ddata;
+ diov[0].iov_len = sizeof(ddata);
+
+ rc = spdk_iovcpy(siov, 1, diov, 1);
+ CU_ASSERT(rc == 48);
+ CU_ASSERT(_check_val(ddata, 48, 1) == 0);
+ CU_ASSERT(_check_val(&ddata[48], 16, 0) == 0);
+
+ /* Dest smaller than source */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ siov[0].iov_base = sdata;
+ siov[0].iov_len = sizeof(sdata);
+ diov[0].iov_base = ddata;
+ diov[0].iov_len = 48;
+
+ rc = spdk_iovcpy(siov, 1, diov, 1);
+ CU_ASSERT(rc == 48);
+ CU_ASSERT(_check_val(ddata, 48, 1) == 0);
+ CU_ASSERT(_check_val(&ddata[48], 16, 0) == 0);
+}
+
+static void
+test_simple_iov(void)
+{
+ struct iovec siov[4];
+ struct iovec diov[4];
+ uint8_t sdata[64];
+ uint8_t ddata[64];
+ ssize_t rc;
+ int i;
+
+ /* Simple cases with 4 iov elements */
+
+ /* Same size. */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ for (i = 0; i < 4; i++) {
+ siov[i].iov_base = sdata + (16 * i);
+ siov[i].iov_len = 16;
+ diov[i].iov_base = ddata + (16 * i);
+ diov[i].iov_len = 16;
+ }
+
+ rc = spdk_iovcpy(siov, 4, diov, 4);
+ CU_ASSERT(rc == sizeof(sdata));
+ CU_ASSERT(_check_val(ddata, 64, 1) == 0);
+
+ /* Source smaller than dest */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ for (i = 0; i < 4; i++) {
+ siov[i].iov_base = sdata + (8 * i);
+ siov[i].iov_len = 8;
+ diov[i].iov_base = ddata + (16 * i);
+ diov[i].iov_len = 16;
+ }
+
+ rc = spdk_iovcpy(siov, 4, diov, 4);
+ CU_ASSERT(rc == 32);
+ CU_ASSERT(_check_val(ddata, 32, 1) == 0);
+ CU_ASSERT(_check_val(&ddata[32], 32, 0) == 0);
+
+ /* Dest smaller than source */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ for (i = 0; i < 4; i++) {
+ siov[i].iov_base = sdata + (16 * i);
+ siov[i].iov_len = 16;
+ diov[i].iov_base = ddata + (8 * i);
+ diov[i].iov_len = 8;
+ }
+
+ rc = spdk_iovcpy(siov, 4, diov, 4);
+ CU_ASSERT(rc == 32);
+ CU_ASSERT(_check_val(ddata, 32, 1) == 0);
+ CU_ASSERT(_check_val(&ddata[32], 32, 0) == 0);
+}
+
+static void
+test_complex_iov(void)
+{
+ struct iovec siov[4];
+ struct iovec diov[4];
+ uint8_t sdata[64];
+ uint8_t ddata[64];
+ ssize_t rc;
+ int i;
+
+ /* More source elements */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ for (i = 0; i < 4; i++) {
+ siov[i].iov_base = sdata + (16 * i);
+ siov[i].iov_len = 16;
+ }
+ diov[0].iov_base = ddata;
+ diov[0].iov_len = sizeof(ddata);
+
+ rc = spdk_iovcpy(siov, 4, diov, 1);
+ CU_ASSERT(rc == sizeof(sdata));
+ CU_ASSERT(_check_val(ddata, 64, 1) == 0);
+
+ /* More dest elements */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ for (i = 0; i < 4; i++) {
+ diov[i].iov_base = ddata + (16 * i);
+ diov[i].iov_len = 16;
+ }
+ siov[0].iov_base = sdata;
+ siov[0].iov_len = sizeof(sdata);
+
+ rc = spdk_iovcpy(siov, 1, diov, 4);
+ CU_ASSERT(rc == sizeof(sdata));
+ CU_ASSERT(_check_val(ddata, 64, 1) == 0);
+
+	/* Build a deliberately irregular, misaligned layout by hand */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ siov[0].iov_base = sdata;
+ siov[0].iov_len = 1;
+ siov[1].iov_base = siov[0].iov_base + siov[0].iov_len;
+ siov[1].iov_len = 13;
+ siov[2].iov_base = siov[1].iov_base + siov[1].iov_len;
+ siov[2].iov_len = 6;
+ siov[3].iov_base = siov[2].iov_base + siov[2].iov_len;
+ siov[3].iov_len = 44;
+
+ diov[0].iov_base = ddata;
+ diov[0].iov_len = 31;
+ diov[1].iov_base = diov[0].iov_base + diov[0].iov_len;
+ diov[1].iov_len = 9;
+ diov[2].iov_base = diov[1].iov_base + diov[1].iov_len;
+ diov[2].iov_len = 1;
+ diov[3].iov_base = diov[2].iov_base + diov[2].iov_len;
+ diov[3].iov_len = 23;
+
+ rc = spdk_iovcpy(siov, 4, diov, 4);
+ CU_ASSERT(rc == 64);
+ CU_ASSERT(_check_val(ddata, 64, 1) == 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("iov", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_single_iov);
+ CU_ADD_TEST(suite, test_simple_iov);
+ CU_ADD_TEST(suite, test_complex_iov);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/math.c/.gitignore b/src/spdk/test/unit/lib/util/math.c/.gitignore
new file mode 100644
index 000000000..e51846f2b
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/math.c/.gitignore
@@ -0,0 +1 @@
+math_ut
diff --git a/src/spdk/test/unit/lib/util/math.c/Makefile b/src/spdk/test/unit/lib/util/math.c/Makefile
new file mode 100644
index 000000000..e8b20c6be
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/math.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+TEST_FILE = math_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/math.c/math_ut.c b/src/spdk/test/unit/lib/util/math.c/math_ut.c
new file mode 100644
index 000000000..66e063e12
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/math.c/math_ut.c
@@ -0,0 +1,81 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/math.c"
+
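+/*
+ * spdk_sn32_add/gt/lt implement 32-bit serial number arithmetic: values
+ * wrap around UINT32_MAX and comparisons are evaluated modulo 2^32, so
+ * e.g. 0 compares as "greater than" UINT32_MAX (see the asserts below).
+ */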
+static void
+test_serial_number_arithmetic(void)
+{
+ CU_ASSERT(spdk_sn32_add(0, 1) == 1);
+ CU_ASSERT(spdk_sn32_add(1, 1) == 2);
+ CU_ASSERT(spdk_sn32_add(1, 2) == 3);
+ CU_ASSERT(spdk_sn32_add(1, UINT32_MAX) == 0);
+ CU_ASSERT(spdk_sn32_add(UINT32_MAX, UINT32_MAX) == UINT32_MAX - 1);
+ CU_ASSERT(spdk_sn32_gt(1, 0) == true);
+ CU_ASSERT(spdk_sn32_gt(2, 1) == true);
+ CU_ASSERT(spdk_sn32_gt(UINT32_MAX, UINT32_MAX - 1) == true);
+ CU_ASSERT(spdk_sn32_gt(0, UINT32_MAX) == true);
+ CU_ASSERT(spdk_sn32_gt(100, UINT32_MAX - 100) == true);
+ CU_ASSERT(spdk_sn32_lt(1, 0) == false);
+ CU_ASSERT(spdk_sn32_lt(2, 1) == false);
+ CU_ASSERT(spdk_sn32_lt(UINT32_MAX, UINT32_MAX - 1) == false);
+ CU_ASSERT(spdk_sn32_lt(0, UINT32_MAX) == false);
+ CU_ASSERT(spdk_sn32_lt(100, UINT32_MAX - 100) == false);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("math", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_serial_number_arithmetic);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/pipe.c/.gitignore b/src/spdk/test/unit/lib/util/pipe.c/.gitignore
new file mode 100644
index 000000000..493aa5572
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/pipe.c/.gitignore
@@ -0,0 +1 @@
+pipe_ut
diff --git a/src/spdk/test/unit/lib/util/pipe.c/Makefile b/src/spdk/test/unit/lib/util/pipe.c/Makefile
new file mode 100644
index 000000000..99592cfb4
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/pipe.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = pipe_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/pipe.c/pipe_ut.c b/src/spdk/test/unit/lib/util/pipe.c/pipe_ut.c
new file mode 100644
index 000000000..8ac76dfe9
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/pipe.c/pipe_ut.c
@@ -0,0 +1,653 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/pipe.c"
+#include "common/lib/test_env.c"
+
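+/*
+ * Note on capacity: a pipe built over an N-byte buffer exposes at most
+ * N - 1 writable bytes. One slot is always left empty so that a full
+ * pipe (write == read - 1, modulo N) can be distinguished from an empty
+ * one (write == read). The tests below use a 10-byte buffer, so 9 bytes
+ * are usable.
+ */
+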
+static void
+test_create_destroy(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
+ spdk_pipe_destroy(pipe);
+}
+
+static void
+test_write_get_buffer(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+ struct iovec iovs[2];
+ int rc;
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
+ /* Get some available memory. */
+ rc = spdk_pipe_writer_get_buffer(pipe, 5, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 5);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get 0 bytes. */
+ rc = spdk_pipe_writer_get_buffer(pipe, 0, iovs);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iovs[0].iov_base == NULL);
+ CU_ASSERT(iovs[0].iov_len == 0);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get all available memory */
+ rc = spdk_pipe_writer_get_buffer(pipe, 9, iovs);
+ CU_ASSERT(rc == 9);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 9);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get the full size of the data buffer backing the pipe, which isn't allowed */
+ rc = spdk_pipe_writer_get_buffer(pipe, 10, iovs);
+ CU_ASSERT(rc == 9);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 9);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Advance the write pointer 7 bytes in. */
+ pipe->write = 7;
+
+ /* Get all of the available memory. */
+ rc = spdk_pipe_writer_get_buffer(pipe, 2, iovs);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 2);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more than the available memory */
+ rc = spdk_pipe_writer_get_buffer(pipe, 3, iovs);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 2);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Advance the read pointer 3 bytes in. */
+ pipe->read = 3;
+
+ /* Get all of the available memory. */
+ rc = spdk_pipe_writer_get_buffer(pipe, 5, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 3);
+ CU_ASSERT(iovs[1].iov_base == mem);
+ CU_ASSERT(iovs[1].iov_len == 2);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 3);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more than the available memory */
+ rc = spdk_pipe_writer_get_buffer(pipe, 6, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 3);
+ CU_ASSERT(iovs[1].iov_base == mem);
+ CU_ASSERT(iovs[1].iov_len == 2);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 3);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Advance the read pointer past the write pointer */
+ pipe->read = 9;
+
+ /* Get all of the available memory. */
+ rc = spdk_pipe_writer_get_buffer(pipe, 1, iovs);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 1);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 9);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more than the available memory */
+ rc = spdk_pipe_writer_get_buffer(pipe, 2, iovs);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 1);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 9);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Fill the pipe */
+ pipe->write = 8;
+
+ /* Get data while the pipe is full */
+ rc = spdk_pipe_writer_get_buffer(pipe, 1, iovs);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iovs[0].iov_base == NULL);
+ CU_ASSERT(iovs[0].iov_len == 0);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 8);
+ CU_ASSERT(pipe->read == 9);
+
+ spdk_pipe_destroy(pipe);
+}
+
+static void
+test_write_advance(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+ int rc;
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
+	/* Advance halfway through the pipe */
+ rc = spdk_pipe_writer_advance(pipe, 5);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->write == 5);
+ CU_ASSERT(pipe->read == 0);
+
+ pipe->write = 0;
+
+ /* Advance to the end of the pipe */
+ rc = spdk_pipe_writer_advance(pipe, 9);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->write == 9);
+ CU_ASSERT(pipe->read == 0);
+
+ pipe->write = 0;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_writer_advance(pipe, 10);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 0);
+
+ /* Move the read pointer forward */
+ pipe->write = 0;
+ pipe->read = 5;
+
+	/* Advance as far as allowed (to one byte before the read pointer) */
+ rc = spdk_pipe_writer_advance(pipe, 4);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->write == 4);
+ CU_ASSERT(pipe->read == 5);
+
+ pipe->write = 0;
+ pipe->read = 5;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_writer_advance(pipe, 5);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 5);
+
+ /* Test wrap around */
+ pipe->write = 7;
+ pipe->read = 3;
+
+	/* Advance past the end of the buffer, wrapping around to just before the read pointer */
+ rc = spdk_pipe_writer_advance(pipe, 5);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->write == 2);
+ CU_ASSERT(pipe->read == 3);
+
+ pipe->write = 7;
+ pipe->read = 3;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_writer_advance(pipe, 6);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 3);
+
+ spdk_pipe_destroy(pipe);
+}
+
+static void
+test_read_get_buffer(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+ struct iovec iovs[2];
+ int rc;
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
+ /* Set the write pointer to the end, making all data available. */
+ pipe->write = 9;
+
+ /* Get half the available memory. */
+ rc = spdk_pipe_reader_get_buffer(pipe, 5, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 5);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 9);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get 0 bytes. */
+ rc = spdk_pipe_reader_get_buffer(pipe, 0, iovs);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iovs[0].iov_base == NULL);
+ CU_ASSERT(iovs[0].iov_len == 0);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 9);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get all available memory */
+ rc = spdk_pipe_reader_get_buffer(pipe, 9, iovs);
+ CU_ASSERT(rc == 9);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 9);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 9);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more bytes than exist */
+ rc = spdk_pipe_reader_get_buffer(pipe, 10, iovs);
+ CU_ASSERT(rc == 9);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 9);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 9);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Advance the read pointer 5 bytes in. */
+ pipe->read = 5;
+ pipe->write = 0;
+
+ /* Get all of the available memory. */
+ rc = spdk_pipe_reader_get_buffer(pipe, 5, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (mem + 5));
+ CU_ASSERT(iovs[0].iov_len == 5);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 5);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more than the available memory */
+ rc = spdk_pipe_reader_get_buffer(pipe, 6, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (mem + 5));
+ CU_ASSERT(iovs[0].iov_len == 5);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 5);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Invert the write and read pointers */
+ pipe->read = 7;
+ pipe->write = 3;
+
+ /* Get all of the available memory. */
+ rc = spdk_pipe_reader_get_buffer(pipe, 6, iovs);
+ CU_ASSERT(rc == 6);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 3);
+ CU_ASSERT(iovs[1].iov_base == mem);
+ CU_ASSERT(iovs[1].iov_len == 3);
+ CU_ASSERT(pipe->write == 3);
+ CU_ASSERT(pipe->read == 7);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more than the available memory */
+ rc = spdk_pipe_reader_get_buffer(pipe, 7, iovs);
+ CU_ASSERT(rc == 6);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 3);
+ CU_ASSERT(iovs[1].iov_base == mem);
+ CU_ASSERT(iovs[1].iov_len == 3);
+ CU_ASSERT(pipe->write == 3);
+ CU_ASSERT(pipe->read == 7);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Empty the pipe */
+ pipe->read = 8;
+ pipe->write = 8;
+
+ /* Get data while the pipe is empty */
+ rc = spdk_pipe_reader_get_buffer(pipe, 1, iovs);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iovs[0].iov_base == NULL);
+ CU_ASSERT(iovs[0].iov_len == 0);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 8);
+ CU_ASSERT(pipe->read == 8);
+
+ spdk_pipe_destroy(pipe);
+}
+
+static void
+test_read_advance(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+ int rc;
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
+ pipe->read = 0;
+ pipe->write = 9;
+
+	/* Advance halfway through the pipe */
+ rc = spdk_pipe_reader_advance(pipe, 5);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->read == 5);
+ CU_ASSERT(pipe->write == 9);
+
+ pipe->read = 0;
+ pipe->write = 9;
+
+ /* Advance to the end of the pipe */
+ rc = spdk_pipe_reader_advance(pipe, 9);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->read == 9);
+ CU_ASSERT(pipe->write == 9);
+
+ pipe->read = 0;
+ pipe->write = 9;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_reader_advance(pipe, 10);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->read == 0);
+ CU_ASSERT(pipe->write == 9);
+
+ /* Move the write pointer forward */
+ pipe->read = 0;
+ pipe->write = 5;
+
+	/* Advance up to the write pointer, consuming all data */
+ rc = spdk_pipe_reader_advance(pipe, 5);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->write == 5);
+ CU_ASSERT(pipe->read == 5);
+
+ pipe->read = 0;
+ pipe->write = 5;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_reader_advance(pipe, 6);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->read == 0);
+ CU_ASSERT(pipe->write == 5);
+
+ /* Test wrap around */
+ pipe->read = 7;
+ pipe->write = 3;
+
+	/* Advance through the wrap-around, up to the write pointer */
+ rc = spdk_pipe_reader_advance(pipe, 6);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->read == 3);
+ CU_ASSERT(pipe->write == 3);
+
+ pipe->read = 7;
+ pipe->write = 3;
+
+ /* Advance beyond the end */
+	rc = spdk_pipe_reader_advance(pipe, 7);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->read == 7);
+ CU_ASSERT(pipe->write == 3);
+
+ spdk_pipe_destroy(pipe);
+}
+
+static void
+test_data(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+ struct iovec iovs[2];
+ uint8_t *data;
+ int rc;
+ size_t i;
+
+ memset(mem, 0, sizeof(mem));
+ memset(iovs, 0, sizeof(iovs));
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
+ /* Place 1 byte in the pipe */
+ rc = spdk_pipe_writer_get_buffer(pipe, 1, iovs);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base != NULL);
+ CU_ASSERT(iovs[0].iov_len == 1);
+
+ memset(iovs[0].iov_base, 'A', 1);
+
+ rc = spdk_pipe_writer_advance(pipe, 1);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(mem[0] == 'A');
+ CU_ASSERT(mem[1] == 0);
+ CU_ASSERT(mem[2] == 0);
+ CU_ASSERT(mem[3] == 0);
+ CU_ASSERT(mem[4] == 0);
+ CU_ASSERT(mem[5] == 0);
+ CU_ASSERT(mem[6] == 0);
+ CU_ASSERT(mem[7] == 0);
+ CU_ASSERT(mem[8] == 0);
+ CU_ASSERT(mem[9] == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get 1 byte from the pipe */
+ CU_ASSERT(spdk_pipe_reader_bytes_available(pipe) == 1);
+ rc = spdk_pipe_reader_get_buffer(pipe, 10, iovs);
+ CU_ASSERT(rc == 1);
+
+ data = iovs[0].iov_base;
+	CU_ASSERT(*data == 'A');
+
+ spdk_pipe_reader_advance(pipe, 1);
+
+ /* Put 9 more bytes in the pipe, so every byte has
+ * been written */
+ rc = spdk_pipe_writer_get_buffer(pipe, 9, iovs);
+ CU_ASSERT(rc == 9);
+ CU_ASSERT(iovs[0].iov_len == 9);
+ CU_ASSERT(iovs[1].iov_len == 0);
+
+ memset(iovs[0].iov_base, 'B', iovs[0].iov_len);
+
+ rc = spdk_pipe_writer_advance(pipe, 9);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(mem[0] == 'A');
+ CU_ASSERT(mem[1] == 'B');
+ CU_ASSERT(mem[2] == 'B');
+ CU_ASSERT(mem[3] == 'B');
+ CU_ASSERT(mem[4] == 'B');
+ CU_ASSERT(mem[5] == 'B');
+ CU_ASSERT(mem[6] == 'B');
+ CU_ASSERT(mem[7] == 'B');
+ CU_ASSERT(mem[8] == 'B');
+ CU_ASSERT(mem[9] == 'B');
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get 7 bytes of the previously written 9. */
+ CU_ASSERT(spdk_pipe_reader_bytes_available(pipe) == 9);
+ rc = spdk_pipe_reader_get_buffer(pipe, 7, iovs);
+ CU_ASSERT(rc == 7);
+
+ CU_ASSERT(iovs[0].iov_len == 7);
+ data = iovs[0].iov_base;
+ for (i = 0; i < iovs[0].iov_len; i++) {
+ CU_ASSERT(data[i] == 'B');
+ }
+
+ spdk_pipe_reader_advance(pipe, 7);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Put 1 more byte in the pipe, overwriting the original 'A' */
+ rc = spdk_pipe_writer_get_buffer(pipe, 1, iovs);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_len == 1);
+ CU_ASSERT(iovs[1].iov_len == 0);
+
+ memset(iovs[0].iov_base, 'C', iovs[0].iov_len);
+
+ rc = spdk_pipe_writer_advance(pipe, 1);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(mem[0] == 'C');
+ CU_ASSERT(mem[1] == 'B');
+ CU_ASSERT(mem[2] == 'B');
+ CU_ASSERT(mem[3] == 'B');
+ CU_ASSERT(mem[4] == 'B');
+ CU_ASSERT(mem[5] == 'B');
+ CU_ASSERT(mem[6] == 'B');
+ CU_ASSERT(mem[7] == 'B');
+ CU_ASSERT(mem[8] == 'B');
+ CU_ASSERT(mem[9] == 'B');
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get all of the data out of the pipe */
+ CU_ASSERT(spdk_pipe_reader_bytes_available(pipe) == 3);
+ rc = spdk_pipe_reader_get_buffer(pipe, 3, iovs);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_len == 2);
+ CU_ASSERT(iovs[1].iov_len == 1);
+
+ data = iovs[0].iov_base;
+ CU_ASSERT(data[0] == 'B');
+ CU_ASSERT(data[1] == 'B');
+ data = iovs[1].iov_base;
+ CU_ASSERT(data[0] == 'C');
+
+ spdk_pipe_reader_advance(pipe, 3);
+
+ spdk_pipe_destroy(pipe);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("pipe", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_create_destroy);
+ CU_ADD_TEST(suite, test_write_get_buffer);
+ CU_ADD_TEST(suite, test_write_advance);
+ CU_ADD_TEST(suite, test_read_get_buffer);
+ CU_ADD_TEST(suite, test_read_advance);
+ CU_ADD_TEST(suite, test_data);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/string.c/.gitignore b/src/spdk/test/unit/lib/util/string.c/.gitignore
new file mode 100644
index 000000000..5d85d4d93
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/string.c/.gitignore
@@ -0,0 +1 @@
+string_ut
diff --git a/src/spdk/test/unit/lib/util/string.c/Makefile b/src/spdk/test/unit/lib/util/string.c/Makefile
new file mode 100644
index 000000000..016fb07e9
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/string.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = string_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/string.c/string_ut.c b/src/spdk/test/unit/lib/util/string.c/string_ut.c
new file mode 100644
index 000000000..d61c62536
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/string.c/string_ut.c
@@ -0,0 +1,407 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/string.c"
+
+static void
+test_parse_ip_addr(void)
+{
+ int rc;
+ char *host;
+ char *port;
+ char ip[255];
+
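+	/*
+	 * Each address is copied into a writable buffer first: the parser
+	 * splits the string in place, and host/port point into ip[].
+	 */
+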
+ /* IPv4 */
+ snprintf(ip, 255, "%s", "192.168.0.1");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "192.168.0.1") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 11);
+ CU_ASSERT_EQUAL(port, NULL);
+
+	/* IPv4 with port (octet values are not range-checked by the parser) */
+ snprintf(ip, 255, "%s", "123.456.789.0:5520");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "123.456.789.0") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 13);
+ SPDK_CU_ASSERT_FATAL(port != NULL);
+ CU_ASSERT(strcmp(port, "5520") == 0);
+ CU_ASSERT_EQUAL(strlen(port), 4);
+
+ /* IPv6 */
+ snprintf(ip, 255, "%s", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "2001:db8:85a3:8d3:1319:8a2e:370:7348") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 36);
+ CU_ASSERT_EQUAL(port, NULL);
+
+ /* IPv6 with port */
+ snprintf(ip, 255, "%s", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "2001:db8:85a3:8d3:1319:8a2e:370:7348") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 36);
+ SPDK_CU_ASSERT_FATAL(port != NULL);
+ CU_ASSERT(strcmp(port, "443") == 0);
+ CU_ASSERT_EQUAL(strlen(port), 3);
+
+ /* IPv6 dangling colon */
+ snprintf(ip, 255, "%s", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "2001:db8:85a3:8d3:1319:8a2e:370:7348") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 36);
+ CU_ASSERT_EQUAL(port, NULL);
+}
+
+static void
+test_str_chomp(void)
+{
+ char s[1024];
+
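+	/*
+	 * spdk_str_chomp() strips trailing newlines in place and returns
+	 * the number of characters removed.
+	 */
+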
+ /* One \n newline */
+ snprintf(s, sizeof(s), "%s", "hello world\n");
+ CU_ASSERT(spdk_str_chomp(s) == 1);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* One \r\n newline */
+ snprintf(s, sizeof(s), "%s", "hello world\r\n");
+ CU_ASSERT(spdk_str_chomp(s) == 2);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* No newlines */
+ snprintf(s, sizeof(s), "%s", "hello world");
+ CU_ASSERT(spdk_str_chomp(s) == 0);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* Two newlines */
+ snprintf(s, sizeof(s), "%s", "hello world\n\n");
+ CU_ASSERT(spdk_str_chomp(s) == 2);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* Empty string */
+ snprintf(s, sizeof(s), "%s", "");
+ CU_ASSERT(spdk_str_chomp(s) == 0);
+ CU_ASSERT(strcmp(s, "") == 0);
+
+ /* One-character string with only \n */
+ snprintf(s, sizeof(s), "%s", "\n");
+ CU_ASSERT(spdk_str_chomp(s) == 1);
+ CU_ASSERT(strcmp(s, "") == 0);
+
+ /* One-character string without a newline */
+ snprintf(s, sizeof(s), "%s", "a");
+ CU_ASSERT(spdk_str_chomp(s) == 0);
+ CU_ASSERT(strcmp(s, "a") == 0);
+}
+
+static void
+test_parse_capacity(void)
+{
+ char str[128];
+ uint64_t cap;
+ int rc;
+ bool has_prefix = true;
+
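+	/*
+	 * spdk_parse_capacity() accepts an optional binary suffix, as
+	 * exercised below: k/K/KB = 1024, M = 1024^2, G = 1024^3.
+	 * has_prefix reports whether such a suffix was consumed.
+	 */
+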
+ rc = spdk_parse_capacity("472", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 472);
+ CU_ASSERT(has_prefix == false);
+
+ snprintf(str, sizeof(str), "%"PRIu64, UINT64_MAX);
+ rc = spdk_parse_capacity(str, &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == UINT64_MAX);
+ CU_ASSERT(has_prefix == false);
+
+ rc = spdk_parse_capacity("12k", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("12K", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("12KB", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("100M", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 100 * 1024 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("128M", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 128 * 1024 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("4G", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 4ULL * 1024 * 1024 * 1024);
+ CU_ASSERT(has_prefix == true);
+
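+	/*
+	 * Only the leading number and its suffix are parsed; any trailing
+	 * input (" 512k" here, "8K" in the case after it) is ignored.
+	 */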
+ rc = spdk_parse_capacity("100M 512k", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 100ULL * 1024 * 1024);
+
+ rc = spdk_parse_capacity("12k8K", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ /* Non-number */
+ rc = spdk_parse_capacity("G", &cap, &has_prefix);
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_parse_capacity("darsto", &cap, &has_prefix);
+ CU_ASSERT(rc != 0);
+}
+
+static void
+test_sprintf_append_realloc(void)
+{
+ char *str1, *str2, *str3, *str4;
+
+ /* Test basic functionality. */
+ str1 = spdk_sprintf_alloc("hello world\ngood morning\n" \
+ "good afternoon\ngood evening\n");
+ SPDK_CU_ASSERT_FATAL(str1 != NULL);
+
+ str2 = spdk_sprintf_append_realloc(NULL, "hello world\n");
+ SPDK_CU_ASSERT_FATAL(str2);
+
+ str2 = spdk_sprintf_append_realloc(str2, "good morning\n");
+ SPDK_CU_ASSERT_FATAL(str2);
+
+ str2 = spdk_sprintf_append_realloc(str2, "good afternoon\n");
+ SPDK_CU_ASSERT_FATAL(str2);
+
+ str2 = spdk_sprintf_append_realloc(str2, "good evening\n");
+ SPDK_CU_ASSERT_FATAL(str2);
+
+ CU_ASSERT(strcmp(str1, str2) == 0);
+
+ free(str1);
+ free(str2);
+
+ /* Test doubling buffer size. */
+ str3 = spdk_sprintf_append_realloc(NULL, "aaaaaaaaaa\n");
+ str3 = spdk_sprintf_append_realloc(str3, "bbbbbbbbbb\n");
+ str3 = spdk_sprintf_append_realloc(str3, "cccccccccc\n");
+
+ str4 = malloc(33 + 1);
+ memset(&str4[0], 'a', 10);
+ str4[10] = '\n';
+ memset(&str4[11], 'b', 10);
+ str4[21] = '\n';
+ memset(&str4[22], 'c', 10);
+ str4[32] = '\n';
+ str4[33] = 0;
+
+ CU_ASSERT(strcmp(str3, str4) == 0);
+
+ free(str3);
+ free(str4);
+}
+
+static void
+test_strtol(void)
+{
+ long int val;
+
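+	/*
+	 * spdk_strtol() folds errors into its return value (-EINVAL for
+	 * malformed input, -ERANGE for overflow), so genuinely negative
+	 * numbers cannot be returned; note below that even LONG_MIN + 1
+	 * yields -ERANGE.
+	 */
+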
+ const char *val1 = "no_digits";
+	/* LONG_MIN - 1 */
+ const char *val2 = "-9223372036854775809";
+ /* LONG_MIN */
+ const char *val3 = "-9223372036854775808";
+ /* LONG_MIN + 1 */
+ const char *val4 = "-9223372036854775807";
+ /* LONG_MAX - 1 */
+ const char *val5 = "9223372036854775806";
+ /* LONG_MAX */
+ const char *val6 = "9223372036854775807";
+ /* LONG_MAX + 1 */
+ const char *val7 = "9223372036854775808";
+ /* digits + chars */
+ const char *val8 = "10_is_ten";
+ /* chars + digits */
+ const char *val9 = "ten_is_10";
+ /* all zeroes */
+ const char *val10 = "00000000";
+ /* leading minus sign, but not negative */
+ const char *val11 = "-0";
+
+ val = spdk_strtol(val1, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtol(val2, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtol(val3, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtol(val4, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtol(val5, 10);
+ CU_ASSERT(val == LONG_MAX - 1);
+
+ val = spdk_strtol(val6, 10);
+ CU_ASSERT(val == LONG_MAX);
+
+ val = spdk_strtol(val7, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtol(val8, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtol(val9, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtol(val10, 10);
+ CU_ASSERT(val == 0);
+
+ /* Invalid base */
+ val = spdk_strtol(val10, 1);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtol(val11, 10);
+ CU_ASSERT(val == 0);
+}
+
+static void
+test_strtoll(void)
+{
+ long long int val;
+
+ const char *val1 = "no_digits";
+ /* LLONG_MIN - 1 */
+ const char *val2 = "-9223372036854775809";
+ /* LLONG_MIN */
+ const char *val3 = "-9223372036854775808";
+ /* LLONG_MIN + 1 */
+ const char *val4 = "-9223372036854775807";
+ /* LLONG_MAX - 1 */
+ const char *val5 = "9223372036854775806";
+ /* LLONG_MAX */
+ const char *val6 = "9223372036854775807";
+ /* LLONG_MAX + 1 */
+ const char *val7 = "9223372036854775808";
+ /* digits + chars */
+ const char *val8 = "10_is_ten";
+ /* chars + digits */
+ const char *val9 = "ten_is_10";
+ /* all zeroes */
+ const char *val10 = "00000000";
+ /* leading minus sign, but not negative */
+ const char *val11 = "-0";
+
+ val = spdk_strtoll(val1, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtoll(val2, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtoll(val3, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtoll(val4, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtoll(val5, 10);
+ CU_ASSERT(val == LLONG_MAX - 1);
+
+ val = spdk_strtoll(val6, 10);
+ CU_ASSERT(val == LLONG_MAX);
+
+ val = spdk_strtoll(val7, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtoll(val8, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtoll(val9, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtoll(val10, 10);
+ CU_ASSERT(val == 0);
+
+ /* Invalid base */
+ val = spdk_strtoll(val10, 1);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtoll(val11, 10);
+ CU_ASSERT(val == 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("string", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_parse_ip_addr);
+ CU_ADD_TEST(suite, test_str_chomp);
+ CU_ADD_TEST(suite, test_parse_capacity);
+ CU_ADD_TEST(suite, test_sprintf_append_realloc);
+ CU_ADD_TEST(suite, test_strtol);
+ CU_ADD_TEST(suite, test_strtoll);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/vhost/Makefile b/src/spdk/test/unit/lib/vhost/Makefile
new file mode 100644
index 000000000..0f569f6d2
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = vhost.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/vhost/vhost.c/.gitignore b/src/spdk/test/unit/lib/vhost/vhost.c/.gitignore
new file mode 100644
index 000000000..16cead8f9
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/vhost.c/.gitignore
@@ -0,0 +1 @@
+vhost_ut
diff --git a/src/spdk/test/unit/lib/vhost/vhost.c/Makefile b/src/spdk/test/unit/lib/vhost/vhost.c/Makefile
new file mode 100644
index 000000000..23438ec4d
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/vhost.c/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/config.mk
+
+ifeq ($(CONFIG_VHOST_INTERNAL_LIB),y)
+CFLAGS += -I$(SPDK_ROOT_DIR)/lib/rte_vhost
+endif
+
+CFLAGS += $(ENV_CFLAGS)
+TEST_FILE = vhost_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c b/src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c
new file mode 100644
index 000000000..a62c7666f
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c
@@ -0,0 +1,547 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_cunit.h"
+#include "spdk/thread.h"
+#include "spdk_internal/mock.h"
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+#include "vhost/vhost.c"
+
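+/*
+ * DEFINE_STUB()/DEFINE_STUB_V() (from spdk_internal/mock.h) generate
+ * trivial implementations that return the given value (or nothing, for
+ * the _V variants), so vhost.c can be compiled and exercised here
+ * without linking against rte_vhost or the rest of the vhost transport.
+ */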
+DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
+ uint16_t last_avail_idx, uint16_t last_used_idx), 0);
+DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
+ uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
+DEFINE_STUB_V(vhost_session_install_rte_compat_hooks,
+ (struct spdk_vhost_session *vsession));
+DEFINE_STUB(vhost_register_unix_socket, int, (const char *path, const char *name,
+ uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features), 0);
+DEFINE_STUB(vhost_driver_unregister, int, (const char *path), 0);
+DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
+DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
+DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
+DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
+ uint64_t offset, uint64_t len));
+
+DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
+DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
+DEFINE_STUB(rte_vhost_get_vhost_vring, int,
+ (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
+DEFINE_STUB(rte_vhost_enable_guest_notification, int,
+ (int vid, uint16_t queue_id, int enable), 0);
+DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
+DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
+DEFINE_STUB(rte_vhost_driver_callback_register, int,
+ (const char *path, struct vhost_device_ops const *const ops), 0);
+DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
+DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
+DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
+DEFINE_STUB(vhost_nvme_admin_passthrough, int, (int vid, void *cmd, void *cqe, void *buf), 0);
+DEFINE_STUB(vhost_nvme_set_cq_call, int, (int vid, uint16_t qid, int fd), 0);
+DEFINE_STUB(vhost_nvme_set_bar_mr, int, (int vid, void *bar, uint64_t bar_size), 0);
+DEFINE_STUB(vhost_nvme_get_cap, int, (int vid, uint64_t *cap), 0);
+
+void *
+spdk_call_unaffinitized(void *cb(void *arg), void *arg)
+{
+ return cb(arg);
+}
+
+static struct spdk_vhost_dev_backend g_vdev_backend;
+
+static int
+test_setup(void)
+{
+ return 0;
+}
+
+static int
+alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
+{
+ struct spdk_vhost_dev *vdev = NULL;
+ int rc;
+
+ /* spdk_vhost_dev must be allocated on a cache line boundary. */
+ rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(vdev != NULL);
+ memset(vdev, 0, sizeof(*vdev));
+ rc = vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
+ if (rc == 0) {
+ *vdev_p = vdev;
+ } else {
+ free(vdev);
+ *vdev_p = NULL;
+ }
+
+ return rc;
+}
+
+static void
+start_vdev(struct spdk_vhost_dev *vdev)
+{
+ struct rte_vhost_memory *mem;
+ struct spdk_vhost_session *vsession = NULL;
+ int rc;
+
+ mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
+ SPDK_CU_ASSERT_FATAL(mem != NULL);
+ mem->nregions = 2;
+ mem->regions[0].guest_phys_addr = 0;
+ mem->regions[0].size = 0x400000; /* 4 MB */
+ mem->regions[0].host_user_addr = 0x1000000;
+ mem->regions[1].guest_phys_addr = 0x400000;
+ mem->regions[1].size = 0x400000; /* 4 MB */
+ mem->regions[1].host_user_addr = 0x2000000;
+
+ assert(TAILQ_EMPTY(&vdev->vsessions));
+ /* spdk_vhost_dev must be allocated on a cache line boundary. */
+ rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(vsession != NULL);
+ vsession->started = true;
+ vsession->vid = 0;
+ vsession->mem = mem;
+ TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
+}
+
+static void
+stop_vdev(struct spdk_vhost_dev *vdev)
+{
+ struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);
+
+ TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
+ free(vsession->mem);
+ free(vsession);
+}
+
+static void
+cleanup_vdev(struct spdk_vhost_dev *vdev)
+{
+ if (!TAILQ_EMPTY(&vdev->vsessions)) {
+ stop_vdev(vdev);
+ }
+ vhost_dev_unregister(vdev);
+ free(vdev);
+}
+
+static void
+desc_to_iov_test(void)
+{
+ struct spdk_vhost_dev *vdev;
+ struct spdk_vhost_session *vsession;
+ struct iovec iov[SPDK_VHOST_IOVS_MAX];
+ uint16_t iov_index;
+ struct vring_desc desc;
+ int rc;
+
+ spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);
+
+ rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
+ start_vdev(vdev);
+
+ vsession = TAILQ_FIRST(&vdev->vsessions);
+
+ /* Test simple case where iov falls fully within a 2MB page. */
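+	/* GPA 0x110000 is inside region 0, so the expected HVA is 0x1000000 + 0x110000. */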
+ desc.addr = 0x110000;
+ desc.len = 0x1000;
+ iov_index = 0;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == 1);
+ CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
+ CU_ASSERT(iov[0].iov_len == 0x1000);
+ /*
+ * Always memset the iov to ensure each test validates data written by its call
+ * to the function under test.
+ */
+ memset(iov, 0, sizeof(iov));
+
+ /* Same test, but ensure it respects the non-zero starting iov_index. */
+ iov_index = SPDK_VHOST_IOVS_MAX - 1;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
+ memset(iov, 0, sizeof(iov));
+
+ /* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
+ iov_index = SPDK_VHOST_IOVS_MAX;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc != 0);
+ memset(iov, 0, sizeof(iov));
+
+ /* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
+ desc.addr = 0x1F0000;
+ desc.len = 0x20000;
+ iov_index = 0;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == 1);
+ CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
+ CU_ASSERT(iov[0].iov_len == 0x20000);
+ memset(iov, 0, sizeof(iov));
+
+ /* Same test, but ensure it respects the non-zero starting iov_index. */
+ iov_index = SPDK_VHOST_IOVS_MAX - 1;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
+ memset(iov, 0, sizeof(iov));
+
+ /* Test case where iov spans a vhost memory region. */
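+	/*
+	 * The 0x20000-byte buffer at GPA 0x3F0000 crosses the region boundary
+	 * at GPA 0x400000, so it must be split into two iovs.
+	 */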
+ desc.addr = 0x3F0000;
+ desc.len = 0x20000;
+ iov_index = 0;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == 2);
+ CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
+ CU_ASSERT(iov[0].iov_len == 0x10000);
+ CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
+ CU_ASSERT(iov[1].iov_len == 0x10000);
+ memset(iov, 0, sizeof(iov));
+
+ cleanup_vdev(vdev);
+}
+
+static void
+create_controller_test(void)
+{
+ struct spdk_vhost_dev *vdev, *vdev2;
+ int ret;
+ char long_name[PATH_MAX];
+
+ spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);
+
+ /* Create device with no name */
+ ret = alloc_vdev(&vdev, NULL, "0x1");
+ CU_ASSERT(ret != 0);
+
+ /* Create device with incorrect cpumask */
+ ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
+ CU_ASSERT(ret != 0);
+
+	/* Create device with a name and path that are too long */
+ memset(long_name, 'x', sizeof(long_name));
+ long_name[PATH_MAX - 1] = 0;
+ snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
+ ret = alloc_vdev(&vdev, long_name, "0x1");
+ CU_ASSERT(ret != 0);
+ dev_dirname[0] = 0;
+
+ /* Create device when device name is already taken */
+ ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
+ ret = alloc_vdev(&vdev2, "vdev_name_0", "0x1");
+ CU_ASSERT(ret != 0);
+ cleanup_vdev(vdev);
+}
+
+static void
+session_find_by_vid_test(void)
+{
+ struct spdk_vhost_dev *vdev;
+ struct spdk_vhost_session *vsession;
+ struct spdk_vhost_session *tmp;
+ int rc;
+
+ rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
+ start_vdev(vdev);
+
+ vsession = TAILQ_FIRST(&vdev->vsessions);
+
+ tmp = vhost_session_find_by_vid(vsession->vid);
+ CU_ASSERT(tmp == vsession);
+
+	/* Search for a session with an incorrect vid */
+ tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
+ CU_ASSERT(tmp == NULL);
+
+ cleanup_vdev(vdev);
+}
+
+static void
+remove_controller_test(void)
+{
+ struct spdk_vhost_dev *vdev;
+ int ret;
+
+ ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
+
+ /* Remove device when controller is in use */
+ start_vdev(vdev);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
+ ret = vhost_dev_unregister(vdev);
+ CU_ASSERT(ret != 0);
+
+ cleanup_vdev(vdev);
+}
+
+static void
+vq_avail_ring_get_test(void)
+{
+ struct spdk_vhost_virtqueue vq;
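+	/* Backing store for struct vring_avail: flags + idx + a 32-entry ring = 34 uint16_t. */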
+ uint16_t avail_mem[34];
+ uint16_t reqs[32];
+ uint16_t reqs_len, ret, i;
+
+	/* Basic case: reap all available requests */
+ vq.vring.avail = (struct vring_avail *)avail_mem;
+ vq.vring.size = 32;
+ vq.last_avail_idx = 24;
+ vq.vring.avail->idx = 29;
+ reqs_len = 6;
+
+ for (i = 0; i < 32; i++) {
+ vq.vring.avail->ring[i] = i;
+ }
+
+ ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
+ CU_ASSERT(ret == 5);
+ CU_ASSERT(vq.last_avail_idx == 29);
+ for (i = 0; i < ret; i++) {
+ CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
+ }
+
+	/* Basic case: reap only some of the available requests */
+ vq.last_avail_idx = 20;
+ vq.vring.avail->idx = 29;
+ reqs_len = 6;
+
+ ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
+ CU_ASSERT(ret == reqs_len);
+ CU_ASSERT(vq.last_avail_idx == 26);
+ for (i = 0; i < ret; i++) {
+ CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
+ }
+
+	/* Invalid case: avail->idx has advanced by more than the ring size */
+ vq.last_avail_idx = 20;
+ vq.vring.avail->idx = 156;
+ reqs_len = 6;
+
+ ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
+ CU_ASSERT(ret == 0);
+
+ /* Test overflow in the avail->idx variable. */
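+	/*
+	 * With 16-bit indices, (uint16_t)(4 - 65535) == 5 new entries, and ring
+	 * slots are indexed modulo the ring size, so the first entry is read
+	 * from ring[65535 % 32] == ring[31].
+	 */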
+ vq.last_avail_idx = 65535;
+ vq.vring.avail->idx = 4;
+ reqs_len = 6;
+ ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
+ CU_ASSERT(ret == 5);
+ CU_ASSERT(vq.last_avail_idx == 4);
+ CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
+ for (i = 1; i < ret; i++) {
+ CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
+ }
+}
+
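+/*
+ * Guest-side helpers emulating a packed ring driver. Per the VIRTIO 1.1
+ * packed ring convention, the driver marks a descriptor available by setting
+ * VRING_DESC_F_AVAIL equal to its avail wrap counter and VRING_DESC_F_USED to
+ * the inverse; the device marks it used by setting both flags equal to its
+ * used wrap counter.
+ */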
+static bool
+vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
+ int16_t guest_used_phase)
+{
+ return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
+ !!guest_used_phase);
+}
+
+static void
+vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
+ int16_t *guest_avail_phase)
+{
+ if (*guest_avail_phase) {
+ vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
+ vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
+ } else {
+ vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
+ vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
+ }
+
+ if (++(*guest_last_avail_idx) >= vq->vring.size) {
+ *guest_last_avail_idx -= vq->vring.size;
+ *guest_avail_phase = !(*guest_avail_phase);
+ }
+}
+
+static int16_t
+vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
+ int16_t *guest_used_phase)
+{
+ int16_t buffer_id = -1;
+
+ if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
+ buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
+ if (++(*guest_last_used_idx) >= vq->vring.size) {
+ *guest_last_used_idx -= vq->vring.size;
+ *guest_used_phase = !(*guest_used_phase);
+ }
+
+ return buffer_id;
+ }
+
+ return -1;
+}
+
+static void
+vq_packed_ring_test(void)
+{
+ struct spdk_vhost_session vs = {};
+ struct spdk_vhost_virtqueue vq = {};
+ struct vring_packed_desc descs[4];
+ uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
+ uint16_t guest_avail_phase = 1, guest_used_phase = 1;
+ int i;
+ int16_t chain_num;
+
+ vq.vring.desc_packed = descs;
+ vq.vring.size = 4;
+
+	/* The avail and used wrap counters are initialized to 1 */
+ vq.packed.avail_phase = 1;
+ vq.packed.used_phase = 1;
+ vq.packed.packed_ring = true;
+ memset(descs, 0, sizeof(descs));
+
+ CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);
+
+	/* The guest sends requests */
+ for (i = 0; i < vq.vring.size; i++) {
+ descs[guest_last_avail_idx].id = i;
+ /* Set the desc available */
+ vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
+ }
+ CU_ASSERT(guest_last_avail_idx == 0);
+ CU_ASSERT(guest_avail_phase == 0);
+
+	/* The host handles the available descs */
+ CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
+ i = 0;
+ while (vhost_vq_packed_ring_is_avail(&vq)) {
+ CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
+ CU_ASSERT(chain_num == 1);
+ }
+
+	/* The host completes them out of order: buffer ids 1, 0, 2. */
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1);
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1);
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1);
+
+	/* The host has fetched all the available requests but completed only three of them */
+ CU_ASSERT(vq.last_avail_idx == 0);
+ CU_ASSERT(vq.packed.avail_phase == 0);
+ CU_ASSERT(vq.last_used_idx == 3);
+ CU_ASSERT(vq.packed.used_phase == 1);
+
+	/* The guest handles the completed requests */
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
+ CU_ASSERT(guest_last_used_idx == 3);
+ CU_ASSERT(guest_used_phase == 1);
+
+	/* Three descs are free again, so the guest can send three more requests */
+ for (i = 0; i < 3; i++) {
+ descs[guest_last_avail_idx].id = 2 - i;
+ /* Set the desc available */
+ vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
+ }
+
+	/* The host handles the available descs */
+ CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
+ i = 2;
+ while (vhost_vq_packed_ring_is_avail(&vq)) {
+ CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
+ CU_ASSERT(chain_num == 1);
+ }
+
+	/* The host now holds four outstanding requests: the three new ones plus the one left from before */
+ CU_ASSERT(vq.last_avail_idx == 3);
+	/* The avail wrap counter should have flipped */
+ CU_ASSERT(vq.packed.avail_phase == 0);
+
+	/* The host completes all the requests */
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1);
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1);
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1);
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1);
+
+ CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
+ CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);
+
+	/* The guest handles the completed requests */
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
+
+ CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
+ CU_ASSERT(guest_avail_phase == guest_used_phase);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("vhost_suite", test_setup, NULL);
+
+ CU_ADD_TEST(suite, desc_to_iov_test);
+ CU_ADD_TEST(suite, create_controller_test);
+ CU_ADD_TEST(suite, session_find_by_vid_test);
+ CU_ADD_TEST(suite, remove_controller_test);
+ CU_ADD_TEST(suite, vq_avail_ring_get_test);
+ CU_ADD_TEST(suite, vq_packed_ring_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/unittest.sh b/src/spdk/test/unit/unittest.sh
new file mode 100755
index 000000000..39bfdbb4a
--- /dev/null
+++ b/src/spdk/test/unit/unittest.sh
@@ -0,0 +1,253 @@
+#!/usr/bin/env bash
+#
+# Environment variables:
+#  $valgrind     Valgrind command line to use; if not set,
+#                a default command line is used
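+#  $UT_COVERAGE  Output directory for the coverage report
+#                (defaults to "ut_coverage")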
+
+set -xe
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $(dirname $0)/../..)
+source "$rootdir/test/common/autotest_common.sh"
+
+cd "$rootdir"
+
+function unittest_bdev() {
+ $valgrind $testdir/lib/bdev/bdev.c/bdev_ut
+ $valgrind $testdir/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut
+ $valgrind $testdir/lib/bdev/raid/bdev_raid.c/bdev_raid_ut
+ $valgrind $testdir/lib/bdev/bdev_zone.c/bdev_zone_ut
+ $valgrind $testdir/lib/bdev/gpt/gpt.c/gpt_ut
+ $valgrind $testdir/lib/bdev/part.c/part_ut
+ $valgrind $testdir/lib/bdev/scsi_nvme.c/scsi_nvme_ut
+ $valgrind $testdir/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut
+ $valgrind $testdir/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut
+ $valgrind $testdir/lib/bdev/mt/bdev.c/bdev_ut
+}
+
+function unittest_blob() {
+ $valgrind $testdir/lib/blob/blob.c/blob_ut
+ $valgrind $testdir/lib/blobfs/tree.c/tree_ut
+ $valgrind $testdir/lib/blobfs/blobfs_async_ut/blobfs_async_ut
+ # blobfs_sync_ut hangs when run under valgrind, so don't use $valgrind
+ $testdir/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut
+ $valgrind $testdir/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut
+}
+
+function unittest_event() {
+ $valgrind $testdir/lib/event/subsystem.c/subsystem_ut
+ $valgrind $testdir/lib/event/app.c/app_ut
+ $valgrind $testdir/lib/event/reactor.c/reactor_ut
+}
+
+function unittest_ftl() {
+ $valgrind $testdir/lib/ftl/ftl_ppa/ftl_ppa_ut
+ $valgrind $testdir/lib/ftl/ftl_band.c/ftl_band_ut
+ $valgrind $testdir/lib/ftl/ftl_reloc.c/ftl_reloc_ut
+ $valgrind $testdir/lib/ftl/ftl_wptr/ftl_wptr_ut
+ $valgrind $testdir/lib/ftl/ftl_md/ftl_md_ut
+ $valgrind $testdir/lib/ftl/ftl_io.c/ftl_io_ut
+}
+
+function unittest_iscsi() {
+ $valgrind $testdir/lib/iscsi/conn.c/conn_ut
+ $valgrind $testdir/lib/iscsi/param.c/param_ut
+ $valgrind $testdir/lib/iscsi/tgt_node.c/tgt_node_ut $testdir/lib/iscsi/tgt_node.c/tgt_node.conf
+ $valgrind $testdir/lib/iscsi/iscsi.c/iscsi_ut
+ $valgrind $testdir/lib/iscsi/init_grp.c/init_grp_ut $testdir/lib/iscsi/init_grp.c/init_grp.conf
+ $valgrind $testdir/lib/iscsi/portal_grp.c/portal_grp_ut $testdir/lib/iscsi/portal_grp.c/portal_grp.conf
+}
+
+function unittest_json() {
+ $valgrind $testdir/lib/json/json_parse.c/json_parse_ut
+ $valgrind $testdir/lib/json/json_util.c/json_util_ut
+ $valgrind $testdir/lib/json/json_write.c/json_write_ut
+ $valgrind $testdir/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut
+}
+
+function unittest_nvme() {
+ $valgrind $testdir/lib/nvme/nvme.c/nvme_ut
+ $valgrind $testdir/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut
+ $valgrind $testdir/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut
+ $valgrind $testdir/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut
+ $valgrind $testdir/lib/nvme/nvme_ns.c/nvme_ns_ut
+ $valgrind $testdir/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut
+ $valgrind $testdir/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut
+ $valgrind $testdir/lib/nvme/nvme_qpair.c/nvme_qpair_ut
+ $valgrind $testdir/lib/nvme/nvme_pcie.c/nvme_pcie_ut
+ $valgrind $testdir/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut
+ $valgrind $testdir/lib/nvme/nvme_quirks.c/nvme_quirks_ut
+ $valgrind $testdir/lib/nvme/nvme_tcp.c/nvme_tcp_ut
+ $valgrind $testdir/lib/nvme/nvme_uevent.c/nvme_uevent_ut
+}
+
+function unittest_nvmf() {
+ $valgrind $testdir/lib/nvmf/ctrlr.c/ctrlr_ut
+ $valgrind $testdir/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut
+ $valgrind $testdir/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut
+ $valgrind $testdir/lib/nvmf/subsystem.c/subsystem_ut
+ $valgrind $testdir/lib/nvmf/tcp.c/tcp_ut
+}
+
+function unittest_scsi() {
+ $valgrind $testdir/lib/scsi/dev.c/dev_ut
+ $valgrind $testdir/lib/scsi/lun.c/lun_ut
+ $valgrind $testdir/lib/scsi/scsi.c/scsi_ut
+ $valgrind $testdir/lib/scsi/scsi_bdev.c/scsi_bdev_ut
+ $valgrind $testdir/lib/scsi/scsi_pr.c/scsi_pr_ut
+}
+
+function unittest_sock() {
+ $valgrind $testdir/lib/sock/sock.c/sock_ut
+ $valgrind $testdir/lib/sock/posix.c/posix_ut
+ # Check whether uring is configured
+ if grep -q '#define SPDK_CONFIG_URING 1' $rootdir/include/spdk/config.h; then
+ $valgrind $testdir/lib/sock/uring.c/uring_ut
+ fi
+}
+
+function unittest_util() {
+ $valgrind $testdir/lib/util/base64.c/base64_ut
+ $valgrind $testdir/lib/util/bit_array.c/bit_array_ut
+ $valgrind $testdir/lib/util/cpuset.c/cpuset_ut
+ $valgrind $testdir/lib/util/crc16.c/crc16_ut
+ $valgrind $testdir/lib/util/crc32_ieee.c/crc32_ieee_ut
+ $valgrind $testdir/lib/util/crc32c.c/crc32c_ut
+ $valgrind $testdir/lib/util/string.c/string_ut
+ $valgrind $testdir/lib/util/dif.c/dif_ut
+ $valgrind $testdir/lib/util/iov.c/iov_ut
+ $valgrind $testdir/lib/util/math.c/math_ut
+ $valgrind $testdir/lib/util/pipe.c/pipe_ut
+}
+
+# If ASAN is enabled, use it. Otherwise use valgrind if it is installed, but
+# allow the env variable to override the default shown below.
+if [ -z ${valgrind+x} ]; then
+ if grep -q '#undef SPDK_CONFIG_ASAN' $rootdir/include/spdk/config.h && hash valgrind; then
+ valgrind='valgrind --leak-check=full --error-exitcode=2'
+ else
+ valgrind=''
+ fi
+fi
+
+# set up local unit test coverage if lcov is available
+if hash lcov && grep -q '#define SPDK_CONFIG_COVERAGE 1' $rootdir/include/spdk/config.h; then
+ cov_avail="yes"
+else
+ cov_avail="no"
+fi
+if [ "$cov_avail" = "yes" ]; then
+ # set unit test output dir if not specified in env var
+ if [ -z ${UT_COVERAGE+x} ]; then
+ UT_COVERAGE="ut_coverage"
+ fi
+ mkdir -p $UT_COVERAGE
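+	# enable branch and function coverage in both lcov and genhtml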
+ export LCOV_OPTS="
+ --rc lcov_branch_coverage=1
+ --rc lcov_function_coverage=1
+ --rc genhtml_branch_coverage=1
+ --rc genhtml_function_coverage=1
+ --rc genhtml_legend=1
+ --rc geninfo_all_blocks=1
+ "
+ export LCOV="lcov $LCOV_OPTS --no-external"
+	# capture an initial baseline with zero execution counts
+ $LCOV -q -c -i -d . -t "Baseline" -o $UT_COVERAGE/ut_cov_base.info
+fi
+
+# workaround for valgrind v3.13 on arm64
+if [ $(uname -m) = "aarch64" ]; then
+ export LD_HWCAP_MASK=1
+fi
+
+run_test "unittest_include" $valgrind $testdir/include/spdk/histogram_data.h/histogram_ut
+run_test "unittest_bdev" unittest_bdev
+if grep -q '#define SPDK_CONFIG_CRYPTO 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_bdev_crypto" $valgrind $testdir/lib/bdev/crypto.c/crypto_ut
+fi
+
+if grep -q '#define SPDK_CONFIG_REDUCE 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_bdev_reduce" $valgrind $testdir/lib/bdev/compress.c/compress_ut
+fi
+
+if grep -q '#define SPDK_CONFIG_PMDK 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_bdev_pmem" $valgrind $testdir/lib/bdev/pmem/bdev_pmem_ut
+fi
+
+if grep -q '#define SPDK_CONFIG_RAID5 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_bdev_raid5" $valgrind $testdir/lib/bdev/raid/raid5.c/raid5_ut
+fi
+
+run_test "unittest_blob_blobfs" unittest_blob
+run_test "unittest_event" unittest_event
+if [ $(uname -s) = Linux ]; then
+ run_test "unittest_ftl" unittest_ftl
+fi
+
+run_test "unittest_ioat" $valgrind $testdir/lib/ioat/ioat.c/ioat_ut
+if grep -q '#define SPDK_CONFIG_IDXD 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_idxd" $valgrind $testdir/lib/idxd/idxd.c/idxd_ut
+fi
+run_test "unittest_iscsi" unittest_iscsi
+run_test "unittest_json" unittest_json
+run_test "unittest_notify" $valgrind $testdir/lib/notify/notify.c/notify_ut
+run_test "unittest_nvme" unittest_nvme
+run_test "unittest_log" $valgrind $testdir/lib/log/log.c/log_ut
+run_test "unittest_lvol" $valgrind $testdir/lib/lvol/lvol.c/lvol_ut
+if grep -q '#define SPDK_CONFIG_RDMA 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_nvme_rdma" $valgrind $testdir/lib/nvme/nvme_rdma.c/nvme_rdma_ut
+fi
+
+run_test "unittest_nvmf" unittest_nvmf
+if grep -q '#define SPDK_CONFIG_FC 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_nvmf_fc" $valgrind $testdir/lib/nvmf/fc.c/fc_ut
+ run_test "unittest_nvmf_fc_ls" $valgrind $testdir/lib/nvmf/fc_ls.c/fc_ls_ut
+fi
+
+if grep -q '#define SPDK_CONFIG_RDMA 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_nvmf_rdma" $valgrind $testdir/lib/nvmf/rdma.c/rdma_ut
+fi
+
+run_test "unittest_scsi" unittest_scsi
+run_test "unittest_sock" unittest_sock
+run_test "unittest_thread" $valgrind $testdir/lib/thread/thread.c/thread_ut
+run_test "unittest_util" unittest_util
+if grep -q '#define SPDK_CONFIG_VHOST 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_vhost" $valgrind $testdir/lib/vhost/vhost.c/vhost_ut
+fi
+
+# local unit test coverage
+if [ "$cov_avail" = "yes" ]; then
+ $LCOV -q -d . -c -t "$(hostname)" -o $UT_COVERAGE/ut_cov_test.info
+ $LCOV -q -a $UT_COVERAGE/ut_cov_base.info -a $UT_COVERAGE/ut_cov_test.info -o $UT_COVERAGE/ut_cov_total.info
+ $LCOV -q -a $UT_COVERAGE/ut_cov_total.info -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/app/*" -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/dpdk/*" -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/examples/*" -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/include/*" -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/lib/vhost/rte_vhost/*" -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/test/*" -o $UT_COVERAGE/ut_cov_unit.info
+ rm -f $UT_COVERAGE/ut_cov_base.info $UT_COVERAGE/ut_cov_test.info
+ genhtml $UT_COVERAGE/ut_cov_unit.info --output-directory $UT_COVERAGE
+ # git -C option not used for compatibility reasons
+ (cd $rootdir && git clean -f "*.gcda")
+fi
+
+set +x
+
+echo
+echo
+echo "====================="
+echo "All unit tests passed"
+echo "====================="
+if [ "$cov_avail" = "yes" ]; then
+ echo "Note: coverage report is here: $rootdir/$UT_COVERAGE"
+else
+ echo "WARN: lcov not installed or SPDK built without coverage!"
+fi
+if grep -q '#undef SPDK_CONFIG_ASAN' $rootdir/include/spdk/config.h && [ "$valgrind" = "" ]; then
+ echo "WARN: neither valgrind nor ASAN is enabled!"
+fi
+
+echo
+echo