author      Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:45:59 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:45:59 +0000
commit      19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree        42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/spdk/test
parent      Initial commit. (diff)
download    ceph-19fcec84d8d7d21e796c7624e521b60d28ee21ed.tar.xz
            ceph-19fcec84d8d7d21e796c7624e521b60d28ee21ed.zip
Adding upstream version 16.2.11+ds. (refs: upstream/16.2.11+ds, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test')
-rw-r--r--src/spdk/test/Makefile48
-rw-r--r--src/spdk/test/app/Makefile44
-rw-r--r--src/spdk/test/app/bdev_svc/.gitignore1
-rw-r--r--src/spdk/test/app/bdev_svc/Makefile63
-rw-r--r--src/spdk/test/app/bdev_svc/bdev_svc.c112
-rw-r--r--src/spdk/test/app/fuzz/Makefile48
-rw-r--r--src/spdk/test/app/fuzz/common/fuzz_common.h303
-rwxr-xr-xsrc/spdk/test/app/fuzz/common/fuzz_rpc.py106
-rw-r--r--src/spdk/test/app/fuzz/iscsi_fuzz/.gitignore1
-rw-r--r--src/spdk/test/app/fuzz/iscsi_fuzz/Makefile51
-rw-r--r--src/spdk/test/app/fuzz/iscsi_fuzz/README.md27
-rw-r--r--src/spdk/test/app/fuzz/iscsi_fuzz/iscsi_fuzz.c1092
-rw-r--r--src/spdk/test/app/fuzz/nvme_fuzz/.gitignore1
-rw-r--r--src/spdk/test/app/fuzz/nvme_fuzz/Makefile49
-rw-r--r--src/spdk/test/app/fuzz/nvme_fuzz/README.md52
-rw-r--r--src/spdk/test/app/fuzz/nvme_fuzz/example.json290
-rw-r--r--src/spdk/test/app/fuzz/nvme_fuzz/nvme_fuzz.c931
-rw-r--r--src/spdk/test/app/fuzz/vhost_fuzz/.gitignore1
-rw-r--r--src/spdk/test/app/fuzz/vhost_fuzz/Makefile42
-rw-r--r--src/spdk/test/app/fuzz/vhost_fuzz/README.md46
-rw-r--r--src/spdk/test/app/fuzz/vhost_fuzz/example.json95
-rw-r--r--src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz.c1146
-rw-r--r--src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz.h41
-rw-r--r--src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz_rpc.c108
-rw-r--r--src/spdk/test/app/histogram_perf/.gitignore1
-rw-r--r--src/spdk/test/app/histogram_perf/Makefile43
-rw-r--r--src/spdk/test/app/histogram_perf/histogram_perf.c102
-rw-r--r--src/spdk/test/app/jsoncat/.gitignore1
-rw-r--r--src/spdk/test/app/jsoncat/Makefile43
-rw-r--r--src/spdk/test/app/jsoncat/jsoncat.c192
-rwxr-xr-xsrc/spdk/test/app/match/match332
-rw-r--r--src/spdk/test/app/stub/.gitignore1
-rw-r--r--src/spdk/test/app/stub/Makefile49
-rw-r--r--src/spdk/test/app/stub/stub.c203
-rw-r--r--src/spdk/test/bdev/Makefile44
-rwxr-xr-xsrc/spdk/test/bdev/bdev_raid.sh119
-rw-r--r--src/spdk/test/bdev/bdevio/.gitignore1
-rw-r--r--src/spdk/test/bdev/bdevio/Makefile48
-rw-r--r--src/spdk/test/bdev/bdevio/bdevio.c1433
-rwxr-xr-xsrc/spdk/test/bdev/bdevio/tests.py88
-rw-r--r--src/spdk/test/bdev/bdevperf/.gitignore1
-rw-r--r--src/spdk/test/bdev/bdevperf/Makefile55
-rw-r--r--src/spdk/test/bdev/bdevperf/bdevperf.c2137
-rwxr-xr-xsrc/spdk/test/bdev/bdevperf/bdevperf.py86
-rw-r--r--src/spdk/test/bdev/bdevperf/common.sh33
-rw-r--r--src/spdk/test/bdev/bdevperf/conf.json25
-rwxr-xr-xsrc/spdk/test/bdev/bdevperf/test_config.sh41
-rwxr-xr-xsrc/spdk/test/bdev/blockdev.sh408
-rw-r--r--src/spdk/test/bdev/nbd_common.sh123
-rw-r--r--src/spdk/test/blobfs/Makefile45
-rwxr-xr-xsrc/spdk/test/blobfs/blobfs.sh141
-rw-r--r--src/spdk/test/blobfs/fuse/.gitignore1
-rw-r--r--src/spdk/test/blobfs/fuse/Makefile50
-rw-r--r--src/spdk/test/blobfs/fuse/fuse.c114
-rw-r--r--src/spdk/test/blobfs/mkfs/.gitignore1
-rw-r--r--src/spdk/test/blobfs/mkfs/Makefile52
-rw-r--r--src/spdk/test/blobfs/mkfs/mkfs.c115
-rw-r--r--src/spdk/test/blobfs/rocksdb/.gitignore1
-rw-r--r--src/spdk/test/blobfs/rocksdb/common_flags.txt27
-rwxr-xr-xsrc/spdk/test/blobfs/rocksdb/postprocess.py70
-rwxr-xr-xsrc/spdk/test/blobfs/rocksdb/rocksdb.sh155
-rw-r--r--src/spdk/test/blobfs/rocksdb/rocksdb_commit_id1
-rwxr-xr-xsrc/spdk/test/blobstore/blob_io_wait/blob_io_wait.sh61
-rwxr-xr-xsrc/spdk/test/blobstore/blobstore.sh30
-rw-r--r--src/spdk/test/blobstore/btest.out.ignore5
-rw-r--r--src/spdk/test/blobstore/btest.out.match90
-rw-r--r--src/spdk/test/blobstore/test.bs12
-rw-r--r--src/spdk/test/common/applications.sh24
-rwxr-xr-xsrc/spdk/test/common/autotest_common.sh1350
-rw-r--r--src/spdk/test/common/config/README.md104
-rw-r--r--src/spdk/test/common/config/pkgdep/apt-get100
-rw-r--r--src/spdk/test/common/config/pkgdep/dnf72
-rw-r--r--src/spdk/test/common/config/pkgdep/git325
-rw-r--r--src/spdk/test/common/config/pkgdep/pacman62
-rw-r--r--src/spdk/test/common/config/pkgdep/pkg27
-rw-r--r--src/spdk/test/common/config/pkgdep/swupd21
-rw-r--r--src/spdk/test/common/config/pkgdep/yum67
-rw-r--r--src/spdk/test/common/config/vm_setup.conf12
-rwxr-xr-xsrc/spdk/test/common/config/vm_setup.sh176
-rw-r--r--src/spdk/test/common/lib/nvme/common_stubs.h117
-rw-r--r--src/spdk/test/common/lib/test_env.c637
-rw-r--r--src/spdk/test/common/lib/test_rdma.c49
-rw-r--r--src/spdk/test/common/lib/test_sock.c70
-rw-r--r--src/spdk/test/common/lib/ut_multithread.c214
-rw-r--r--src/spdk/test/common/skipped_build_files.txt60
-rw-r--r--src/spdk/test/common/skipped_tests.txt73
-rwxr-xr-xsrc/spdk/test/compress/compress.sh119
-rw-r--r--src/spdk/test/config_converter/config.ini153
-rw-r--r--src/spdk/test/config_converter/config_virtio.ini21
-rw-r--r--src/spdk/test/config_converter/spdk_config.json526
-rw-r--r--src/spdk/test/config_converter/spdk_config_virtio.json133
-rwxr-xr-xsrc/spdk/test/config_converter/test_converter.sh26
-rw-r--r--src/spdk/test/cpp_headers/.gitignore1
-rw-r--r--src/spdk/test/cpp_headers/Makefile59
-rwxr-xr-xsrc/spdk/test/dd/basic_rw.sh107
-rwxr-xr-xsrc/spdk/test/dd/bdev_to_bdev.sh111
-rw-r--r--src/spdk/test/dd/common.sh154
-rwxr-xr-xsrc/spdk/test/dd/dd.sh13
-rwxr-xr-xsrc/spdk/test/dd/posix.sh122
-rwxr-xr-xsrc/spdk/test/dpdk_memory_utility/test_dpdk_mem_info.sh25
-rw-r--r--src/spdk/test/env/Makefile50
-rwxr-xr-xsrc/spdk/test/env/env.sh27
-rw-r--r--src/spdk/test/env/env_dpdk_post_init/.gitignore1
-rw-r--r--src/spdk/test/env/env_dpdk_post_init/Makefile39
-rw-r--r--src/spdk/test/env/env_dpdk_post_init/env_dpdk_post_init.c126
-rw-r--r--src/spdk/test/env/mem_callbacks/.gitignore1
-rw-r--r--src/spdk/test/env/mem_callbacks/Makefile41
-rw-r--r--src/spdk/test/env/mem_callbacks/mem_callbacks.c217
-rw-r--r--src/spdk/test/env/memory/.gitignore1
-rw-r--r--src/spdk/test/env/memory/Makefile40
-rw-r--r--src/spdk/test/env/memory/memory_ut.c524
-rw-r--r--src/spdk/test/env/pci/.gitignore1
-rw-r--r--src/spdk/test/env/pci/Makefile40
-rw-r--r--src/spdk/test/env/pci/pci_ut.c238
-rw-r--r--src/spdk/test/env/vtophys/.gitignore1
-rw-r--r--src/spdk/test/env/vtophys/Makefile39
-rw-r--r--src/spdk/test/env/vtophys/vtophys.c196
-rw-r--r--src/spdk/test/event/Makefile48
-rw-r--r--src/spdk/test/event/app_repeat/.gitignore1
-rw-r--r--src/spdk/test/event/app_repeat/Makefile54
-rw-r--r--src/spdk/test/event/app_repeat/app_repeat.c115
-rwxr-xr-xsrc/spdk/test/event/event.sh44
-rw-r--r--src/spdk/test/event/event_perf/.gitignore1
-rw-r--r--src/spdk/test/event/event_perf/Makefile42
-rw-r--r--src/spdk/test/event/event_perf/event_perf.c184
-rw-r--r--src/spdk/test/event/reactor/.gitignore1
-rw-r--r--src/spdk/test/event/reactor/Makefile42
-rw-r--r--src/spdk/test/event/reactor/reactor.c144
-rw-r--r--src/spdk/test/event/reactor_perf/.gitignore1
-rw-r--r--src/spdk/test/event/reactor_perf/Makefile42
-rw-r--r--src/spdk/test/event/reactor_perf/reactor_perf.c151
-rw-r--r--src/spdk/test/external_code/Makefile80
-rw-r--r--src/spdk/test/external_code/README.md17
-rw-r--r--src/spdk/test/external_code/hello_world/.gitignore1
-rw-r--r--src/spdk/test/external_code/hello_world/Makefile73
-rw-r--r--src/spdk/test/external_code/hello_world/bdev.conf17
-rw-r--r--src/spdk/test/external_code/hello_world/bdev_external.conf24
-rw-r--r--src/spdk/test/external_code/hello_world/hello_bdev.c300
-rw-r--r--src/spdk/test/external_code/passthru/Makefile43
-rw-r--r--src/spdk/test/external_code/passthru/vbdev_passthru.c748
-rw-r--r--src/spdk/test/external_code/passthru/vbdev_passthru.h65
-rw-r--r--src/spdk/test/external_code/passthru/vbdev_passthru_rpc.c142
-rwxr-xr-xsrc/spdk/test/external_code/test_make.sh63
-rwxr-xr-xsrc/spdk/test/ftl/bdevperf.sh31
-rw-r--r--src/spdk/test/ftl/common.sh68
-rw-r--r--src/spdk/test/ftl/config/.gitignore2
-rw-r--r--src/spdk/test/ftl/config/fio/drive-prep.fio15
-rw-r--r--src/spdk/test/ftl/config/fio/randr.fio19
-rw-r--r--src/spdk/test/ftl/config/fio/randrw.fio20
-rw-r--r--src/spdk/test/ftl/config/fio/randw-verify-depth128.fio20
-rw-r--r--src/spdk/test/ftl/config/fio/randw-verify-j2.fio25
-rw-r--r--src/spdk/test/ftl/config/fio/randw-verify-qd128-ext.fio20
-rw-r--r--src/spdk/test/ftl/config/fio/randw-verify.fio20
-rw-r--r--src/spdk/test/ftl/config/fio/randw.fio18
-rwxr-xr-xsrc/spdk/test/ftl/dirty_shutdown.sh93
-rwxr-xr-xsrc/spdk/test/ftl/fio.sh68
-rwxr-xr-xsrc/spdk/test/ftl/ftl.sh80
-rwxr-xr-xsrc/spdk/test/ftl/json.sh38
-rwxr-xr-xsrc/spdk/test/ftl/restore.sh99
-rwxr-xr-xsrc/spdk/test/fuzz/autofuzz.sh74
-rwxr-xr-xsrc/spdk/test/fuzz/autofuzz_iscsi.sh75
-rwxr-xr-xsrc/spdk/test/fuzz/autofuzz_nvmf.sh52
-rwxr-xr-xsrc/spdk/test/fuzz/autofuzz_vhost.sh75
-rwxr-xr-xsrc/spdk/test/ioat/ioat.sh9
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh50
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/calsoft/calsoft.py121
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/calsoft/calsoft.sh63
-rw-r--r--src/spdk/test/iscsi_tgt/calsoft/iscsi.json15
-rw-r--r--src/spdk/test/iscsi_tgt/calsoft/its.conf7
-rw-r--r--src/spdk/test/iscsi_tgt/common.sh209
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/digests/digests.sh94
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/ext4test/ext4test.sh131
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/filesystem/filesystem.sh145
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/fio/fio.sh150
-rw-r--r--src/spdk/test/iscsi_tgt/fio/iscsi.json32
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/fuzz/fuzz.sh65
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/initiator/initiator.sh52
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/ip_migration/ip_migration.sh131
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/iscsi_tgt.sh97
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/lvol/iscsi_lvol.sh85
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/multiconnection/multiconnection.sh84
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh99
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/perf/iscsi_initiator.sh37
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/perf/iscsi_target.sh134
-rw-r--r--src/spdk/test/iscsi_tgt/perf/perf.job19
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/pmem/iscsi_pmem.sh74
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/qos/qos.sh145
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/rbd/rbd.sh72
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/reset/reset.sh77
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/rpc_config/rpc_config.py481
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/rpc_config/rpc_config.sh63
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/sock/sock.sh142
-rwxr-xr-xsrc/spdk/test/iscsi_tgt/trace_record/trace_record.sh135
-rwxr-xr-xsrc/spdk/test/json_config/alias_rpc/alias_rpc.sh20
-rw-r--r--src/spdk/test/json_config/alias_rpc/conf.json44
-rwxr-xr-xsrc/spdk/test/json_config/clear_config.py215
-rwxr-xr-xsrc/spdk/test/json_config/config_filter.py96
-rwxr-xr-xsrc/spdk/test/json_config/json_config.sh475
-rwxr-xr-xsrc/spdk/test/json_config/json_diff.sh41
-rwxr-xr-xsrc/spdk/test/lvol/basic.sh568
-rw-r--r--src/spdk/test/lvol/common.sh53
-rwxr-xr-xsrc/spdk/test/lvol/hotremove.sh216
-rwxr-xr-xsrc/spdk/test/lvol/lvol2.sh19
-rwxr-xr-xsrc/spdk/test/lvol/rename.sh219
-rwxr-xr-xsrc/spdk/test/lvol/resize.sh219
-rwxr-xr-xsrc/spdk/test/lvol/snapshot_clone.sh617
-rwxr-xr-xsrc/spdk/test/lvol/tasting.sh171
-rwxr-xr-xsrc/spdk/test/lvol/thin_provisioning.sh236
-rwxr-xr-xsrc/spdk/test/make/check_so_deps.sh508
-rw-r--r--src/spdk/test/nvme/Makefile46
-rw-r--r--src/spdk/test/nvme/aer/.gitignore1
-rw-r--r--src/spdk/test/nvme/aer/Makefile38
-rw-r--r--src/spdk/test/nvme/aer/aer.c610
-rw-r--r--src/spdk/test/nvme/cuse/.gitignore1
-rw-r--r--src/spdk/test/nvme/cuse/Makefile38
-rw-r--r--src/spdk/test/nvme/cuse/cuse.c189
-rwxr-xr-xsrc/spdk/test/nvme/cuse/nvme_cuse.sh46
-rwxr-xr-xsrc/spdk/test/nvme/cuse/nvme_cuse_rpc.sh58
-rwxr-xr-xsrc/spdk/test/nvme/cuse/nvme_ns_manage_cuse.sh164
-rwxr-xr-xsrc/spdk/test/nvme/cuse/spdk_nvme_cli_cuse.sh109
-rwxr-xr-xsrc/spdk/test/nvme/cuse/spdk_smartctl_cuse.sh79
-rw-r--r--src/spdk/test/nvme/deallocated_value/.gitignore1
-rw-r--r--src/spdk/test/nvme/deallocated_value/Makefile38
-rw-r--r--src/spdk/test/nvme/deallocated_value/deallocated_value.c447
-rw-r--r--src/spdk/test/nvme/e2edp/.gitignore1
-rw-r--r--src/spdk/test/nvme/e2edp/Makefile38
-rw-r--r--src/spdk/test/nvme/e2edp/nvme_dp.c652
-rw-r--r--src/spdk/test/nvme/err_injection/.gitignore1
-rw-r--r--src/spdk/test/nvme/err_injection/Makefile38
-rw-r--r--src/spdk/test/nvme/err_injection/err_injection.c279
-rwxr-xr-xsrc/spdk/test/nvme/hotplug.sh134
-rwxr-xr-xsrc/spdk/test/nvme/hw_hotplug.sh79
-rwxr-xr-xsrc/spdk/test/nvme/nvme.sh134
-rwxr-xr-xsrc/spdk/test/nvme/nvme_opal.sh133
-rwxr-xr-xsrc/spdk/test/nvme/nvme_rpc.sh37
-rw-r--r--src/spdk/test/nvme/overhead/.gitignore1
-rw-r--r--src/spdk/test/nvme/overhead/Makefile43
-rw-r--r--src/spdk/test/nvme/overhead/README24
-rw-r--r--src/spdk/test/nvme/overhead/overhead.c730
-rw-r--r--src/spdk/test/nvme/perf/README.md103
-rwxr-xr-xsrc/spdk/test/nvme/perf/common.sh471
-rw-r--r--src/spdk/test/nvme/perf/config.fio.tmp6
-rwxr-xr-xsrc/spdk/test/nvme/perf/run_perf.sh374
-rw-r--r--src/spdk/test/nvme/reserve/.gitignore1
-rw-r--r--src/spdk/test/nvme/reserve/Makefile38
-rw-r--r--src/spdk/test/nvme/reserve/reserve.c457
-rw-r--r--src/spdk/test/nvme/reset/.gitignore1
-rw-r--r--src/spdk/test/nvme/reset/Makefile38
-rw-r--r--src/spdk/test/nvme/reset/reset.c716
-rw-r--r--src/spdk/test/nvme/sgl/.gitignore1
-rw-r--r--src/spdk/test/nvme/sgl/Makefile38
-rw-r--r--src/spdk/test/nvme/sgl/sgl.c545
-rwxr-xr-xsrc/spdk/test/nvme/spdk_nvme_cli.sh40
-rw-r--r--src/spdk/test/nvme/startup/.gitignore1
-rw-r--r--src/spdk/test/nvme/startup/Makefile38
-rw-r--r--src/spdk/test/nvme/startup/startup.c218
-rw-r--r--src/spdk/test/nvmf/README.md5
-rw-r--r--src/spdk/test/nvmf/common.sh292
-rwxr-xr-xsrc/spdk/test/nvmf/host/aer.sh50
-rwxr-xr-xsrc/spdk/test/nvmf/host/bdevperf.sh50
-rwxr-xr-xsrc/spdk/test/nvmf/host/fio.sh82
-rwxr-xr-xsrc/spdk/test/nvmf/host/identify.sh54
-rwxr-xr-xsrc/spdk/test/nvmf/host/identify_kernel_nvmf.sh71
-rwxr-xr-xsrc/spdk/test/nvmf/host/perf.sh93
-rwxr-xr-xsrc/spdk/test/nvmf/host/target_disconnect.sh89
-rwxr-xr-xsrc/spdk/test/nvmf/nvmf.sh60
-rwxr-xr-xsrc/spdk/test/nvmf/target/abort.sh35
-rwxr-xr-xsrc/spdk/test/nvmf/target/bdev_io_wait.sh45
-rwxr-xr-xsrc/spdk/test/nvmf/target/bdevio.sh29
-rwxr-xr-xsrc/spdk/test/nvmf/target/connect_disconnect.sh43
-rwxr-xr-xsrc/spdk/test/nvmf/target/create_transport.sh52
-rwxr-xr-xsrc/spdk/test/nvmf/target/discovery.sh49
-rwxr-xr-xsrc/spdk/test/nvmf/target/filesystem.sh92
-rwxr-xr-xsrc/spdk/test/nvmf/target/fio.sh77
-rwxr-xr-xsrc/spdk/test/nvmf/target/fuzz.sh43
-rwxr-xr-xsrc/spdk/test/nvmf/target/identify_passthru.sh76
-rwxr-xr-xsrc/spdk/test/nvmf/target/initiator_timeout.sh71
-rwxr-xr-xsrc/spdk/test/nvmf/target/invalid.sh63
-rwxr-xr-xsrc/spdk/test/nvmf/target/multiconnection.sh53
-rwxr-xr-xsrc/spdk/test/nvmf/target/multitarget.sh37
-rwxr-xr-xsrc/spdk/test/nvmf/target/multitarget_rpc.py84
-rwxr-xr-xsrc/spdk/test/nvmf/target/nmic.sh56
-rwxr-xr-xsrc/spdk/test/nvmf/target/nvme_cli.sh75
-rwxr-xr-xsrc/spdk/test/nvmf/target/nvmf_example.sh59
-rwxr-xr-xsrc/spdk/test/nvmf/target/nvmf_lvol.sh60
-rwxr-xr-xsrc/spdk/test/nvmf/target/nvmf_vhost.sh69
-rw-r--r--src/spdk/test/nvmf/target/nvmf_vhost_fio.job19
-rwxr-xr-xsrc/spdk/test/nvmf/target/rpc.sh124
-rwxr-xr-xsrc/spdk/test/nvmf/target/shutdown.sh155
-rwxr-xr-xsrc/spdk/test/nvmf/target/srq_overwhelm.sh50
-rw-r--r--src/spdk/test/ocf/common.sh27
-rwxr-xr-xsrc/spdk/test/ocf/integrity/bdevperf-iotypes.sh13
-rwxr-xr-xsrc/spdk/test/ocf/integrity/fio-modes.sh90
-rw-r--r--src/spdk/test/ocf/integrity/mallocs.conf59
-rwxr-xr-xsrc/spdk/test/ocf/integrity/stats.sh17
-rw-r--r--src/spdk/test/ocf/integrity/test.fio39
-rwxr-xr-xsrc/spdk/test/ocf/management/create-destruct.sh88
-rwxr-xr-xsrc/spdk/test/ocf/management/multicore.sh82
-rwxr-xr-xsrc/spdk/test/ocf/management/persistent-metadata.sh88
-rwxr-xr-xsrc/spdk/test/ocf/management/remove.sh81
-rwxr-xr-xsrc/spdk/test/ocf/ocf.sh14
-rwxr-xr-xsrc/spdk/test/openstack/install_devstack.sh51
-rwxr-xr-xsrc/spdk/test/openstack/run_openstack_tests.sh77
-rw-r--r--src/spdk/test/pmem/common.sh91
-rwxr-xr-xsrc/spdk/test/pmem/pmem.sh683
-rwxr-xr-xsrc/spdk/test/rpc/rpc.sh56
-rw-r--r--src/spdk/test/rpc/rpc_plugin.py24
-rw-r--r--src/spdk/test/rpc_client/.gitignore1
-rw-r--r--src/spdk/test/rpc_client/Makefile44
-rwxr-xr-xsrc/spdk/test/rpc_client/rpc_client.sh9
-rw-r--r--src/spdk/test/rpc_client/rpc_client_test.c461
-rw-r--r--src/spdk/test/spdk_cunit.h56
-rw-r--r--src/spdk/test/spdkcli/common.sh45
-rwxr-xr-xsrc/spdk/test/spdkcli/iscsi.sh72
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_details_lvs.test.match9
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_details_vhost.test.match32
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_details_vhost_ctrl.test.match22
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_details_vhost_target.test.match11
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_iscsi.test.match55
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_nvmf.test.match34
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_pmem.test.match3
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_pmem_info.test.match12
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_raid.test.match17
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_rbd.test.match3
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_vhost.test.match54
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_virtio_pci.test.match19
-rw-r--r--src/spdk/test/spdkcli/match_files/spdkcli_virtio_user.test.match8
-rwxr-xr-xsrc/spdk/test/spdkcli/nvmf.sh85
-rwxr-xr-xsrc/spdk/test/spdkcli/pmem.sh47
-rwxr-xr-xsrc/spdk/test/spdkcli/raid.sh46
-rwxr-xr-xsrc/spdk/test/spdkcli/rbd.sh32
-rwxr-xr-xsrc/spdk/test/spdkcli/spdkcli_job.py59
-rwxr-xr-xsrc/spdk/test/spdkcli/tcp.sh35
-rwxr-xr-xsrc/spdk/test/spdkcli/vhost.sh147
-rwxr-xr-xsrc/spdk/test/spdkcli/virtio.sh78
-rw-r--r--src/spdk/test/unit/Makefile44
-rw-r--r--src/spdk/test/unit/include/Makefile44
-rw-r--r--src/spdk/test/unit/include/spdk/Makefile44
-rw-r--r--src/spdk/test/unit/include/spdk/histogram_data.h/.gitignore1
-rw-r--r--src/spdk/test/unit/include/spdk/histogram_data.h/Makefile37
-rw-r--r--src/spdk/test/unit/include/spdk/histogram_data.h/histogram_ut.c161
-rw-r--r--src/spdk/test/unit/lib/Makefile51
-rw-r--r--src/spdk/test/unit/lib/bdev/Makefile51
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev.c/Makefile37
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c3417
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c1195
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c429
-rw-r--r--src/spdk/test/unit/lib/bdev/compress.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/compress.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c1140
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c1084
-rw-r--r--src/spdk/test/unit/lib/bdev/gpt/Makefile44
-rw-r--r--src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c363
-rw-r--r--src/spdk/test/unit/lib/bdev/mt/Makefile44
-rw-r--r--src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c1994
-rw-r--r--src/spdk/test/unit/lib/bdev/part.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/part.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/bdev/part.c/part_ut.c173
-rw-r--r--src/spdk/test/unit/lib/bdev/pmem/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/pmem/Makefile38
-rw-r--r--src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c772
-rw-r--r--src/spdk/test/unit/lib/bdev/raid/Makefile46
-rw-r--r--src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c2258
-rw-r--r--src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c214
-rw-r--r--src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile37
-rw-r--r--src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c131
-rw-r--r--src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c1440
-rw-r--r--src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c1502
-rw-r--r--src/spdk/test/unit/lib/blob/Makefile49
-rw-r--r--src/spdk/test/unit/lib/blob/blob.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/blob/blob.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/blob/blob.c/blob_ut.c6693
-rw-r--r--src/spdk/test/unit/lib/blob/bs_dev_common.c395
-rw-r--r--src/spdk/test/unit/lib/blob/bs_scheduler.c87
-rw-r--r--src/spdk/test/unit/lib/blobfs/Makefile44
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile39
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c704
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut.c348
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile39
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c703
-rw-r--r--src/spdk/test/unit/lib/blobfs/tree.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/blobfs/tree.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c150
-rw-r--r--src/spdk/test/unit/lib/event/Makefile44
-rw-r--r--src/spdk/test/unit/lib/event/app.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/event/app.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/event/app.c/app_ut.c193
-rw-r--r--src/spdk/test/unit/lib/event/reactor.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/event/reactor.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/event/reactor.c/reactor_ut.c455
-rw-r--r--src/spdk/test/unit/lib/event/subsystem.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/event/subsystem.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c255
-rw-r--r--src/spdk/test/unit/lib/ftl/Makefile44
-rw-r--r--src/spdk/test/unit/lib/ftl/common/utils.c173
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_band.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_band.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_band.c/ftl_band_ut.c307
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_io.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_io.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c1068
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_md/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_md/Makefile38
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_md/ftl_md_ut.c150
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_ppa/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_ppa/Makefile38
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_ppa/ftl_ppa_ut.c226
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_reloc.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_reloc.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_reloc.c/ftl_reloc_ut.c508
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_wptr/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_wptr/Makefile38
-rw-r--r--src/spdk/test/unit/lib/ftl/ftl_wptr/ftl_wptr_ut.c223
-rw-r--r--src/spdk/test/unit/lib/idxd/Makefile44
-rw-r--r--src/spdk/test/unit/lib/idxd/idxd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/idxd/idxd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/idxd/idxd.c/idxd_ut.c300
-rw-r--r--src/spdk/test/unit/lib/ioat/Makefile44
-rw-r--r--src/spdk/test/unit/lib/ioat/ioat.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/ioat/ioat.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c144
-rw-r--r--src/spdk/test/unit/lib/iscsi/Makefile44
-rw-r--r--src/spdk/test/unit/lib/iscsi/common.c209
-rw-r--r--src/spdk/test/unit/lib/iscsi/conn.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/conn.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c927
-rw-r--r--src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf31
-rw-r--r--src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c674
-rw-r--r--src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile46
-rw-r--r--src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c2024
-rw-r--r--src/spdk/test/unit/lib/iscsi/param.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/param.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/iscsi/param.c/param_ut.c400
-rw-r--r--src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c419
-rw-r--r--src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf95
-rw-r--r--src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c832
-rw-r--r--src/spdk/test/unit/lib/json/Makefile44
-rw-r--r--src/spdk/test/unit/lib/json/json_parse.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/json/json_parse.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c931
-rw-r--r--src/spdk/test/unit/lib/json/json_util.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/json/json_util.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c954
-rw-r--r--src/spdk/test/unit/lib/json/json_write.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/json/json_write.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c736
-rw-r--r--src/spdk/test/unit/lib/json_mock.c81
-rw-r--r--src/spdk/test/unit/lib/jsonrpc/Makefile44
-rw-r--r--src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c410
-rw-r--r--src/spdk/test/unit/lib/log/Makefile44
-rw-r--r--src/spdk/test/unit/lib/log/log.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/log/log.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/log/log.c/log_ut.c106
-rw-r--r--src/spdk/test/unit/lib/lvol/Makefile44
-rw-r--r--src/spdk/test/unit/lib/lvol/lvol.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/lvol/lvol.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c2096
-rw-r--r--src/spdk/test/unit/lib/notify/Makefile44
-rw-r--r--src/spdk/test/unit/lib/notify/notify.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/notify/notify.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/notify/notify.c/notify_ut.c111
-rw-r--r--src/spdk/test/unit/lib/nvme/Makefile47
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c1376
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c2150
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c751
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c106
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c153
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c1739
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c650
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c498
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c484
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c625
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c92
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c406
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c459
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c165
-rw-r--r--src/spdk/test/unit/lib/nvmf/Makefile48
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c1711
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c415
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c303
-rw-r--r--src/spdk/test/unit/lib/nvmf/fc.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/fc.c/Makefile58
-rw-r--r--src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c505
-rw-r--r--src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile45
-rw-r--r--src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c1070
-rw-r--r--src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/rdma.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c1283
-rw-r--r--src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c1342
-rw-r--r--src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/tcp.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c722
-rw-r--r--src/spdk/test/unit/lib/reduce/Makefile44
-rw-r--r--src/spdk/test/unit/lib/reduce/reduce.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/reduce/reduce.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/reduce/reduce.c/reduce_ut.c1300
-rw-r--r--src/spdk/test/unit/lib/scsi/Makefile44
-rw-r--r--src/spdk/test/unit/lib/scsi/dev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/scsi/dev.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c682
-rw-r--r--src/spdk/test/unit/lib/scsi/lun.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/scsi/lun.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c750
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c69
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c1037
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi_pr.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi_pr.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi_pr.c/scsi_pr_ut.c673
-rw-r--r--src/spdk/test/unit/lib/sock/Makefile48
-rw-r--r--src/spdk/test/unit/lib/sock/posix.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/sock/posix.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/sock/posix.c/posix_ut.c174
-rw-r--r--src/spdk/test/unit/lib/sock/sock.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/sock/sock.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/sock/sock.c/sock_ut.c982
-rw-r--r--src/spdk/test/unit/lib/sock/uring.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/sock/uring.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/sock/uring.c/uring_ut.c272
-rw-r--r--src/spdk/test/unit/lib/thread/Makefile44
-rw-r--r--src/spdk/test/unit/lib/thread/thread.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/thread/thread.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/thread/thread.c/thread_ut.c1270
-rw-r--r--src/spdk/test/unit/lib/util/Makefile45
-rw-r--r--src/spdk/test/unit/lib/util/base64.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/base64.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/util/base64.c/base64_ut.c381
-rw-r--r--src/spdk/test/unit/lib/util/bit_array.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/bit_array.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c376
-rw-r--r--src/spdk/test/unit/lib/util/cpuset.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/cpuset.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c262
-rw-r--r--src/spdk/test/unit/lib/util/crc16.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/crc16.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c104
-rw-r--r--src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c74
-rw-r--r--src/spdk/test/unit/lib/util/crc32c.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/crc32c.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c145
-rw-r--r--src/spdk/test/unit/lib/util/dif.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/dif.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/util/dif.c/dif_ut.c2669
-rw-r--r--src/spdk/test/unit/lib/util/iov.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/iov.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/util/iov.c/iov_ut.c249
-rw-r--r--src/spdk/test/unit/lib/util/math.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/math.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/util/math.c/math_ut.c81
-rw-r--r--src/spdk/test/unit/lib/util/pipe.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/pipe.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/util/pipe.c/pipe_ut.c653
-rw-r--r--src/spdk/test/unit/lib/util/string.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/string.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/util/string.c/string_ut.c407
-rw-r--r--src/spdk/test/unit/lib/vhost/Makefile44
-rw-r--r--src/spdk/test/unit/lib/vhost/vhost.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/vhost/vhost.c/Makefile44
-rw-r--r--src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c547
-rwxr-xr-xsrc/spdk/test/unit/unittest.sh253
-rw-r--r--src/spdk/test/vhost/common.sh1266
-rw-r--r--src/spdk/test/vhost/common/autotest.config38
-rw-r--r--src/spdk/test/vhost/common/fio_jobs/default_initiator.job11
-rw-r--r--src/spdk/test/vhost/common/fio_jobs/default_integrity.job19
-rw-r--r--src/spdk/test/vhost/common/fio_jobs/default_integrity_nightly.job23
-rw-r--r--src/spdk/test/vhost/common/fio_jobs/default_performance.job16
-rwxr-xr-xsrc/spdk/test/vhost/fio/fio.sh58
-rw-r--r--src/spdk/test/vhost/fio/vhost_fio.job19
-rwxr-xr-xsrc/spdk/test/vhost/fiotest/fio.sh288
-rwxr-xr-xsrc/spdk/test/vhost/fuzz/fuzz.sh66
-rw-r--r--src/spdk/test/vhost/hotplug/blk_hotremove.sh235
-rw-r--r--src/spdk/test/vhost/hotplug/common.sh230
-rw-r--r--src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job16
-rwxr-xr-xsrc/spdk/test/vhost/hotplug/scsi_hotattach.sh103
-rwxr-xr-xsrc/spdk/test/vhost/hotplug/scsi_hotdetach.sh212
-rwxr-xr-xsrc/spdk/test/vhost/hotplug/scsi_hotplug.sh92
-rw-r--r--src/spdk/test/vhost/hotplug/scsi_hotremove.sh233
-rw-r--r--src/spdk/test/vhost/initiator/autotest.config5
-rw-r--r--src/spdk/test/vhost/initiator/bdev.fio51
-rw-r--r--src/spdk/test/vhost/initiator/bdev_pci.conf2
-rwxr-xr-xsrc/spdk/test/vhost/initiator/blockdev.sh82
-rwxr-xr-xsrc/spdk/test/vhost/integrity/integrity_start.sh106
-rwxr-xr-xsrc/spdk/test/vhost/integrity/integrity_vm.sh83
-rw-r--r--src/spdk/test/vhost/lvol/autotest.config74
-rwxr-xr-xsrc/spdk/test/vhost/lvol/lvol_test.sh289
-rwxr-xr-xsrc/spdk/test/vhost/manual.sh86
-rw-r--r--src/spdk/test/vhost/migration/autotest.config14
-rw-r--r--src/spdk/test/vhost/migration/migration-tc1.job25
-rw-r--r--src/spdk/test/vhost/migration/migration-tc1.sh119
-rw-r--r--src/spdk/test/vhost/migration/migration-tc2.job20
-rw-r--r--src/spdk/test/vhost/migration/migration-tc2.sh203
-rw-r--r--src/spdk/test/vhost/migration/migration-tc3.job20
-rw-r--r--src/spdk/test/vhost/migration/migration-tc3a.sh218
-rw-r--r--src/spdk/test/vhost/migration/migration-tc3b.sh77
-rwxr-xr-xsrc/spdk/test/vhost/migration/migration.sh143
-rw-r--r--src/spdk/test/vhost/other/invalid.config18
-rwxr-xr-xsrc/spdk/test/vhost/other/negative.sh209
-rwxr-xr-xsrc/spdk/test/vhost/perf_bench/vhost_perf.sh473
-rwxr-xr-xsrc/spdk/test/vhost/readonly/delete_partition_vm.sh42
-rwxr-xr-xsrc/spdk/test/vhost/readonly/disabled_readonly_vm.sh47
-rwxr-xr-xsrc/spdk/test/vhost/readonly/enabled_readonly_vm.sh72
-rwxr-xr-xsrc/spdk/test/vhost/readonly/readonly.sh136
-rw-r--r--src/spdk/test/vhost/shared/bdev.json20
-rwxr-xr-xsrc/spdk/test/vhost/shared/shared.sh32
-rwxr-xr-xsrc/spdk/test/vhost/vhost.sh107
-rwxr-xr-xsrc/spdk/test/vhost/vhost_boot/vhost_boot.sh126
-rwxr-xr-xsrc/spdk/test/vhost/windows/windows.sh141
-rw-r--r--src/spdk/test/vhost/windows/windows_fs_test.ps178
-rw-r--r--src/spdk/test/vhost/windows/windows_scsi_compliance.ps173
-rwxr-xr-xsrc/spdk/test/vhost/windows/windows_scsi_compliance.py147
-rwxr-xr-xsrc/spdk/test/vhost/windows/windows_scsi_compliance.sh89
-rw-r--r--src/spdk/test/vmd/config/config.fio18
-rwxr-xr-xsrc/spdk/test/vmd/vmd.sh78
688 files changed, 122799 insertions, 0 deletions
diff --git a/src/spdk/test/Makefile b/src/spdk/test/Makefile
new file mode 100644
index 000000000..ec88de032
--- /dev/null
+++ b/src/spdk/test/Makefile
@@ -0,0 +1,48 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+# These directories contain tests.
+TESTDIRS = app bdev blobfs cpp_headers env event nvme rpc_client
+
+DIRS-$(CONFIG_TESTS) += $(TESTDIRS)
+DIRS-$(CONFIG_UNIT_TESTS) += unit
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/app/Makefile b/src/spdk/test/app/Makefile
new file mode 100644
index 000000000..2eb259018
--- /dev/null
+++ b/src/spdk/test/app/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y += bdev_svc fuzz histogram_perf jsoncat stub
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/app/bdev_svc/.gitignore b/src/spdk/test/app/bdev_svc/.gitignore
new file mode 100644
index 000000000..77ddb987e
--- /dev/null
+++ b/src/spdk/test/app/bdev_svc/.gitignore
@@ -0,0 +1 @@
+bdev_svc
diff --git a/src/spdk/test/app/bdev_svc/Makefile b/src/spdk/test/app/bdev_svc/Makefile
new file mode 100644
index 000000000..1736d57f0
--- /dev/null
+++ b/src/spdk/test/app/bdev_svc/Makefile
@@ -0,0 +1,63 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = bdev_svc
+
+C_SRCS := bdev_svc.c
+
+SPDK_LIB_LIST = $(ALL_MODULES_LIST)
+SPDK_LIB_LIST += $(EVENT_BDEV_SUBSYSTEM)
+SPDK_LIB_LIST += nvmf event log trace conf thread util bdev accel rpc jsonrpc json sock blobfs_bdev
+SPDK_LIB_LIST += app_rpc log_rpc bdev_rpc notify
+
+ifeq ($(OS),Linux)
+SPDK_LIB_LIST += event_nbd nbd
+endif
+
+ifeq ($(CONFIG_FC),y)
+ifneq ($(strip $(CONFIG_FC_PATH)),)
+SYS_LIBS += -L$(CONFIG_FC_PATH)
+endif
+SYS_LIBS += -lufc
+endif
+
+# libfuse3 is required internally by blobfs_bdev
+ifeq ($(CONFIG_FUSE),y)
+LIBS+= -L/usr/local/lib -lfuse3
+endif
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/app/bdev_svc/bdev_svc.c b/src/spdk/test/app/bdev_svc/bdev_svc.c
new file mode 100644
index 000000000..84580d3f6
--- /dev/null
+++ b/src/spdk/test/app/bdev_svc/bdev_svc.c
@@ -0,0 +1,112 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/env.h"
+#include "spdk/event.h"
+
+static char g_path[256];
+static bool g_unaffinitize_thread = false;
+
+static void
+bdev_svc_usage(void)
+{
+}
+
+static int
+bdev_svc_parse_arg(int ch, char *arg)
+{
+ return 0;
+}
+
+static void
+bdev_svc_start(void *arg1)
+{
+ int fd;
+ int shm_id = (intptr_t)arg1;
+
+ if (g_unaffinitize_thread) {
+ spdk_unaffinitize_thread();
+ }
+
+ snprintf(g_path, sizeof(g_path), "/var/run/spdk_bdev%d", shm_id);
+ fd = open(g_path, O_CREAT | O_EXCL | O_RDWR, S_IFREG);
+ if (fd < 0) {
+ fprintf(stderr, "could not create sentinel file %s\n", g_path);
+ exit(1);
+ }
+ close(fd);
+}
+
+static void
+bdev_svc_shutdown(void)
+{
+ unlink(g_path);
+ spdk_app_stop(0);
+}
+
+int
+main(int argc, char **argv)
+{
+ int rc;
+ struct spdk_app_opts opts = {};
+
+ /* default value in opts structure */
+ spdk_app_opts_init(&opts);
+
+ opts.name = "bdev_svc";
+ opts.shutdown_cb = bdev_svc_shutdown;
+
+ if ((rc = spdk_app_parse_args(argc, argv, &opts, "", NULL,
+ bdev_svc_parse_arg, bdev_svc_usage)) !=
+ SPDK_APP_PARSE_ARGS_SUCCESS) {
+ exit(rc);
+ }
+
+ /* User did not specify a reactor mask. Test scripts may do this when using
+ * bdev_svc as a primary process to speed up nvme test programs by running
+ * them as secondary processes. In that case, we will unaffinitize the thread
+ * in the bdev_svc_start routine, which will allow the scheduler to move this
+ * thread so it doesn't conflict with pinned threads in the secondary processes.
+ */
+ if (opts.reactor_mask == NULL) {
+ g_unaffinitize_thread = true;
+ }
+
+ rc = spdk_app_start(&opts, bdev_svc_start, (void *)(intptr_t)opts.shm_id);
+
+ spdk_app_fini();
+
+ return rc;
+}
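
For reference, bdev_svc signals that startup is complete by creating the sentinel file /var/run/spdk_bdev<shm_id> in bdev_svc_start() above. Below is a minimal sketch, not part of this commit, of how a test harness might poll for that sentinel before launching secondary processes; only the path format comes from the code above, and the helper name and timeout are hypothetical.

/*
 * Illustrative sketch only: wait until bdev_svc has created its sentinel
 * file, using plain POSIX calls. Not part of the committed sources.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>

static int
wait_for_bdev_svc(int shm_id, int timeout_s)	/* hypothetical helper */
{
	char path[256];
	struct stat st;
	int waited_ms = 0;

	snprintf(path, sizeof(path), "/var/run/spdk_bdev%d", shm_id);
	while (stat(path, &st) != 0) {
		if (waited_ms >= timeout_s * 1000) {
			return -1;	/* bdev_svc did not come up in time */
		}
		usleep(100 * 1000);	/* poll every 100 ms */
		waited_ms += 100;
	}
	return 0;	/* sentinel exists, secondary processes can start */
}
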
diff --git a/src/spdk/test/app/fuzz/Makefile b/src/spdk/test/app/fuzz/Makefile
new file mode 100644
index 000000000..1f0a81b92
--- /dev/null
+++ b/src/spdk/test/app/fuzz/Makefile
@@ -0,0 +1,48 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y += nvme_fuzz
+DIRS-y += iscsi_fuzz
+
+ifeq ($(OS),Linux)
+DIRS-$(CONFIG_VIRTIO) += vhost_fuzz
+endif
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/app/fuzz/common/fuzz_common.h b/src/spdk/test/app/fuzz/common/fuzz_common.h
new file mode 100644
index 000000000..7619f4fb1
--- /dev/null
+++ b/src/spdk/test/app/fuzz/common/fuzz_common.h
@@ -0,0 +1,303 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/env.h"
+#include "spdk/file.h"
+#include "spdk/base64.h"
+#include "spdk/json.h"
+
+#define DEFAULT_RUNTIME 30 /* seconds */
+#define MAX_RUNTIME_S 86400 /* 24 hours */
+#define IO_TIMEOUT_S 5
+
+#define UNSIGNED_2BIT_MAX ((1 << 2) - 1)
+#define UNSIGNED_4BIT_MAX ((1 << 4) - 1)
+#define UNSIGNED_8BIT_MAX ((1 << 8) - 1)
+
+typedef bool (*json_parse_fn)(void *ele, struct spdk_json_val *val, size_t num_vals);
+
+static void
+fuzz_fill_random_bytes(char *character_repr, size_t len, unsigned int *rand_seed)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ character_repr[i] = rand_r(rand_seed) % UINT8_MAX;
+ }
+}
+
+static uint64_t
+fuzz_refresh_timeout(void)
+{
+ uint64_t current_ticks;
+ uint64_t new_timeout_ticks;
+
+ current_ticks = spdk_get_ticks();
+
+ new_timeout_ticks = current_ticks + IO_TIMEOUT_S * spdk_get_ticks_hz();
+ assert(new_timeout_ticks > current_ticks);
+
+ return new_timeout_ticks;
+}
+
+static char *
+fuzz_get_value_base_64_buffer(void *item, size_t len)
+{
+ char *value_string;
+ size_t total_size;
+ int rc;
+
+ /* +1 for the terminating null byte of the encoded string */
+ total_size = spdk_base64_get_encoded_strlen(len) + 1;
+
+ value_string = calloc(1, total_size);
+ if (value_string == NULL) {
+ return NULL;
+ }
+
+ rc = spdk_base64_encode(value_string, item, len);
+ if (rc < 0) {
+ free(value_string);
+ return NULL;
+ }
+
+ return value_string;
+}
+
+static int
+fuzz_get_base_64_buffer_value(void *item, size_t len, char *buf, size_t buf_len)
+{
+ size_t size_of_data;
+ char *new_buf;
+ int rc;
+
+ new_buf = malloc(buf_len + 1);
+ if (new_buf == NULL) {
+ return -ENOMEM;
+ }
+
+ snprintf(new_buf, buf_len + 1, "%s", buf);
+
+ size_of_data = spdk_base64_get_decoded_len(buf_len);
+
+ if (size_of_data < len) {
+ free(new_buf);
+ return -EINVAL;
+ }
+
+ rc = spdk_base64_decode(item, &size_of_data, new_buf);
+
+ if (rc || size_of_data != len) {
+ free(new_buf);
+ return -EINVAL;
+ }
+
+ free(new_buf);
+ return 0;
+}
+
+static ssize_t
+read_json_into_buffer(const char *filename, struct spdk_json_val **values, void **file_data)
+{
+ FILE *file = fopen(filename, "r");
+ size_t file_data_size;
+ ssize_t num_json_values = 0, rc;
+
+ if (file == NULL) {
+ /* errno is set by fopen */
+ return 0;
+ }
+
+ *file_data = spdk_posix_file_load(file, &file_data_size);
+ if (*file_data == NULL) {
+ fclose(file);
+ return 0;
+ }
+
+ fclose(file);
+
+ num_json_values = spdk_json_parse(*file_data, file_data_size, NULL, 0, NULL,
+ SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+
+ *values = calloc(num_json_values, sizeof(**values));
+ if (*values == NULL) {
+ free(*file_data);
+ *file_data = NULL;
+ return 0;
+ }
+
+ rc = spdk_json_parse(*file_data, file_data_size, *values, num_json_values, NULL,
+ SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ if (num_json_values != rc) {
+ free(*values);
+ *values = NULL;
+ free(*file_data);
+ *file_data = NULL;
+ return 0;
+ }
+
+ return num_json_values;
+}
+
+static size_t
+double_arr_size(void **buffer, size_t num_ele, size_t ele_size)
+{
+ void *tmp;
+ size_t new_num_ele, allocation_size;
+
+ if (num_ele > SIZE_MAX / 2) {
+ return 0;
+ }
+
+ new_num_ele = num_ele * 2;
+
+ if (new_num_ele > SIZE_MAX / ele_size) {
+ return 0;
+ }
+
+ allocation_size = new_num_ele * ele_size;
+
+ tmp = realloc(*buffer, allocation_size);
+ if (tmp != NULL) {
+ *buffer = tmp;
+ return new_num_ele;
+ }
+
+ return 0;
+}
+
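+/*
+ * Scan the JSON file for objects whose name matches obj_name and parse each
+ * one into the next ele_size-byte slot of *arr via cb_fn, growing the array
+ * as needed. Returns the number of elements parsed; on failure, frees *arr,
+ * sets it to NULL, and returns 0.
+ */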
+static uint64_t
+fuzz_parse_args_into_array(const char *file, void **arr, size_t ele_size, const char *obj_name,
+ json_parse_fn cb_fn)
+{
+ ssize_t i, num_json_values;
+ struct spdk_json_val *values = NULL, *values_head = NULL, *obj_start;
+ void *file_data = NULL;
+ char *arr_idx_pointer;
+ size_t num_arr_elements, arr_elements_used, values_in_obj;
+ bool rc;
+
+ num_json_values = read_json_into_buffer(file, &values_head, &file_data);
+ values = values_head;
+ if (num_json_values == 0 || values == NULL) {
+ if (file_data != NULL) {
+ free(file_data);
+ }
+ fprintf(stderr, "The file provided does not exist or we were unable to parse it.\n");
+ return 0;
+ }
+
+ num_arr_elements = 10;
+ arr_elements_used = 0;
+ *arr = calloc(num_arr_elements, ele_size);
+ arr_idx_pointer = (char *)*arr;
+ if (arr_idx_pointer == NULL) {
+ free(values);
+ free(file_data);
+ return 0;
+ }
+
+ i = 0;
+ while (i < num_json_values) {
+ if (values->type != SPDK_JSON_VAL_NAME) {
+ i++;
+ values++;
+ continue;
+ }
+
+ if (!strncmp(values->start, obj_name, values->len)) {
+ i++;
+ values++;
+ assert(values->type == SPDK_JSON_VAL_OBJECT_BEGIN);
+ obj_start = values;
+ values_in_obj = spdk_json_val_len(obj_start);
+ values += values_in_obj;
+ i += values_in_obj;
+
+ rc = cb_fn((void *)arr_idx_pointer, obj_start, values_in_obj);
+ if (rc == false) {
+ fprintf(stderr, "failed to parse file after %lu elements.\n", arr_elements_used);
+ goto fail;
+ }
+
+ arr_idx_pointer += ele_size;
+ arr_elements_used++;
+ if (arr_elements_used == num_arr_elements) {
+ num_arr_elements = double_arr_size(arr, num_arr_elements, ele_size);
+ if (num_arr_elements == 0) {
+ fprintf(stderr, "failed to allocate enough space for all json elements in your file.\n");
+ goto fail;
+ } else {
+ /* reset the array element position in case the pointer changed. */
+ arr_idx_pointer = ((char *)*arr) + arr_elements_used * ele_size;
+ }
+ }
+
+ continue;
+ } else {
+ i++;
+ values++;
+ continue;
+ }
+ }
+
+ if (arr_elements_used == 0) {
+ goto fail;
+ }
+
+ free(values_head);
+ free(file_data);
+ return arr_elements_used;
+fail:
+ free(values_head);
+ free(file_data);
+ free(*arr);
+ *arr = NULL;
+ return 0;
+}
+
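+/* Parse a JSON number into *val_ptr, rejecting values larger than max_val. */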
+static int
+fuzz_parse_json_num(struct spdk_json_val *val, uint64_t max_val, uint64_t *val_ptr)
+{
+ uint64_t tmp_val;
+ int rc;
+
+ rc = spdk_json_number_to_uint64(val, &tmp_val);
+ if (rc || tmp_val > max_val) {
+ return -EINVAL;
+ } else {
+ *val_ptr = tmp_val;
+ return 0;
+ }
+}
diff --git a/src/spdk/test/app/fuzz/common/fuzz_rpc.py b/src/spdk/test/app/fuzz/common/fuzz_rpc.py
new file mode 100755
index 000000000..05cb67ed8
--- /dev/null
+++ b/src/spdk/test/app/fuzz/common/fuzz_rpc.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+
+from rpc.client import print_dict, JSONRPCException
+
+import logging
+import argparse
+import rpc
+import sys
+import shlex
+
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
+
+def print_array(a):
+ print(" ".join((quote(v) for v in a)))
+
+
+def _fuzz_vhost_create_dev(client, socket, is_blk, use_bogus_buffer, use_valid_buffer, test_scsi_tmf, valid_lun):
+ """Create a new device in the vhost fuzzer.
+
+ Args:
+ socket: A valid unix domain socket for the dev to bind to.
+ is_blk: if set, create a virtio_blk device, otherwise use scsi.
+ use_bogus_buffer: if set, pass an invalid memory address as a buffer accompanying requests.
+ use_valid_buffer: if set, pass in a valid memory buffer with requests. Overrides use_bogus_buffer.
+ test_scsi_tmf: Test scsi management commands on the given device. Valid if and only if is_blk is false.
+ valid_lun: Supply only a valid lun number when submitting commands to the given device. Valid if and only if is_blk is false.
+
+ Returns:
+ True or False
+ """
+
+ params = {"socket": socket,
+ "is_blk": is_blk,
+ "use_bogus_buffer": use_bogus_buffer,
+ "use_valid_buffer": use_valid_buffer,
+ "test_scsi_tmf": test_scsi_tmf,
+ "valid_lun": valid_lun}
+
+ return client.call("fuzz_vhost_create_dev", params)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description='SPDK RPC command line interface. NOTE: spdk/scripts/ is expected in PYTHONPATH')
+ parser.add_argument('-s', dest='server_addr',
+ help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock')
+ parser.add_argument('-p', dest='port',
+ help='RPC port number (if server_addr is IP address)',
+ default=5260, type=int)
+ parser.add_argument('-t', dest='timeout',
+ help='Timeout as a floating point number expressed in seconds waiting for response. Default: 60.0',
+ default=60.0, type=float)
+ parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
+ help='Set verbose mode to INFO', default="ERROR")
+ parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
+ help="""Set verbose level. """)
+ subparsers = parser.add_subparsers(help='RPC methods')
+
+ def fuzz_vhost_create_dev(args):
+ _fuzz_vhost_create_dev(
+ args.client,
+ args.socket,
+ args.is_blk,
+ args.use_bogus_buffer,
+ args.use_valid_buffer,
+ args.test_scsi_tmf,
+ args.valid_lun)
+
+ p = subparsers.add_parser('fuzz_vhost_create_dev', help="Add a new device to the vhost fuzzer.")
+ p.add_argument('-s', '--socket', help="Path to a valid unix domain socket for dev binding.")
+ p.add_argument('-b', '--is-blk', help='The specified socket corresponds to a vhost-blk dev.', action='store_true')
+ p.add_argument('-u', '--use-bogus-buffer', help='Pass bogus buffer addresses with requests when fuzzing.', action='store_true')
+ p.add_argument('-v', '--use-valid-buffer', help='Pass valid buffers when fuzzing. Overrides use-bogus-buffer.', action='store_true')
+ p.add_argument('-m', '--test-scsi-tmf', help='For a scsi device, test scsi management commands.', action='store_true')
+ p.add_argument('-l', '--valid-lun', help='For a scsi device, test only using valid lun IDs.', action='store_true')
+ p.set_defaults(func=fuzz_vhost_create_dev)
+
+ def call_rpc_func(args):
+ try:
+ args.func(args)
+ except JSONRPCException as ex:
+ print(ex.message)
+ exit(1)
+
+ def execute_script(parser, client, fd):
+ for rpc_call in map(str.rstrip, fd):
+ if not rpc_call.strip():
+ continue
+ args = parser.parse_args(shlex.split(rpc_call))
+ args.client = client
+ call_rpc_func(args)
+
+ args = parser.parse_args()
+ args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper()))
+ if hasattr(args, 'func'):
+ call_rpc_func(args)
+ elif sys.stdin.isatty():
+ # No arguments and no data piped through stdin
+ parser.print_help()
+ exit(1)
+ else:
+ execute_script(parser, args.client, sys.stdin)
diff --git a/src/spdk/test/app/fuzz/iscsi_fuzz/.gitignore b/src/spdk/test/app/fuzz/iscsi_fuzz/.gitignore
new file mode 100644
index 000000000..3982bd220
--- /dev/null
+++ b/src/spdk/test/app/fuzz/iscsi_fuzz/.gitignore
@@ -0,0 +1 @@
+iscsi_fuzz
diff --git a/src/spdk/test/app/fuzz/iscsi_fuzz/Makefile b/src/spdk/test/app/fuzz/iscsi_fuzz/Makefile
new file mode 100644
index 000000000..0131e089a
--- /dev/null
+++ b/src/spdk/test/app/fuzz/iscsi_fuzz/Makefile
@@ -0,0 +1,51 @@
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = iscsi_fuzz
+
+CFLAGS += -I$(SPDK_ROOT_DIR)/lib
+CFLAGS += -I$(SPDK_ROOT_DIR)/include
+
+C_SRCS := iscsi_fuzz.c
+
+ISCSI_OBJS = md5 param conn tgt_node init_grp portal_grp
+
+LIBS += $(SCSI_OBJS:%=$(SPDK_ROOT_DIR)/lib/scsi/%.o)
+LIBS += $(ISCSI_OBJS:%=$(SPDK_ROOT_DIR)/lib/iscsi/%.o)
+
+SPDK_LIB_LIST += $(SOCK_MODULES_LIST)
+SPDK_LIB_LIST += conf event json jsonrpc log scsi bdev notify rpc sock thread trace util
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/app/fuzz/iscsi_fuzz/README.md b/src/spdk/test/app/fuzz/iscsi_fuzz/README.md
new file mode 100644
index 000000000..c9ef02e6a
--- /dev/null
+++ b/src/spdk/test/app/fuzz/iscsi_fuzz/README.md
@@ -0,0 +1,27 @@
+# Overview
+
+This application is intended to fuzz test the iSCSI target by submitting
+randomized PDU commands through a simulated iSCSI initiator.
+
+# Input
+
+1. The iSCSI initiator sends a login request PDU to the iSCSI target. Once a session is established,
+2. the iSCSI initiator continuously sends a large number of randomized PDUs to the iSCSI target, and
+3. the iSCSI initiator sends a logout request PDU to the iSCSI target at the end.
+In particular, the initiator must build a different BHS for each BHS opcode, and it will then
+receive all kinds of response opcodes back from the iSCSI target.
+The application will terminate when run time expires (see the -t flag).
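+
+For reference, an invocation might look like the following (the target address, seed value, and
+runtime shown here are only examples):
+
+~~~
+./iscsi_fuzz -T 127.0.0.1 -S 42 -t 30
+~~~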
+
+# Output
+
+By default, the fuzzer will print commands that:
+
+1. Complete successfully back from the target, or
+2. Are outstanding at the time a connection error occurs.
+
+Commands are dumped as named objects in JSON format which can then be supplied back to the
+application for targeted debugging on a subsequent run.
+
+At the end of each test run, a summary is printed in the following format:
+
+~~~
+device 0x11c3b90 stats: Sent 1543 valid opcode PDUs, 16215 invalid opcode PDUs.
+~~~
diff --git a/src/spdk/test/app/fuzz/iscsi_fuzz/iscsi_fuzz.c b/src/spdk/test/app/fuzz/iscsi_fuzz/iscsi_fuzz.c
new file mode 100644
index 000000000..359b95981
--- /dev/null
+++ b/src/spdk/test/app/fuzz/iscsi_fuzz/iscsi_fuzz.c
@@ -0,0 +1,1092 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/conf.h"
+#include "spdk/event.h"
+#include "spdk/util.h"
+#include "spdk/string.h"
+#include "spdk/likely.h"
+#include "spdk/json.h"
+#include "spdk/endian.h"
+#include "spdk/bdev.h"
+#include "spdk/notify.h"
+#include "spdk/scsi.h"
+#include "spdk_internal/mock.h"
+#include "spdk/scsi_spec.h"
+#include "iscsi/conn.h"
+#include "iscsi/iscsi.c"
+#include "scsi/scsi_internal.h"
+#include "spdk/sock.h"
+
+#define GET_PDU_LOOP_COUNT 16
+#define DEFAULT_RUNTIME 30 /* seconds */
+#define MAX_RUNTIME_S 86400 /* 24 hours */
+
+/* Global run state */
+uint64_t g_runtime_ticks;
+int g_runtime;
+int g_num_active_threads;
+bool g_run = true;
+bool g_is_valid_opcode = true;
+
+struct spdk_log_flag SPDK_LOG_ISCSI = {
+ .name = "iscsi",
+ .enabled = false,
+};
+
+/* Global resources */
+TAILQ_HEAD(, spdk_iscsi_pdu) g_get_pdu_list;
+TAILQ_HEAD(, fuzz_iscsi_dev_ctx) g_dev_list = TAILQ_HEAD_INITIALIZER(g_dev_list);
+struct spdk_poller *g_app_completion_poller;
+void *g_valid_buffer;
+unsigned int g_random_seed;
+char *g_tgt_ip = "127.0.0.1";
+char *g_tgt_port = "3260";
+/* TBD: Discovery login to get target information. We use fixed IQN for target for now. */
+char *g_tgt_name = "iqn.2016-06.io.spdk:disk1";
+char *g_init_name = "iqn.2016-06.io.spdk:fuzzinit";
+
+struct fuzz_iscsi_iov_ctx {
+ struct iovec iov_req;
+ struct iovec iov_data;
+ struct iovec iov_resp;
+};
+
+struct fuzz_iscsi_io_ctx {
+ struct fuzz_iscsi_iov_ctx iov_ctx;
+ union {
+ struct iscsi_bhs *bhs;
+ struct iscsi_bhs_nop_out *nop_out_req;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_task_req *task_req;
+ struct iscsi_bhs_login_req *login_req;
+ struct iscsi_bhs_text_req *text_req;
+ struct iscsi_bhs_data_out *data_out_req;
+ struct iscsi_bhs_logout_req *logout_req;
+ struct iscsi_bhs_snack_req *snack_req;
+ } req;
+};
+
+struct fuzz_iscsi_dev_ctx {
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn *conn;
+ struct fuzz_iscsi_io_ctx io_ctx;
+
+ struct spdk_thread *thread;
+ struct spdk_poller *poller;
+ unsigned int random_seed, current_cmd_sn;
+ uint64_t num_sent_pdus;
+ uint64_t num_valid_pdus;
+
+ TAILQ_ENTRY(fuzz_iscsi_dev_ctx) link;
+};
+
+static void
+fuzz_fill_random_bytes(char *character_repr, size_t len, unsigned int *rand_seed)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ character_repr[i] = rand_r(rand_seed) % UINT8_MAX;
+ }
+}
+
+static char *
+fuzz_get_value_base_64_buffer(void *item, size_t len)
+{
+ char *value_string;
+ size_t total_size;
+ int rc;
+
+ /* +1 for the NUL terminator. */
+ total_size = spdk_base64_get_encoded_strlen(len) + 1;
+
+ value_string = calloc(1, total_size);
+ if (value_string == NULL) {
+ return NULL;
+ }
+
+ rc = spdk_base64_encode(value_string, item, len);
+ if (rc < 0) {
+ free(value_string);
+ return NULL;
+ }
+
+ return value_string;
+}
+
+int
+iscsi_chap_get_authinfo(struct iscsi_chap_auth *auth, const char *authuser,
+ int ag_tag)
+{
+ return 0;
+}
+
+void
+shutdown_iscsi_conns_done(void)
+{
+ return;
+}
+
+void
+iscsi_put_pdu(struct spdk_iscsi_pdu *pdu)
+{
+ if (!pdu) {
+ return;
+ }
+
+ pdu->ref--;
+ if (pdu->ref < 0) {
+ pdu->ref = 0;
+ }
+
+ if (pdu->ref == 0) {
+ if (pdu->data) {
+ free(pdu->data);
+ }
+ free(pdu);
+ }
+}
+
+struct spdk_iscsi_pdu *
+iscsi_get_pdu(struct spdk_iscsi_conn *conn)
+{
+ struct spdk_iscsi_pdu *pdu;
+
+ pdu = calloc(1, sizeof(*pdu));
+ if (!pdu) {
+ return NULL;
+ }
+
+ pdu->ref = 1;
+ pdu->conn = conn;
+
+ return pdu;
+}
+
+static void
+iscsi_task_free(struct spdk_scsi_task *scsi_task)
+{
+ struct spdk_iscsi_task *task = iscsi_task_from_scsi_task(scsi_task);
+
+ assert(task->parent == NULL);
+
+ iscsi_task_disassociate_pdu(task);
+ assert(task->conn->pending_task_cnt > 0);
+ task->conn->pending_task_cnt--;
+ free(task);
+}
+
+struct spdk_iscsi_task *
+iscsi_task_get(struct spdk_iscsi_conn *conn, struct spdk_iscsi_task *parent,
+ spdk_scsi_task_cpl cpl_fn)
+{
+ struct spdk_iscsi_task *task;
+
+ /* iSCSI subtask is not necessary for now. */
+ assert(parent == NULL);
+
+ task = calloc(1, sizeof(*task));
+ if (!task) {
+ printf("Unable to get task\n");
+ abort();
+ }
+
+ task->conn = conn;
+ assert(conn->pending_task_cnt < UINT32_MAX);
+ conn->pending_task_cnt++;
+ spdk_scsi_task_construct(&task->scsi, cpl_fn, iscsi_task_free);
+
+ return task;
+}
+
+static void
+cleanup(void)
+{
+ struct fuzz_iscsi_dev_ctx *dev_ctx, *tmp;
+
+ TAILQ_FOREACH_SAFE(dev_ctx, &g_dev_list, link, tmp) {
+ printf("device %p stats: Sent %lu valid opcode PDUs, %lu invalid opcode PDUs.\n",
+ dev_ctx, dev_ctx->num_valid_pdus,
+ dev_ctx->num_sent_pdus - dev_ctx->num_valid_pdus);
+ free(dev_ctx);
+ }
+
+ spdk_free(g_valid_buffer);
+}
+
+/* data dumping functions begin */
+static int
+dump_iscsi_cmd(void *ctx, const void *data, size_t size)
+{
+ fprintf(stderr, "%s\n", (const char *)data);
+ return 0;
+}
+
+static void
+print_scsi_io_data(struct spdk_json_write_ctx *w, struct fuzz_iscsi_io_ctx *io_ctx)
+{
+ char *data_segment_len;
+
+ data_segment_len = fuzz_get_value_base_64_buffer((void *)io_ctx->req.bhs->data_segment_len,
+ sizeof(io_ctx->req.bhs->data_segment_len));
+
+ spdk_json_write_named_uint32(w, "opcode", io_ctx->req.bhs->opcode);
+ spdk_json_write_named_uint32(w, "immediate", io_ctx->req.bhs->immediate);
+ spdk_json_write_named_uint32(w, "reserved", io_ctx->req.bhs->reserved);
+ spdk_json_write_named_uint32(w, "total_ahs_len", io_ctx->req.bhs->total_ahs_len);
+ spdk_json_write_named_string(w, "data_segment_len", data_segment_len);
+ spdk_json_write_named_uint32(w, "itt", io_ctx->req.bhs->itt);
+ spdk_json_write_named_uint32(w, "exp_stat_sn", io_ctx->req.bhs->exp_stat_sn);
+
+ free(data_segment_len);
+}
+
+static void
+print_req_obj(struct fuzz_iscsi_dev_ctx *dev_ctx, struct fuzz_iscsi_io_ctx *io_ctx)
+{
+ struct spdk_json_write_ctx *w;
+
+ w = spdk_json_write_begin(dump_iscsi_cmd, NULL, SPDK_JSON_WRITE_FLAG_FORMATTED);
+ spdk_json_write_named_object_begin(w, "bhs");
+ print_scsi_io_data(w, io_ctx);
+ spdk_json_write_object_end(w);
+ spdk_json_write_end(w);
+}
+
+/* data dumping functions end */
+
+/* dev initialization begin */
+static int
+fuzz_iscsi_dev_init(void)
+{
+ struct fuzz_iscsi_dev_ctx *dev_ctx;
+ int rc = 0;
+
+ dev_ctx = calloc(1, sizeof(*dev_ctx));
+ if (dev_ctx == NULL) {
+ return -ENOMEM;
+ }
+
+ dev_ctx->thread = spdk_get_thread();
+ if (dev_ctx->thread == NULL) {
+ fprintf(stderr, "Unable to get a thread for a fuzz device.\n");
+ rc = -EINVAL;
+ goto error_out;
+ }
+
+ dev_ctx->current_cmd_sn = 0;
+
+ TAILQ_INSERT_TAIL(&g_dev_list, dev_ctx, link);
+ return 0;
+
+error_out:
+ free(dev_ctx);
+ return rc;
+}
+/* dev initialization end */
+
+/* build requests begin */
+static void
+prep_iscsi_pdu_bhs_opcode_cmd(struct fuzz_iscsi_dev_ctx *dev_ctx, struct fuzz_iscsi_io_ctx *io_ctx)
+{
+ io_ctx->iov_ctx.iov_req.iov_len = sizeof(struct iscsi_bhs);
+ fuzz_fill_random_bytes((char *)io_ctx->req.bhs, sizeof(struct iscsi_bhs),
+ &dev_ctx->random_seed);
+}
+/* build requests end */
+
+static int
+iscsi_pdu_hdr_op_login_rsp(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu)
+{
+ return 0;
+}
+
+static int
+iscsi_fuzz_pdu_hdr_handle(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu)
+{
+ int opcode;
+ int rc = 0;
+
+ opcode = pdu->bhs.opcode;
+ if (opcode == ISCSI_OP_LOGIN_RSP) {
+ return iscsi_pdu_hdr_op_login_rsp(conn, pdu);
+ }
+
+ switch (opcode) {
+ case ISCSI_OP_LOGOUT_RSP:
+ fprintf(stderr, "Received logout hdr_handle response opcode(0x26) from Target.\n");
+ conn->is_logged_out = true;
+ break;
+ case ISCSI_OP_NOPIN:
+ case ISCSI_OP_SCSI_RSP:
+ case ISCSI_OP_TASK_RSP:
+ case ISCSI_OP_TEXT_RSP:
+ case ISCSI_OP_SCSI_DATAIN:
+ case ISCSI_OP_R2T:
+ case ISCSI_OP_ASYNC:
+ case ISCSI_OP_VENDOR_3C:
+ case ISCSI_OP_VENDOR_3D:
+ case ISCSI_OP_VENDOR_3E:
+ fprintf(stderr, "Received hdr_handle response opcode from Target is 0x%x.\n", pdu->bhs.opcode);
+ break;
+ case ISCSI_OP_REJECT:
+ fprintf(stderr, "Received rejected hdr_handle response opcode(0x3f) from Target.\n");
+ break;
+ default:
+ rc = -1;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+iscsi_pdu_payload_op_login_rsp(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu)
+{
+ struct iscsi_bhs_login_rsp *rsph;
+
+ rsph = (struct iscsi_bhs_login_rsp *)&pdu->bhs;
+ if (rsph == NULL) {
+ return -1;
+ }
+
+ assert(rsph->tsih != 0);
+ assert(rsph->status_class == 0);
+ assert(ISCSI_BHS_LOGIN_GET_TBIT(rsph->flags));
+ assert(!(rsph->flags & ISCSI_LOGIN_CONTINUE));
+ assert((rsph->flags & ISCSI_LOGIN_NEXT_STAGE_MASK) == ISCSI_LOGIN_NEXT_STAGE_3);
+
+ /* We got the Login Final Response and move to Full-Feature Phase. */
+ conn->full_feature = 1;
+ return 0;
+}
+
+static int
+iscsi_fuzz_pdu_payload_handle(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu)
+{
+ int opcode;
+ int rc = 0;
+
+ opcode = pdu->bhs.opcode;
+ fprintf(stderr, "Received payload_handle response opcode from Target is 0x%x.\n", opcode);
+
+ switch (opcode) {
+ case ISCSI_OP_LOGIN_RSP:
+ rc = iscsi_pdu_payload_op_login_rsp(conn, pdu);
+ break;
+ case ISCSI_OP_NOPIN:
+ case ISCSI_OP_SCSI_RSP:
+ case ISCSI_OP_TASK_RSP:
+ case ISCSI_OP_TEXT_RSP:
+ case ISCSI_OP_SCSI_DATAIN:
+ case ISCSI_OP_R2T:
+ case ISCSI_OP_ASYNC:
+ case ISCSI_OP_VENDOR_3C:
+ case ISCSI_OP_VENDOR_3D:
+ case ISCSI_OP_VENDOR_3E:
+ case ISCSI_OP_REJECT:
+ break;
+ default:
+ rc = -1;
+ break;
+ }
+
+ return rc;
+}
+
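+/*
+ * Minimal PDU receive state machine: allocate a PDU, read the BHS, run the
+ * header handler, then read and handle the payload. Returns 1 when a full
+ * PDU has been processed, 0 when more data is needed, and
+ * SPDK_ISCSI_CONNECTION_FATAL on error.
+ */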
+static int
+iscsi_fuzz_read_pdu(struct spdk_iscsi_conn *conn)
+{
+ enum iscsi_pdu_recv_state prev_state;
+ struct spdk_iscsi_pdu *pdu;
+ uint32_t data_len;
+ int rc;
+
+ do {
+ prev_state = conn->pdu_recv_state;
+ pdu = conn->pdu_in_progress;
+
+ switch (conn->pdu_recv_state) {
+ case ISCSI_PDU_RECV_STATE_AWAIT_PDU_READY:
+ assert(conn->pdu_in_progress == NULL);
+
+ conn->pdu_in_progress = iscsi_get_pdu(conn);
+ if (conn->pdu_in_progress == NULL) {
+ return SPDK_ISCSI_CONNECTION_FATAL;
+ }
+ TAILQ_INSERT_TAIL(&g_get_pdu_list, conn->pdu_in_progress, tailq);
+ conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_AWAIT_PDU_HDR;
+ break;
+ case ISCSI_PDU_RECV_STATE_AWAIT_PDU_HDR:
+ if (pdu->bhs_valid_bytes < ISCSI_BHS_LEN) {
+ rc = iscsi_conn_read_data(conn,
+ ISCSI_BHS_LEN - pdu->bhs_valid_bytes,
+ (uint8_t *)&pdu->bhs + pdu->bhs_valid_bytes);
+ if (rc < 0) {
+ conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_ERROR;
+ break;
+ }
+ pdu->bhs_valid_bytes += rc;
+ if (pdu->bhs_valid_bytes < ISCSI_BHS_LEN) {
+ return 0;
+ }
+ }
+
+ pdu->data_segment_len = ISCSI_ALIGN(DGET24(pdu->bhs.data_segment_len));
+
+ rc = iscsi_fuzz_pdu_hdr_handle(conn, pdu);
+ if (rc < 0) {
+ printf("Critical error is detected. Close the connection\n");
+ conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_ERROR;
+ break;
+ }
+
+ if (conn->is_logged_out) {
+ printf("pdu received after logout\n");
+ conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_ERROR;
+ break;
+ }
+
+ conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
+ break;
+ case ISCSI_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
+ data_len = pdu->data_segment_len;
+ if (data_len != 0 && pdu->data_buf == NULL) {
+ pdu->data_buf = calloc(1, data_len);
+ if (pdu->data_buf == NULL) {
+ return 0;
+ }
+ pdu->data = pdu->data_buf;
+ }
+
+ /* copy the actual data into local buffer */
+ if (pdu->data_valid_bytes < data_len) {
+ rc = iscsi_conn_read_data_segment(conn, pdu, data_len);
+ if (rc < 0) {
+ conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_ERROR;
+ break;
+ }
+ pdu->data_valid_bytes += rc;
+ if (pdu->data_valid_bytes < data_len) {
+ return 0;
+ }
+ }
+
+ /* All data for this PDU has now been read from the socket. */
+ spdk_trace_record(TRACE_ISCSI_READ_PDU, conn->id, pdu->data_valid_bytes,
+ (uintptr_t)pdu, pdu->bhs.opcode);
+
+ if (!pdu->is_rejected) {
+ rc = iscsi_fuzz_pdu_payload_handle(conn, pdu);
+ } else {
+ rc = 0;
+ }
+ if (rc == 0) {
+ spdk_trace_record(TRACE_ISCSI_TASK_EXECUTED, 0, 0, (uintptr_t)pdu, 0);
+ conn->pdu_in_progress = NULL;
+ conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_AWAIT_PDU_READY;
+ return 1;
+ } else {
+ conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_ERROR;
+ }
+ break;
+ case ISCSI_PDU_RECV_STATE_ERROR:
+ return SPDK_ISCSI_CONNECTION_FATAL;
+ default:
+ assert(false);
+ printf("code should not come here\n");
+ break;
+ }
+ } while (prev_state != conn->pdu_recv_state);
+
+ return 0;
+}
+
+static int
+fuzz_iscsi_handle_incoming_pdus(struct spdk_iscsi_conn *conn)
+{
+ int i, rc;
+
+ /* Read new PDUs from network */
+ for (i = 0; i < GET_PDU_LOOP_COUNT; i++) {
+ rc = iscsi_fuzz_read_pdu(conn);
+ if (rc == 0) {
+ break;
+ } else if (rc < 0) {
+ return rc;
+ }
+ }
+
+ return i;
+}
+
+static void
+fuzz_iscsi_send_login_request(struct fuzz_iscsi_dev_ctx *dev_ctx, uint8_t session_type)
+{
+ struct fuzz_iscsi_io_ctx *io_ctx = NULL;
+ struct spdk_iscsi_pdu *req_pdu;
+ struct iscsi_bhs_login_req *login_req;
+ struct spdk_iscsi_conn *conn = dev_ctx->conn;
+
+ req_pdu = iscsi_get_pdu(conn);
+ req_pdu->writev_offset = 0;
+ req_pdu->hdigest_valid_bytes = 0;
+ req_pdu->ahs_valid_bytes = 0;
+ req_pdu->data_buf_len = 8192;
+ req_pdu->data = calloc(1, 8192);
+ assert(req_pdu->data != NULL);
+ req_pdu->data_segment_len = 0;
+
+ login_req = (struct iscsi_bhs_login_req *)&req_pdu->bhs;
+ io_ctx = &dev_ctx->io_ctx;
+ io_ctx->req.login_req = login_req;
+ io_ctx->req.login_req->version_min = 0;
+ /* a new session */
+ io_ctx->req.login_req->tsih = 0;
+
+ req_pdu->bhs.opcode = ISCSI_OP_LOGIN;
+ req_pdu->bhs.immediate = 1;
+ req_pdu->bhs.reserved = 0;
+ req_pdu->bhs_valid_bytes = ISCSI_BHS_LEN;
+ req_pdu->bhs.total_ahs_len = 0;
+
+ /* An initiator that chooses to operate without iSCSI security and with
+ * all the operational parameters taking the default values issues the
+ * Login with the T bit set to 1, the CSG set to
+ * LoginOperationalNegotiation, and the NSG set to FullFeaturePhase.
+ *
+ * Byte / 0 | 1 | 2 | 3 |
+ * |0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|
+ * +---------------+---------------+---------------+---------------+
+ * 0|.|1| 0x03 |T|C|.|.|CSG|NSG| Version-max | Version-min |
+ */
+ req_pdu->bhs.flags = ISCSI_LOGIN_TRANSIT | (ISCSI_OPERATIONAL_NEGOTIATION_PHASE << 2) |
+ ISCSI_FULL_FEATURE_PHASE;
+
+ req_pdu->data_segment_len = iscsi_append_text(conn, "InitiatorName", g_init_name,
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "HeaderDigest", "None",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "DataDigest", "None",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "DefaultTime2Wait", "2",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "DefaultTime2Retain", "0",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "IFMarker", "No",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "OFMarker", "No",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "ErrorRecoveryLevel", "0",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+
+ if (session_type == SESSION_TYPE_DISCOVERY) {
+ /* Discovery PDU */
+ conn->sess->session_type = SESSION_TYPE_DISCOVERY;
+ req_pdu->data_segment_len = iscsi_append_text(conn, "SessionType", "Discovery",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "MaxRecvDataSegmentLength", "32768",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ } else {
+ /* Login PDU */
+ conn->sess->session_type = SESSION_TYPE_NORMAL;
+ req_pdu->data_segment_len = iscsi_append_text(conn, "SessionType", "Normal",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "TargetName", g_tgt_name,
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "InitialR2T", "No",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "ImmediateData", "Yes",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "MaxBurstLength", "16776192",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "FirstBurstLength", "262144",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "MaxOutstandingR2T", "1",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "MaxConnections", "1",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "DataPDUInOrder", "Yes",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "DataSequenceInOrder", "Yes",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ req_pdu->data_segment_len = iscsi_append_text(conn, "MaxRecvDataSegmentLength", "262144",
+ req_pdu->data, req_pdu->data_buf_len, req_pdu->data_segment_len);
+ }
+
+ DSET24(req_pdu->bhs.data_segment_len, req_pdu->data_segment_len);
+ iscsi_conn_write_pdu(conn, req_pdu, iscsi_conn_pdu_generic_complete, NULL);
+}
+
+static void
+fuzz_iscsi_send_logout_request(struct fuzz_iscsi_dev_ctx *dev_ctx)
+{
+ struct fuzz_iscsi_io_ctx *io_ctx = NULL;
+ struct spdk_iscsi_pdu *req_pdu;
+ struct iscsi_bhs_logout_req *logout_req;
+ struct spdk_iscsi_conn *conn = dev_ctx->conn;
+
+ conn->is_logged_out = true;
+
+ req_pdu = iscsi_get_pdu(conn);
+ req_pdu->writev_offset = 0;
+ req_pdu->hdigest_valid_bytes = 0;
+ req_pdu->ahs_valid_bytes = 0;
+ req_pdu->data_buf_len = 0;
+
+ logout_req = (struct iscsi_bhs_logout_req *)&req_pdu->bhs;
+ io_ctx = &dev_ctx->io_ctx;
+ io_ctx->req.logout_req = logout_req;
+
+ req_pdu->bhs.opcode = ISCSI_OP_LOGOUT;
+ req_pdu->bhs.immediate = 1;
+ req_pdu->bhs.reserved = 0;
+ req_pdu->bhs_valid_bytes = ISCSI_BHS_LEN;
+ req_pdu->bhs.total_ahs_len = 0;
+ req_pdu->bhs.flags = 0;
+
+ DSET24(req_pdu->bhs.data_segment_len, 0);
+ iscsi_conn_write_pdu(conn, req_pdu, iscsi_conn_pdu_generic_complete, conn);
+}
+
+static void
+iscsi_fuzz_conn_reset(struct spdk_iscsi_conn *conn, struct spdk_iscsi_sess *sess)
+{
+ conn->sess = sess;
+ conn->data_in_cnt = 0;
+ conn->params = NULL;
+ conn->header_digest = false;
+ conn->data_digest = false;
+ conn->MaxRecvDataSegmentLength = 8192;
+ conn->full_feature = 0;
+ conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_AWAIT_PDU_READY;
+ conn->pdu_in_progress = NULL;
+ conn->is_logged_out = 0;
+}
+
+static void
+iscsi_fuzz_sock_connect(struct spdk_iscsi_conn *conn)
+{
+ const char *host = g_tgt_ip;
+ const char *port = g_tgt_port;
+ char saddr[INET6_ADDRSTRLEN], caddr[INET6_ADDRSTRLEN];
+ uint16_t cport, sport;
+ int rc = 0;
+
+ conn->sock = spdk_sock_connect(host, spdk_strtol(port, 10), NULL);
+ if (conn->sock == NULL) {
+ fprintf(stderr, "connect error(%d): %s\n", errno, spdk_strerror(errno));
+ spdk_sock_close(&conn->sock);
+ return;
+ }
+ fprintf(stderr, "\nConnecting to the server on %s:%s\n", host, port);
+
+ rc = spdk_sock_getaddr(conn->sock, saddr, sizeof(saddr), &sport, caddr, sizeof(caddr), &cport);
+ if (rc < 0) {
+ fprintf(stderr, "Cannot get connection addresses\n");
+ spdk_sock_close(&conn->sock);
+ return;
+ }
+
+ fprintf(stderr, "Connection accepted from (%s, %hu) to (%s, %hu)\n", caddr, cport, saddr, sport);
+
+}
+
+static void
+check_successful_op(struct fuzz_iscsi_dev_ctx *dev_ctx, struct fuzz_iscsi_io_ctx *io_ctx)
+{
+ if (g_is_valid_opcode) {
+ fprintf(stderr, "Sent a valid opcode PDU.\n");
+ dev_ctx->num_valid_pdus++;
+ } else {
+ fprintf(stderr, "Sent an invalid opcode PDU.\n");
+ }
+}
+
+/* submit requests begin */
+static void
+dev_submit_requests(struct fuzz_iscsi_dev_ctx *dev_ctx)
+{
+ struct fuzz_iscsi_io_ctx *io_ctx = NULL;
+ uint8_t opcode;
+ struct spdk_iscsi_pdu *req_pdu;
+ struct iscsi_bhs *bhs;
+ struct iscsi_bhs_nop_out *nop_out_req;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_task_req *task_req;
+ struct iscsi_bhs_text_req *text_req;
+ struct iscsi_bhs_data_out *data_out_req;
+ struct iscsi_bhs_snack_req *snack_req;
+ unsigned int rand_seed;
+ bool is_p99;
+
+ g_is_valid_opcode = true;
+
+ /* Random PDU */
+ opcode = rand() % 0x3f;
+ fprintf(stderr, "Random request bhs.opcode of Initiator is 0x%x.\n", opcode);
+
+ if ((opcode == ISCSI_OP_LOGIN) || (opcode == ISCSI_OP_LOGOUT)) {
+ /* only need send next */
+ fprintf(stderr, "LOGIN and LOGOUT opcodes are ignored here.\n");
+ return;
+ }
+
+ req_pdu = iscsi_get_pdu(dev_ctx->conn);
+ req_pdu->writev_offset = 0;
+ req_pdu->hdigest_valid_bytes = 0;
+ req_pdu->ahs_valid_bytes = 0;
+ req_pdu->data_buf_len = 0;
+
+ dev_ctx->conn->sess->session_type = SESSION_TYPE_NORMAL;
+
+ io_ctx = &dev_ctx->io_ctx;
+
+ switch (opcode) {
+ case ISCSI_OP_NOPOUT:
+ nop_out_req = (struct iscsi_bhs_nop_out *)&req_pdu->bhs;
+ io_ctx->req.nop_out_req = nop_out_req;
+ break;
+ case ISCSI_OP_SCSI:
+ scsi_req = (struct iscsi_bhs_scsi_req *)&req_pdu->bhs;
+ io_ctx->req.scsi_req = scsi_req;
+ break;
+ case ISCSI_OP_TASK:
+ task_req = (struct iscsi_bhs_task_req *)&req_pdu->bhs;
+ io_ctx->req.task_req = task_req;
+ break;
+ case ISCSI_OP_TEXT:
+ text_req = (struct iscsi_bhs_text_req *)&req_pdu->bhs;
+ io_ctx->req.text_req = text_req;
+ break;
+ case ISCSI_OP_SCSI_DATAOUT:
+ data_out_req = (struct iscsi_bhs_data_out *)&req_pdu->bhs;
+ io_ctx->req.data_out_req = data_out_req;
+ break;
+ case ISCSI_OP_SNACK:
+ snack_req = (struct iscsi_bhs_snack_req *)&req_pdu->bhs;
+ io_ctx->req.snack_req = snack_req;
+ break;
+ default:
+ bhs = (struct iscsi_bhs *)&req_pdu->bhs;
+ io_ctx->req.bhs = bhs;
+ g_is_valid_opcode = false;
+ break;
+ }
+
+ prep_iscsi_pdu_bhs_opcode_cmd(dev_ctx, io_ctx);
+ io_ctx->req.bhs->opcode = opcode;
+ req_pdu->bhs.opcode = opcode;
+ req_pdu->bhs.immediate = 1;
+ req_pdu->bhs.reserved = 0;
+ req_pdu->bhs_valid_bytes = ISCSI_BHS_LEN;
+ req_pdu->bhs.total_ahs_len = 0;
+ req_pdu->bhs.stat_sn = 0;
+ DSET24(req_pdu->bhs.data_segment_len, 0);
+
+ if (opcode <= ISCSI_OP_TEXT) {
+ rand_seed = time(NULL);
+ is_p99 = rand_r(&rand_seed) % 100 == 0;
+ if (!is_p99) { /* ~99% of the time: fill in a valid cmd_sn */
+ switch (opcode) {
+ case ISCSI_OP_NOPOUT:
+ if (req_pdu->bhs.immediate) {
+ io_ctx->req.nop_out_req->cmd_sn = dev_ctx->current_cmd_sn;
+ } else {
+ io_ctx->req.nop_out_req->cmd_sn = dev_ctx->current_cmd_sn++;
+ }
+ break;
+ case ISCSI_OP_SCSI:
+ if (req_pdu->bhs.immediate) {
+ io_ctx->req.scsi_req->cmd_sn = dev_ctx->current_cmd_sn;
+ } else {
+ io_ctx->req.scsi_req->cmd_sn = dev_ctx->current_cmd_sn++;
+ }
+ break;
+ case ISCSI_OP_TASK:
+ if (req_pdu->bhs.immediate) {
+ io_ctx->req.task_req->cmd_sn = dev_ctx->current_cmd_sn;
+ } else {
+ io_ctx->req.task_req->cmd_sn = dev_ctx->current_cmd_sn++;
+ }
+ break;
+ case ISCSI_OP_TEXT:
+ if (req_pdu->bhs.immediate) {
+ io_ctx->req.text_req->cmd_sn = dev_ctx->current_cmd_sn;
+ } else {
+ io_ctx->req.text_req->cmd_sn = dev_ctx->current_cmd_sn++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (opcode == ISCSI_OP_SCSI) {
+ /* avoid ((R_bit != 0) && (W_bit != 0)) is true */
+ io_ctx->req.scsi_req->read_bit = 0;
+ io_ctx->req.scsi_req->write_bit = 0;
+ }
+
+ if (opcode == ISCSI_OP_TEXT) {
+ /* avoid: (F_bit && C_bit) is true */
+ io_ctx->req.text_req->flags = 0;
+ /* avoid: correct itt is not equal to the current itt */
+ io_ctx->req.text_req->itt = 0;
+ }
+
+ fprintf(stderr, "Dumping this request bhs contents now.\n");
+ print_req_obj(dev_ctx, io_ctx);
+
+ check_successful_op(dev_ctx, io_ctx);
+ dev_ctx->num_sent_pdus++;
+
+ iscsi_conn_write_pdu(dev_ctx->conn, req_pdu,
+ iscsi_conn_pdu_generic_complete, NULL);
+}
+/* submit requests end */
+
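+/*
+ * Per-device poller: once the runtime expires, log out and tear the
+ * connection down; after a logout, reconnect and log in again; while the
+ * connection is in full-feature phase, submit one random PDU per poll, then
+ * flush the socket and read any responses.
+ */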
+static int
+poll_dev(void *ctx)
+{
+ struct fuzz_iscsi_dev_ctx *dev_ctx = ctx;
+ struct spdk_iscsi_conn *conn = dev_ctx->conn;
+ uint64_t current_ticks;
+ struct spdk_iscsi_pdu *pdu, *tmp;
+
+ current_ticks = spdk_get_ticks();
+ if (current_ticks > g_runtime_ticks) {
+ g_run = false;
+ }
+
+ if (!g_run) {
+ /* Logout PDU */
+ fuzz_iscsi_send_logout_request(dev_ctx);
+ fuzz_iscsi_handle_incoming_pdus(conn);
+
+ TAILQ_FOREACH_SAFE(pdu, &g_get_pdu_list, tailq, tmp) {
+ TAILQ_REMOVE(&g_get_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+ }
+
+ spdk_sock_close(&conn->sock);
+
+ TAILQ_FOREACH_SAFE(pdu, &conn->write_pdu_list, tailq, tmp) {
+ TAILQ_REMOVE(&conn->write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+ }
+
+ free(conn);
+
+ spdk_poller_unregister(&dev_ctx->poller);
+ __sync_sub_and_fetch(&g_num_active_threads, 1);
+
+ return -1;
+ }
+
+ if (conn->is_logged_out) {
+ spdk_sock_close(&conn->sock);
+ iscsi_fuzz_conn_reset(conn, &dev_ctx->sess);
+ iscsi_fuzz_sock_connect(conn);
+ usleep(1000);
+
+ /* Login PDU */
+ fuzz_iscsi_send_login_request(dev_ctx, SESSION_TYPE_NORMAL);
+ } else if (conn->full_feature == 1) {
+ dev_submit_requests(dev_ctx);
+ }
+
+ spdk_sock_flush(conn->sock);
+
+ fuzz_iscsi_handle_incoming_pdus(conn);
+
+ return 0;
+}
+
+static void
+start_io(void *ctx)
+{
+ struct fuzz_iscsi_dev_ctx *dev_ctx = ctx;
+
+ dev_ctx->sess.ExpCmdSN = 0;
+ dev_ctx->sess.MaxCmdSN = 64;
+ dev_ctx->sess.MaxBurstLength = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ dev_ctx->sess.MaxOutstandingR2T = 1;
+ dev_ctx->sess.tag = 1;
+ dev_ctx->sess.tsih = 256;
+
+ dev_ctx->conn = calloc(1, sizeof(*dev_ctx->conn));
+ assert(dev_ctx->conn != NULL);
+ TAILQ_INIT(&dev_ctx->conn->write_pdu_list);
+
+ iscsi_fuzz_conn_reset(dev_ctx->conn, &dev_ctx->sess);
+ iscsi_fuzz_sock_connect(dev_ctx->conn);
+ usleep(1000);
+
+ /* Login PDU */
+ fuzz_iscsi_send_login_request(dev_ctx, SESSION_TYPE_NORMAL);
+
+ if (g_random_seed) {
+ dev_ctx->random_seed = g_random_seed;
+ } else {
+ dev_ctx->random_seed = spdk_get_ticks();
+ }
+
+ dev_ctx->poller = SPDK_POLLER_REGISTER(poll_dev, dev_ctx, 0);
+ if (dev_ctx->poller == NULL) {
+ return;
+ }
+}
+
+static int
+check_app_completion(void *ctx)
+{
+ if (g_num_active_threads == 0) {
+ spdk_poller_unregister(&g_app_completion_poller);
+ printf("Fuzzing completed. Shutting down the fuzz application.\n\n");
+ cleanup();
+ spdk_app_stop(0);
+ }
+ return 0;
+}
+
+static void
+begin_iscsi_fuzz(void *ctx)
+{
+ struct fuzz_iscsi_dev_ctx *dev_ctx;
+ int rc;
+
+ g_runtime_ticks = spdk_get_ticks() + g_runtime * spdk_get_ticks_hz();
+
+ g_valid_buffer = spdk_malloc(0x1000, 0x200, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
+ if (g_valid_buffer == NULL) {
+ fprintf(stderr, "Failed to allocate a valid buffer for random PDUs\n");
+ goto out;
+ }
+
+ rc = fuzz_iscsi_dev_init();
+ if (rc) {
+ fprintf(stderr, "fuzz_iscsi_dev_init() failed.\n");
+ goto out;
+ }
+
+ TAILQ_FOREACH(dev_ctx, &g_dev_list, link) {
+ assert(dev_ctx->thread != NULL);
+ spdk_thread_send_msg(dev_ctx->thread, start_io, dev_ctx);
+ __sync_add_and_fetch(&g_num_active_threads, 1);
+ }
+
+ g_app_completion_poller = SPDK_POLLER_REGISTER(check_app_completion, NULL, 1000000);
+ if (g_app_completion_poller == NULL) {
+ fprintf(stderr, "Failed to register a poller for test completion checking.\n");
+ goto out;
+ }
+
+ return;
+out:
+ cleanup();
+ spdk_app_stop(0);
+}
+
+static void
+iscsi_fuzz_usage(void)
+{
+ fprintf(stderr, " -T <ip> iSCSI Target IP address.\n");
+ fprintf(stderr, " -S <integer> Seed value for test.\n");
+ fprintf(stderr,
+ " -t <integer> Time in seconds to run the fuzz test.\n");
+}
+
+static int
+iscsi_fuzz_parse(int ch, char *arg)
+{
+ int64_t error_test;
+
+ switch (ch) {
+ case 'T':
+ g_tgt_ip = optarg;
+ break;
+ case 'S':
+ error_test = spdk_strtol(arg, 10);
+ if (error_test < 0) {
+ fprintf(stderr, "Invalid value supplied for the random seed.\n");
+ return -1;
+ } else {
+ g_random_seed = error_test;
+ }
+ break;
+ case 't':
+ g_runtime = spdk_strtol(optarg, 10);
+ if (g_runtime <= 0 || g_runtime > MAX_RUNTIME_S) {
+ fprintf(stderr, "You must supply a positive runtime value less than %d.\n", MAX_RUNTIME_S);
+ return -1;
+ }
+ break;
+ case '?':
+ default:
+ iscsi_fuzz_usage();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_app_opts opts = {};
+ int rc;
+
+ g_runtime = DEFAULT_RUNTIME;
+ srand((unsigned)time(0));
+
+ TAILQ_INIT(&g_get_pdu_list);
+
+ spdk_app_opts_init(&opts);
+ opts.name = "iscsi_fuzz";
+
+ if ((rc = spdk_app_parse_args(argc, argv, &opts, "T:S:t:", NULL, iscsi_fuzz_parse,
+ iscsi_fuzz_usage)) != SPDK_APP_PARSE_ARGS_SUCCESS) {
+ return rc;
+ }
+
+ rc = spdk_app_start(&opts, begin_iscsi_fuzz, NULL);
+
+ return rc;
+}
diff --git a/src/spdk/test/app/fuzz/nvme_fuzz/.gitignore b/src/spdk/test/app/fuzz/nvme_fuzz/.gitignore
new file mode 100644
index 000000000..801146458
--- /dev/null
+++ b/src/spdk/test/app/fuzz/nvme_fuzz/.gitignore
@@ -0,0 +1 @@
+nvme_fuzz
diff --git a/src/spdk/test/app/fuzz/nvme_fuzz/Makefile b/src/spdk/test/app/fuzz/nvme_fuzz/Makefile
new file mode 100644
index 000000000..b7ad5e172
--- /dev/null
+++ b/src/spdk/test/app/fuzz/nvme_fuzz/Makefile
@@ -0,0 +1,49 @@
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = nvme_fuzz
+
+CFLAGS += -I$(SPDK_ROOT_DIR)/test/app/fuzz/common
+
+C_SRCS := nvme_fuzz.c
+
+SPDK_LIB_LIST += $(SOCK_MODULES_LIST)
+SPDK_LIB_LIST += conf event json jsonrpc log nvme rpc sock thread trace util
+
+ifeq ($(CONFIG_RDMA),y)
+SPDK_LIB_LIST += rdma
+endif
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/app/fuzz/nvme_fuzz/README.md b/src/spdk/test/app/fuzz/nvme_fuzz/README.md
new file mode 100644
index 000000000..2f188b5b8
--- /dev/null
+++ b/src/spdk/test/app/fuzz/nvme_fuzz/README.md
@@ -0,0 +1,52 @@
+# Overview
+
+This application is intended to fuzz test the NVMe-oF target or a physical NVMe drive by
+submitting randomized NVMe commands through the SPDK NVMe initiator. Both local and remote
+drives are configured through a .ini style config file (see the -C option of the application).
+Multiple controllers and namespaces can be exposed to the fuzzer at a time. In order to
+handle multiple namespaces, the fuzzer will assign a thread to each namespace in round-robin
+fashion and submit commands to that thread at a set queue depth (currently 128 for I/O, 16 for
+Admin). The application will terminate under three conditions:
+
+1. The user specified run time expires (see the -t flag).
+2. One of the target controllers stops completing I/O operations back to the fuzzer, i.e. a controller timeout.
+3. The user specified a JSON file containing operations to run and the fuzzer has received valid completions for all of them.
+
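+A typical invocation might look like the following (the config file name and runtime are only
+examples; a JSON file of saved commands can be replayed with the -j option instead of a timed
+run):
+
+~~~
+./nvme_fuzz -C fuzz.ini -t 30
+~~~
+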
+# Output
+
+By default, the fuzzer will print commands that:
+
+1. Complete successfully back from the target, or
+2. Are outstanding at the time of a controller timeout.
+
+Commands are dumped as named objects in JSON format which can then be supplied back to the
+application for targeted debugging on a subsequent run. See `Debugging` below.
+By default no output is generated when a specific command is returned with a failed status.
+This can be overridden with the -V flag. If -V is specified, each command will be dumped as
+it is completed, in the JSON format described above.
+At the end of each test run, a summary is printed for each namespace in the following format:
+
+~~~
+NS: 0x200079262300 admin qp, Total commands completed: 462459, total successful commands: 1960, random_seed: 4276918833
+~~~
+
+# Debugging
+
+If a controller hangs when processing I/O generated by the fuzzer, the fuzzer will stop
+submitting I/O and dump out all outstanding I/O on the qpair that timed out. The I/O are
+dumped as valid json. You can combine the dumped commands from the fuzzer into a json
+array in a file and then pass them to the fuzzer using the -j option. Please see the
+example.json file in this directory for an example of a properly formed array of command
+structures.
+
+Please note that you can also craft your own custom command values by using the output
+from the fuzzer as a template.
+
+# JSON Format
+
+Most of the variables in the spdk_nvme_cmd structure are represented as numbers in JSON.
+The only exception to this rule is the dptr union. This is a 16 byte union structure that
+is represented as a base64 string. If writing custom commands for input, please note this
+distinction or the application will be unable to load your custom input.
+
+Happy Fuzzing!
diff --git a/src/spdk/test/app/fuzz/nvme_fuzz/example.json b/src/spdk/test/app/fuzz/nvme_fuzz/example.json
new file mode 100644
index 000000000..95540746e
--- /dev/null
+++ b/src/spdk/test/app/fuzz/nvme_fuzz/example.json
@@ -0,0 +1,290 @@
+{
+"struct spdk_nvme_cmd": {
+ "opc": 7,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 24,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 43,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 12,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 7,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 24,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 43,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 12,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 7,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 24,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 43,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 12,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 7,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 24,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 43,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ },
+ "struct spdk_nvme_cmd": {
+ "opc": 12,
+ "fuse": 1,
+ "rsvd1": 13,
+ "psdt": 1,
+ "cid": 56732,
+ "nsid": 1,
+ "rsvd2": 1516848792,
+ "rsvd3": 1233945838,
+ "mptr": 3452736735,
+ "dptr": "FHENPcH+xM0tioD+0SrNrQ==",
+ "cdw10": 3190735246,
+ "cdw11": 2629178873,
+ "cdw12": 138580308,
+ "cdw13": 1603605200,
+ "cdw14": 3031880384,
+ "cdw15": 644909208
+ }
+}
diff --git a/src/spdk/test/app/fuzz/nvme_fuzz/nvme_fuzz.c b/src/spdk/test/app/fuzz/nvme_fuzz/nvme_fuzz.c
new file mode 100644
index 000000000..127bc1bff
--- /dev/null
+++ b/src/spdk/test/app/fuzz/nvme_fuzz/nvme_fuzz.c
@@ -0,0 +1,931 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/conf.h"
+#include "spdk/env.h"
+#include "spdk/event.h"
+#include "spdk/util.h"
+#include "spdk/string.h"
+#include "spdk/nvme_spec.h"
+#include "spdk/nvme.h"
+#include "spdk/likely.h"
+#include "spdk/json.h"
+#include "fuzz_common.h"
+
+#define UNIQUE_OPCODES 256
+
+const char g_nvme_cmd_json_name[] = "struct spdk_nvme_cmd";
+char *g_conf_file;
+char *g_json_file = NULL;
+uint64_t g_runtime_ticks;
+unsigned int g_seed_value = 0;
+int g_runtime;
+
+int g_num_active_threads = 0;
+uint32_t g_admin_depth = 16;
+uint32_t g_io_depth = 128;
+
+bool g_valid_ns_only = false;
+bool g_verbose_mode = false;
+bool g_run_admin_commands = false;
+bool g_run;
+
+struct spdk_poller *g_app_completion_poller;
+bool g_successful_io_opcodes[UNIQUE_OPCODES] = {0};
+bool g_successful_admin_opcodes[UNIQUE_OPCODES] = {0};
+
+struct spdk_nvme_cmd *g_cmd_array;
+size_t g_cmd_array_size;
+
+/* I need context objects here because I need to keep track of all I/O that are in flight. */
+struct nvme_fuzz_request {
+ struct spdk_nvme_cmd cmd;
+ struct nvme_fuzz_qp *qp;
+ TAILQ_ENTRY(nvme_fuzz_request) link;
+};
+
+struct nvme_fuzz_trid {
+ struct spdk_nvme_transport_id trid;
+ TAILQ_ENTRY(nvme_fuzz_trid) tailq;
+};
+
+struct nvme_fuzz_ctrlr {
+ struct spdk_nvme_ctrlr *ctrlr;
+ TAILQ_ENTRY(nvme_fuzz_ctrlr) tailq;
+};
+
+struct nvme_fuzz_qp {
+ struct spdk_nvme_qpair *qpair;
+ /* array of context objects equal in length to the queue depth */
+ struct nvme_fuzz_request *req_ctx;
+ TAILQ_HEAD(, nvme_fuzz_request) free_ctx_objs;
+ TAILQ_HEAD(, nvme_fuzz_request) outstanding_ctx_objs;
+ unsigned int random_seed;
+ uint64_t completed_cmd_counter;
+ uint64_t submitted_cmd_counter;
+ uint64_t successful_completed_cmd_counter;
+ uint64_t timeout_tsc;
+ uint32_t num_cmds_outstanding;
+ bool timed_out;
+ bool is_admin;
+};
+
+struct nvme_fuzz_ns {
+ struct spdk_nvme_ns *ns;
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct spdk_thread *thread;
+ struct spdk_poller *req_poller;
+ struct nvme_fuzz_qp io_qp;
+ struct nvme_fuzz_qp a_qp;
+ uint32_t nsid;
+ TAILQ_ENTRY(nvme_fuzz_ns) tailq;
+};
+
+static TAILQ_HEAD(, nvme_fuzz_ns) g_ns_list = TAILQ_HEAD_INITIALIZER(g_ns_list);
+static TAILQ_HEAD(, nvme_fuzz_ctrlr) g_ctrlr_list = TAILQ_HEAD_INITIALIZER(g_ctrlr_list);
+static TAILQ_HEAD(, nvme_fuzz_trid) g_trid_list = TAILQ_HEAD_INITIALIZER(g_trid_list);
+
+static bool
+parse_nvme_cmd_obj(void *item, struct spdk_json_val *value, size_t num_values)
+{
+ struct spdk_nvme_cmd *cmd = item;
+ struct spdk_json_val *next_val;
+ uint64_t tmp_val;
+ size_t i = 0;
+
+ while (i < num_values) {
+ if (value->type == SPDK_JSON_VAL_NAME) {
+ next_val = value + 1;
+ if (!strncmp(value->start, "opc", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UNSIGNED_8BIT_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->opc = tmp_val;
+ }
+ } else if (!strncmp(value->start, "fuse", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UNSIGNED_2BIT_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->fuse = tmp_val;
+ }
+ } else if (!strncmp(value->start, "rsvd1", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UNSIGNED_4BIT_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->rsvd1 = tmp_val;
+ }
+ } else if (!strncmp(value->start, "psdt", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UNSIGNED_2BIT_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->psdt = tmp_val;
+ }
+ } else if (!strncmp(value->start, "cid", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT16_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->cid = tmp_val;
+ }
+ } else if (!strncmp(value->start, "nsid", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->nsid = tmp_val;
+ }
+ } else if (!strncmp(value->start, "rsvd2", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->rsvd2 = tmp_val;
+ }
+ } else if (!strncmp(value->start, "rsvd3", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->rsvd3 = tmp_val;
+ }
+ } else if (!strncmp(value->start, "mptr", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT64_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->mptr = tmp_val;
+ }
+ } else if (!strncmp(value->start, "dptr", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_STRING) {
+ if (fuzz_get_base_64_buffer_value(&cmd->dptr, sizeof(cmd->dptr), (char *)next_val->start,
+ next_val->len)) {
+ goto invalid;
+ }
+ }
+ } else if (!strncmp(value->start, "cdw10", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->cdw10 = tmp_val;
+ }
+ } else if (!strncmp(value->start, "cdw11", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->cdw11 = tmp_val;
+ }
+ } else if (!strncmp(value->start, "cdw12", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->cdw12 = tmp_val;
+ }
+ } else if (!strncmp(value->start, "cdw13", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->cdw13 = tmp_val;
+ }
+ } else if (!strncmp(value->start, "cdw14", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->cdw14 = tmp_val;
+ }
+ } else if (!strncmp(value->start, "cdw15", value->len)) {
+ if (next_val->type == SPDK_JSON_VAL_NUMBER) {
+ if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
+ goto invalid;
+ }
+ cmd->cdw15 = tmp_val;
+ }
+ }
+ }
+ i++;
+ value++;
+ }
+ return true;
+
+invalid:
+ fprintf(stderr, "Invalid value supplied for cmd->%.*s: %.*s\n", value->len, (char *)value->start,
+ next_val->len, (char *)next_val->start);
+ return false;
+}
+
+static void
+report_successful_opcodes(bool *array, int length)
+{
+ int i;
+
+ for (i = 0; i < length; i++) {
+ if (array[i] == true) {
+ printf("%d, ", i);
+ }
+ }
+ printf("\n");
+}
+
+static int
+print_nvme_cmd(void *cb_ctx, const void *data, size_t size)
+{
+ fprintf(stderr, "%s\n", (const char *)data);
+ return 0;
+}
+
+static void
+json_dump_nvme_cmd(struct spdk_nvme_cmd *cmd)
+{
+ struct spdk_json_write_ctx *w;
+ char *dptr_value;
+
+ dptr_value = fuzz_get_value_base_64_buffer(&cmd->dptr, sizeof(cmd->dptr));
+ if (dptr_value == NULL) {
+ fprintf(stderr, "Unable to allocate buffer context for printing command.\n");
+ return;
+ }
+
+ w = spdk_json_write_begin(print_nvme_cmd, cmd, SPDK_JSON_WRITE_FLAG_FORMATTED);
+ if (w == NULL) {
+ fprintf(stderr, "Unable to allocate json context for printing command.\n");
+ free(dptr_value);
+ return;
+ }
+
+ spdk_json_write_named_object_begin(w, g_nvme_cmd_json_name);
+ spdk_json_write_named_uint32(w, "opc", cmd->opc);
+ spdk_json_write_named_uint32(w, "fuse", cmd->fuse);
+ spdk_json_write_named_uint32(w, "rsvd1", cmd->rsvd1);
+ spdk_json_write_named_uint32(w, "psdt", cmd->psdt);
+ spdk_json_write_named_uint32(w, "cid", cmd->cid);
+ spdk_json_write_named_uint32(w, "nsid", cmd->nsid);
+ spdk_json_write_named_uint32(w, "rsvd2", cmd->rsvd2);
+ spdk_json_write_named_uint32(w, "rsvd3", cmd->rsvd3);
+ spdk_json_write_named_uint32(w, "mptr", cmd->mptr);
+ spdk_json_write_named_string(w, "dptr", dptr_value);
+ spdk_json_write_named_uint32(w, "cdw10", cmd->cdw10);
+ spdk_json_write_named_uint32(w, "cdw11", cmd->cdw11);
+ spdk_json_write_named_uint32(w, "cdw12", cmd->cdw12);
+ spdk_json_write_named_uint32(w, "cdw13", cmd->cdw13);
+ spdk_json_write_named_uint32(w, "cdw14", cmd->cdw14);
+ spdk_json_write_named_uint32(w, "cdw15", cmd->cdw15);
+ spdk_json_write_object_end(w);
+
+ free(dptr_value);
+ spdk_json_write_end(w);
+}
+
+static void
+json_dump_nvme_cmd_list(struct nvme_fuzz_qp *qp)
+{
+ struct nvme_fuzz_request *ctx;
+
+ TAILQ_FOREACH(ctx, &qp->outstanding_ctx_objs, link) {
+ json_dump_nvme_cmd(&ctx->cmd);
+ }
+}
+
+static void
+handle_timeout(struct nvme_fuzz_qp *qp, bool is_admin)
+{
+ fprintf(stderr, "An %s queue has timed out. Dumping all outstanding commands from that queue\n",
+ is_admin ? "Admin" : "I/O");
+ json_dump_nvme_cmd_list(qp);
+ qp->timed_out = true;
+}
+
+static void submit_ns_cmds(struct nvme_fuzz_ns *ns_entry);
+
+static void
+nvme_fuzz_cpl_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct nvme_fuzz_request *ctx = cb_arg;
+ struct nvme_fuzz_qp *qp = ctx->qp;
+
+ qp->completed_cmd_counter++;
+ if (spdk_unlikely(cpl->status.sc == SPDK_NVME_SC_SUCCESS)) {
+ fprintf(stderr, "The following %s command (command num %lu) completed successfully\n",
+ qp->is_admin ? "Admin" : "I/O", qp->completed_cmd_counter);
+ qp->successful_completed_cmd_counter++;
+ json_dump_nvme_cmd(&ctx->cmd);
+
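+ /* Record the successful opcode atomically; each namespace is serviced by its own thread, so these global arrays can be updated concurrently. */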
+ if (qp->is_admin) {
+ __sync_bool_compare_and_swap(&g_successful_admin_opcodes[ctx->cmd.opc], false, true);
+ } else {
+ __sync_bool_compare_and_swap(&g_successful_io_opcodes[ctx->cmd.opc], false, true);
+ }
+ } else if (g_verbose_mode == true) {
+ fprintf(stderr, "The following %s command (command num %lu) failed as expected.\n",
+ qp->is_admin ? "Admin" : "I/O", qp->completed_cmd_counter);
+ json_dump_nvme_cmd(&ctx->cmd);
+ }
+
+ qp->timeout_tsc = fuzz_refresh_timeout();
+ TAILQ_REMOVE(&qp->outstanding_ctx_objs, ctx, link);
+ TAILQ_INSERT_HEAD(&qp->free_ctx_objs, ctx, link);
+ assert(qp->num_cmds_outstanding > 0);
+ qp->num_cmds_outstanding--;
+}
+
+static int
+poll_for_completions(void *arg)
+{
+ struct nvme_fuzz_ns *ns_entry = arg;
+ uint64_t current_ticks = spdk_get_ticks();
+ uint64_t *counter;
+ if (!ns_entry->io_qp.timed_out) {
+ spdk_nvme_qpair_process_completions(ns_entry->io_qp.qpair, 0);
+ /* Always have to process admin completions for the purposes of keep alive. */
+ spdk_nvme_ctrlr_process_admin_completions(ns_entry->ctrlr);
+ }
+
+ if (g_cmd_array) {
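+ /* When replaying commands parsed from a json file, stop once the exercised queue has submitted them all. */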
+ if (g_run_admin_commands) {
+ counter = &ns_entry->a_qp.submitted_cmd_counter;
+ } else {
+ counter = &ns_entry->io_qp.submitted_cmd_counter;
+ }
+
+ if (*counter >= g_cmd_array_size) {
+ g_run = false;
+ }
+ } else {
+ if (current_ticks > g_runtime_ticks) {
+ g_run = false;
+ }
+ }
+
+ if (ns_entry->a_qp.timeout_tsc < current_ticks && !ns_entry->a_qp.timed_out &&
+ ns_entry->a_qp.num_cmds_outstanding > 0) {
+ handle_timeout(&ns_entry->a_qp, true);
+ }
+
+ if (ns_entry->io_qp.timeout_tsc < current_ticks && !ns_entry->io_qp.timed_out &&
+ ns_entry->io_qp.num_cmds_outstanding > 0) {
+ handle_timeout(&ns_entry->io_qp, false);
+ }
+
+ submit_ns_cmds(ns_entry);
+
+ if (g_run) {
+ return 0;
+ }
+ /*
+ * We either processed all I/O properly and can shut down normally, or a qp
+ * timed out and we need to exit without waiting for the outstanding counts to reach 0.
+ */
+ if (ns_entry->io_qp.num_cmds_outstanding == 0 &&
+ ns_entry->a_qp.num_cmds_outstanding == 0) {
+ goto exit_handler;
+ } else if (ns_entry->io_qp.timed_out && (!g_run_admin_commands || ns_entry->a_qp.timed_out)) {
+ goto exit_handler;
+ } else {
+ return 0;
+ }
+
+exit_handler:
+ spdk_poller_unregister(&ns_entry->req_poller);
+ __sync_sub_and_fetch(&g_num_active_threads, 1);
+ spdk_thread_exit(ns_entry->thread);
+ return 0;
+}
+
+static void
+prep_nvme_cmd(struct nvme_fuzz_ns *ns_entry, struct nvme_fuzz_qp *qp, struct nvme_fuzz_request *ctx)
+{
+ if (g_cmd_array) {
+ memcpy(&ctx->cmd, &g_cmd_array[qp->submitted_cmd_counter], sizeof(ctx->cmd));
+ } else {
+ fuzz_fill_random_bytes((char *)&ctx->cmd, sizeof(ctx->cmd), &qp->random_seed);
+
+ if (g_valid_ns_only) {
+ ctx->cmd.nsid = ns_entry->nsid;
+ }
+ }
+}
+
+static int
+submit_qp_cmds(struct nvme_fuzz_ns *ns, struct nvme_fuzz_qp *qp)
+{
+ struct nvme_fuzz_request *ctx;
+ int rc;
+
+ if (qp->timed_out) {
+ return 0;
+ }
+ /* If we are reading from an array, we need to stop after the last one. */
+ while ((qp->submitted_cmd_counter < g_cmd_array_size || g_cmd_array_size == 0) &&
+ !TAILQ_EMPTY(&qp->free_ctx_objs)) {
+ ctx = TAILQ_FIRST(&qp->free_ctx_objs);
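+ /* Keep regenerating random admin commands until the opcode is not Async Event Request; an AER only completes when an event actually fires, so it could pin this context indefinitely and trip the queue timeout. */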
+ do {
+ prep_nvme_cmd(ns, qp, ctx);
+ } while (qp->is_admin && ctx->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);
+
+ TAILQ_REMOVE(&qp->free_ctx_objs, ctx, link);
+ TAILQ_INSERT_HEAD(&qp->outstanding_ctx_objs, ctx, link);
+ qp->num_cmds_outstanding++;
+ qp->submitted_cmd_counter++;
+ if (qp->is_admin) {
+ rc = spdk_nvme_ctrlr_cmd_admin_raw(ns->ctrlr, &ctx->cmd, NULL, 0, nvme_fuzz_cpl_cb, ctx);
+ } else {
+ rc = spdk_nvme_ctrlr_cmd_io_raw(ns->ctrlr, qp->qpair, &ctx->cmd, NULL, 0, nvme_fuzz_cpl_cb, ctx);
+ }
+ if (rc) {
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static void
+submit_ns_cmds(struct nvme_fuzz_ns *ns_entry)
+{
+ int rc;
+
+ if (!g_run) {
+ return;
+ }
+
+ if (g_run_admin_commands) {
+ rc = submit_qp_cmds(ns_entry, &ns_entry->a_qp);
+ if (rc) {
+ goto err_exit;
+ }
+ }
+
+ if (g_cmd_array == NULL || !g_run_admin_commands) {
+ rc = submit_qp_cmds(ns_entry, &ns_entry->io_qp);
+ }
+err_exit:
+ if (rc) {
+ /*
+ * A broken qpair on any one namespace is interesting enough to warrant
+ * stopping the whole application.
+ */
+ fprintf(stderr, "Unable to submit command with rc %d\n", rc);
+ g_run = false;
+ }
+}
+
+static void
+free_namespaces(void)
+{
+ struct nvme_fuzz_ns *ns, *tmp;
+
+ TAILQ_FOREACH_SAFE(ns, &g_ns_list, tailq, tmp) {
+ printf("NS: %p I/O qp, Total commands completed: %lu, total successful commands: %lu, random_seed: %u\n",
+ ns->ns,
+ ns->io_qp.completed_cmd_counter, ns->io_qp.successful_completed_cmd_counter, ns->io_qp.random_seed);
+ printf("NS: %p admin qp, Total commands completed: %lu, total successful commands: %lu, random_seed: %u\n",
+ ns->ns,
+ ns->a_qp.completed_cmd_counter, ns->a_qp.successful_completed_cmd_counter, ns->a_qp.random_seed);
+
+ TAILQ_REMOVE(&g_ns_list, ns, tailq);
+ if (ns->io_qp.qpair) {
+ spdk_nvme_ctrlr_free_io_qpair(ns->io_qp.qpair);
+ }
+ if (ns->io_qp.req_ctx) {
+ free(ns->io_qp.req_ctx);
+ }
+ if (ns->a_qp.req_ctx) {
+ free(ns->a_qp.req_ctx);
+ }
+ free(ns);
+ }
+}
+
+static void
+free_controllers(void)
+{
+ struct nvme_fuzz_ctrlr *ctrlr, *tmp;
+
+ TAILQ_FOREACH_SAFE(ctrlr, &g_ctrlr_list, tailq, tmp) {
+ TAILQ_REMOVE(&g_ctrlr_list, ctrlr, tailq);
+ spdk_nvme_detach(ctrlr->ctrlr);
+ free(ctrlr);
+ }
+}
+
+static void
+free_trids(void)
+{
+ struct nvme_fuzz_trid *trid, *tmp;
+
+ TAILQ_FOREACH_SAFE(trid, &g_trid_list, tailq, tmp) {
+ TAILQ_REMOVE(&g_trid_list, trid, tailq);
+ free(trid);
+ }
+}
+
+static void
+register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns, uint32_t nsid)
+{
+ struct nvme_fuzz_ns *ns_entry;
+
+ ns_entry = calloc(1, sizeof(struct nvme_fuzz_ns));
+ if (ns_entry == NULL) {
+ fprintf(stderr, "Unable to allocate an entry for a namespace\n");
+ return;
+ }
+
+ ns_entry->ns = ns;
+ ns_entry->ctrlr = ctrlr;
+ ns_entry->nsid = nsid;
+
+ TAILQ_INIT(&ns_entry->io_qp.free_ctx_objs);
+ TAILQ_INIT(&ns_entry->io_qp.outstanding_ctx_objs);
+ if (g_run_admin_commands) {
+ ns_entry->a_qp.qpair = NULL;
+ TAILQ_INIT(&ns_entry->a_qp.free_ctx_objs);
+ TAILQ_INIT(&ns_entry->a_qp.outstanding_ctx_objs);
+ }
+ TAILQ_INSERT_TAIL(&g_ns_list, ns_entry, tailq);
+}
+
+static void
+register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
+{
+ struct nvme_fuzz_ctrlr *ctrlr_entry;
+ uint32_t nsid;
+ struct spdk_nvme_ns *ns;
+
+ ctrlr_entry = calloc(1, sizeof(struct nvme_fuzz_ctrlr));
+ if (ctrlr_entry == NULL) {
+ fprintf(stderr, "Unable to allocate an entry for a controller\n");
+ return;
+ }
+
+ ctrlr_entry->ctrlr = ctrlr;
+ TAILQ_INSERT_TAIL(&g_ctrlr_list, ctrlr_entry, tailq);
+
+ for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0;
+ nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
+ ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
+ if (ns == NULL) {
+ continue;
+ }
+ register_ns(ctrlr, ns, nsid);
+ }
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ register_ctrlr(ctrlr);
+}
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, struct spdk_nvme_ctrlr_opts *opts)
+{
+ printf("Controller trtype %s\ttraddr %s\n", spdk_nvme_transport_id_trtype_str(trid->trtype),
+ trid->traddr);
+
+ return true;
+}
+
+static int
+prep_qpair(struct nvme_fuzz_ns *ns, struct nvme_fuzz_qp *qp, uint32_t max_qdepth)
+{
+ uint32_t i;
+
+ /* Ensure that each qpair gets a unique random seed for maximum command dispersion. */
+
+ if (g_seed_value != 0) {
+ qp->random_seed = g_seed_value;
+ } else {
+ /* Take the low 32 bits of spdk_get_ticks. This should be more granular than time(). */
+ qp->random_seed = spdk_get_ticks();
+ }
+
+ qp->timeout_tsc = fuzz_refresh_timeout();
+
+ qp->req_ctx = calloc(max_qdepth, sizeof(struct nvme_fuzz_request));
+ if (qp->req_ctx == NULL) {
+ fprintf(stderr, "Unable to allocate I/O contexts for I/O qpair.\n");
+ return -1;
+ }
+
+ for (i = 0; i < max_qdepth; i++) {
+ qp->req_ctx[i].qp = qp;
+ TAILQ_INSERT_HEAD(&qp->free_ctx_objs, &qp->req_ctx[i], link);
+ }
+
+ return 0;
+}
+
+static int
+prepare_qpairs(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct nvme_fuzz_ns *ns_entry;
+
+ TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) {
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(ns_entry->ctrlr, &opts, sizeof(opts));
+ ns_entry->io_qp.qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, &opts, sizeof(opts));
+ if (ns_entry->io_qp.qpair == NULL) {
+ fprintf(stderr, "Unable to create a qpair for a namespace\n");
+ return -1;
+ }
+
+ ns_entry->io_qp.is_admin = false;
+ if (prep_qpair(ns_entry, &ns_entry->io_qp, g_io_depth) != 0) {
+ fprintf(stderr, "Unable to allocate request contexts for I/O qpair.\n");
+ return -1;
+ }
+
+ if (g_run_admin_commands) {
+ ns_entry->a_qp.is_admin = true;
+ if (prep_qpair(ns_entry, &ns_entry->a_qp, g_admin_depth) != 0) {
+ fprintf(stderr, "Unable to allocate request contexts for admin qpair.\n");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+static void
+start_ns_poller(void *ctx)
+{
+ struct nvme_fuzz_ns *ns_entry = ctx;
+
+ ns_entry->req_poller = SPDK_POLLER_REGISTER(poll_for_completions, ns_entry, 0);
+ submit_ns_cmds(ns_entry);
+}
+
+static int
+check_app_completion(void *ctx)
+{
+
+ if (g_num_active_threads <= 0) {
+ spdk_poller_unregister(&g_app_completion_poller);
+ if (g_cmd_array) {
+ free(g_cmd_array);
+ }
+ printf("Fuzzing completed. Shutting down the fuzz application\n\n");
+ printf("Dumping successful admin opcodes:\n");
+ report_successful_opcodes(g_successful_admin_opcodes, UNIQUE_OPCODES);
+ printf("Dumping successful io opcodes:\n");
+ report_successful_opcodes(g_successful_io_opcodes, UNIQUE_OPCODES);
+ free_namespaces();
+ free_controllers();
+ free_trids();
+ spdk_app_stop(0);
+ }
+ return 0;
+}
+
+static void
+begin_fuzz(void *ctx)
+{
+ struct nvme_fuzz_ns *ns_entry;
+ struct nvme_fuzz_trid *trid;
+ int rc;
+
+ if (!spdk_iommu_is_enabled()) {
+ /* Don't set rc to an error code here. We don't want to fail an automated test based on this. */
+ fprintf(stderr, "The IOMMU must be enabled to run this program to avoid unsafe memory accesses.\n");
+ rc = 0;
+ goto out;
+ }
+
+ TAILQ_FOREACH(trid, &g_trid_list, tailq) {
+ if (spdk_nvme_probe(&trid->trid, trid, probe_cb, attach_cb, NULL) != 0) {
+ fprintf(stderr, "spdk_nvme_probe() failed for transport address '%s'\n",
+ trid->trid.traddr);
+ rc = -1;
+ goto out;
+ }
+ }
+
+ if (TAILQ_EMPTY(&g_ns_list)) {
+ fprintf(stderr, "No valid NVMe Namespaces to fuzz\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = prepare_qpairs();
+
+ if (rc < 0) {
+ fprintf(stderr, "Unable to prepare the qpairs\n");
+ goto out;
+ }
+
+ g_runtime_ticks = spdk_get_ticks() + g_runtime * spdk_get_ticks_hz();
+
+ /* Assigning all of the threads and then starting them makes cleanup easier. */
+ TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) {
+ ns_entry->thread = spdk_thread_create(NULL, NULL);
+ if (ns_entry->thread == NULL) {
+ fprintf(stderr, "Failed to allocate thread for namespace.\n");
+ goto out;
+ }
+ }
+
+ TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) {
+ spdk_thread_send_msg(ns_entry->thread, start_ns_poller, ns_entry);
+ __sync_add_and_fetch(&g_num_active_threads, 1);
+ }
+
+ g_app_completion_poller = SPDK_POLLER_REGISTER(check_app_completion, NULL, 1000000);
+ return;
+out:
+ printf("Shutting down the fuzz application\n");
+ free_namespaces();
+ free_controllers();
+ free_trids();
+ spdk_app_stop(rc);
+}
+
+static int
+parse_trids(void)
+{
+ struct spdk_conf *config = NULL;
+ struct spdk_conf_section *sp;
+ const char *trid_char;
+ struct nvme_fuzz_trid *current_trid;
+ int num_subsystems = 0;
+ int rc = 0;
+
+ if (g_conf_file) {
+ config = spdk_conf_allocate();
+ if (!config) {
+ fprintf(stderr, "Unable to allocate an spdk_conf object\n");
+ return -1;
+ }
+
+ rc = spdk_conf_read(config, g_conf_file);
+ if (rc) {
+ fprintf(stderr, "Unable to convert the conf file into a readable system\n");
+ rc = -1;
+ goto exit;
+ }
+
+ sp = spdk_conf_find_section(config, "Nvme");
+
+ if (sp == NULL) {
+ fprintf(stderr, "No Nvme configuration in conf file\n");
+ goto exit;
+ }
+
+ while ((trid_char = spdk_conf_section_get_nmval(sp, "TransportID", num_subsystems, 0)) != NULL) {
+ current_trid = malloc(sizeof(struct nvme_fuzz_trid));
+ if (!current_trid) {
+ fprintf(stderr, "Unable to allocate memory for transport ID\n");
+ rc = -1;
+ goto exit;
+ }
+ rc = spdk_nvme_transport_id_parse(&current_trid->trid, trid_char);
+
+ if (rc < 0) {
+ fprintf(stderr, "failed to parse transport ID: %s\n", trid_char);
+ free(current_trid);
+ rc = -1;
+ goto exit;
+ }
+ TAILQ_INSERT_TAIL(&g_trid_list, current_trid, tailq);
+ num_subsystems++;
+ }
+ }
+
+exit:
+ if (config != NULL) {
+ spdk_conf_free(config);
+ }
+ return rc;
+}
+
+static void
+nvme_fuzz_usage(void)
+{
+ fprintf(stderr, " -a Perform admin commands. if -j is specified, \
+only admin commands will run. Otherwise they will be run in tandem with I/O commands.\n");
+ fprintf(stderr, " -C <path> Path to a configuration file.\n");
+ fprintf(stderr,
+ " -j <path> Path to a json file containing named objects of type spdk_nvme_cmd. If this option is specified, -t will be ignored.\n");
+ fprintf(stderr, " -N Target only valid namespace with commands. \
+This helps dig deeper into other errors besides invalid namespace.\n");
+ fprintf(stderr, " -S <integer> Seed value for test.\n");
+ fprintf(stderr,
+ " -t <integer> Time in seconds to run the fuzz test. Only valid if -j is not specified.\n");
+ fprintf(stderr, " -V Enable logging of each submitted command.\n");
+}
+
+static int
+nvme_fuzz_parse(int ch, char *arg)
+{
+ int64_t error_test;
+
+ switch (ch) {
+ case 'a':
+ g_run_admin_commands = true;
+ break;
+ case 'C':
+ g_conf_file = optarg;
+ break;
+ case 'j':
+ g_json_file = optarg;
+ break;
+ case 'N':
+ g_valid_ns_only = true;
+ break;
+ case 'S':
+ error_test = spdk_strtol(arg, 10);
+ if (error_test < 0) {
+ fprintf(stderr, "Invalid value supplied for the random seed.\n");
+ return -1;
+ } else {
+ g_seed_value = error_test;
+ }
+ break;
+ case 't':
+ g_runtime = spdk_strtol(optarg, 10);
+ if (g_runtime < 0 || g_runtime > MAX_RUNTIME_S) {
+ fprintf(stderr, "You must supply a positive runtime value less than 86401.\n");
+ return -1;
+ }
+ break;
+ case 'V':
+ g_verbose_mode = true;
+ break;
+ case '?':
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_app_opts opts = {};
+ int rc;
+
+ spdk_app_opts_init(&opts);
+ opts.name = "nvme_fuzz";
+
+ g_runtime = DEFAULT_RUNTIME;
+ g_run = true;
+
+ if ((rc = spdk_app_parse_args(argc, argv, &opts, "aC:j:NS:t:V", NULL, nvme_fuzz_parse,
+ nvme_fuzz_usage)) != SPDK_APP_PARSE_ARGS_SUCCESS) {
+ return rc;
+ }
+
+ if (g_conf_file) {
+ parse_trids();
+ }
+
+ if (g_json_file != NULL) {
+ g_cmd_array_size = fuzz_parse_args_into_array(g_json_file, (void **)&g_cmd_array,
+ sizeof(struct spdk_nvme_cmd), g_nvme_cmd_json_name, parse_nvme_cmd_obj);
+ if (g_cmd_array_size == 0) {
+ fprintf(stderr, "The provided json file did not contain any valid commands. Exiting.");
+ return -EINVAL;
+ }
+ }
+
+ rc = spdk_app_start(&opts, begin_fuzz, NULL);
+
+ return rc;
+}
diff --git a/src/spdk/test/app/fuzz/vhost_fuzz/.gitignore b/src/spdk/test/app/fuzz/vhost_fuzz/.gitignore
new file mode 100644
index 000000000..2df201f3f
--- /dev/null
+++ b/src/spdk/test/app/fuzz/vhost_fuzz/.gitignore
@@ -0,0 +1 @@
+vhost_fuzz
diff --git a/src/spdk/test/app/fuzz/vhost_fuzz/Makefile b/src/spdk/test/app/fuzz/vhost_fuzz/Makefile
new file mode 100644
index 000000000..69b8d1866
--- /dev/null
+++ b/src/spdk/test/app/fuzz/vhost_fuzz/Makefile
@@ -0,0 +1,42 @@
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+APP = vhost_fuzz
+
+CFLAGS += -I$(SPDK_ROOT_DIR)/test/app/fuzz/common
+
+C_SRCS := vhost_fuzz_rpc.c vhost_fuzz.c
+
+SPDK_LIB_LIST += event conf json jsonrpc rpc util log sock trace thread virtio
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/app/fuzz/vhost_fuzz/README.md b/src/spdk/test/app/fuzz/vhost_fuzz/README.md
new file mode 100644
index 000000000..ab9656c5b
--- /dev/null
+++ b/src/spdk/test/app/fuzz/vhost_fuzz/README.md
@@ -0,0 +1,46 @@
+# Overview
+
+This application is intended to fuzz test the SPDK vhost target by supplying
+malformed or invalid requests over a Unix domain socket. The fuzzer
+currently supports fuzzing both vhost block and vhost scsi devices. When
+fuzzing a vhost scsi device, users can select whether to fuzz the scsi I/O
+queue or the scsi admin queue. Please see the NVMe fuzzer README for information
+on how output is generated, debugging procedures, and the JSON format expected
+when supplying preconstructed values to the fuzzer.
+
+# Request Types
+
+Like the NVMe fuzzer, there is an example json file showing the types of requests
+that the application accepts. Since the vhost application accepts both vhost block
+and vhost scsi commands, there are three distinct object types that can be passed
+to the application.
+
+1. vhost_blk_cmd
+2. vhost_scsi_cmd
+3. vhost_scsi_mgmt_cmd
+
+Each of these objects contains distinct fields and they should not be used interchangeably.
+
+All three of the data types begin with three iovec structures describing the request, data, and response
+memory locations. By default, these values are overwritten by the application even when supplied as part
+of a json file. This is because the request and response pointers are intended to point to portions of
+the application's own I/O context structure.
+
+If you want to override these iovec values using a json file, you can specify the -k option.
+In most cases, this will just result in the application failing all I/O immediately since
+the request will no longer point to a valid memory location.
+
+It is possible to supply all three types of requests in a single array to the application. They will be parsed and
+submitted to the proper block devices.
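+
+For reference, the general shape of a single entry is sketched below. The field names match
+those used in example.json in this directory; the values shown here are placeholders only and
+are not taken from a real capture:
+
+~~~
+"vhost_blk_cmd": {
+    "req_iov": { "iov_base": "0", "iov_len": 24 },
+    "data_iov": { "iov_base": "0", "iov_len": 1024 },
+    "resp_iov": { "iov_base": "0", "iov_len": 5 },
+    "type": 0,
+    "ioprio": 0,
+    "sector": 0
+}
+~~~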
+
+# RPC
+
+The vhost fuzzer differs from the NVMe fuzzer in that it expects devices to be configured via RPC. The fuzzer should
+always be started with the --wait-for-rpc argument. Please see below for an example of starting the fuzzer.
+
+~~~
+./test/app/fuzz/vhost_fuzz/vhost_fuzz -t 30 --wait-for-rpc &
+./scripts/rpc.py fuzz_vhost_create_dev -s ./Vhost.1 -b -V
+./scripts/rpc.py fuzz_vhost_create_dev -s ./naa.VhostScsi0.1 -l -V
+./scripts/rpc.py framework_start_init
+~~~
diff --git a/src/spdk/test/app/fuzz/vhost_fuzz/example.json b/src/spdk/test/app/fuzz/vhost_fuzz/example.json
new file mode 100644
index 000000000..9157350f8
--- /dev/null
+++ b/src/spdk/test/app/fuzz/vhost_fuzz/example.json
@@ -0,0 +1,95 @@
+{
+ "vhost_scsi_mgmt_cmd": {
+ "req_iov": {
+ "iov_base": "20007960ff60",
+ "iov_len": 51
+ },
+ "data_iov": {
+ "iov_base": "2000794dbe00",
+ "iov_len": 1024
+ },
+ "resp_iov": {
+ "iov_base": "20007960ff98",
+ "iov_len": 108
+ },
+ "lun": "AQA5vBf3KyE=",
+ "tag": 6163879237324549222,
+ "task_attr": 247,
+ "prio": 242,
+ "crn": 169,
+ "cdb": "ErxZ/qpHBau8gPzjbpotpbTnOW/2g0ns2yRh4jhe5kc="
+ },
+ "vhost_scsi_mgmt_cmd": {
+ "req_iov": {
+ "iov_base": "20007960fe78",
+ "iov_len": 51
+ },
+ "data_iov": {
+ "iov_base": "2000794dbe00",
+ "iov_len": 1024
+ },
+ "resp_iov": {
+ "iov_base": "20007960feb0",
+ "iov_len": 108
+ },
+ "lun": "AQAwWRrhAoo=",
+ "tag": 10457151189012466200,
+ "task_attr": 97,
+ "prio": 158,
+ "crn": 41,
+ "cdb": "Ejjxdzl8KwRDhq+MPfY3J3niYfAHj+2irE8Q2vIfQIk="
+ },
+ "vhost_scsi_cmd": {
+ "req_iov": {
+ "iov_base": "20007960fe78",
+ "iov_len": 24
+ },
+ "data_iov": {
+ "iov_base": "20007960fe78",
+ "iov_len": 1024
+ },
+ "resp_iov": {
+ "iov_base": "20007960fe78",
+ "iov_len": 5
+ },
+ "type": 3,
+ "subtype": 872683406,
+ "lun": "LdaLkHOIQxI=",
+ "tag": 8452696012704506104
+ },
+ "vhost_scsi_cmd": {
+ "req_iov": {
+ "iov_base": "20007960fe78",
+ "iov_len": 24
+ },
+ "data_iov": {
+ "iov_base": "20007960fe78",
+ "iov_len": 1024
+ },
+ "resp_iov": {
+ "iov_base": "20007960fe78",
+ "iov_len": 5
+ },
+ "type": 3,
+ "subtype": 872683406,
+ "lun": "LdaLkHOIQxI=",
+ "tag": 8452696012704506104
+ },
+ "vhost_blk_cmd": {
+ "req_iov": {
+ "iov_base": "20007960fe78",
+ "iov_len": 24
+ },
+ "data_iov": {
+ "iov_base": "20007960fe78",
+ "iov_len": 1024
+ },
+ "resp_iov": {
+ "iov_base": "20007960fe78",
+ "iov_len": 5
+ },
+ "type": 2,
+ "ioprio": 4343,
+ "sector": 24323523
+ }
+}
diff --git a/src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz.c b/src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz.c
new file mode 100644
index 000000000..47dbfbc65
--- /dev/null
+++ b/src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz.c
@@ -0,0 +1,1146 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/conf.h"
+#include "spdk/env.h"
+#include "spdk/json.h"
+#include "spdk/event.h"
+#include "spdk/likely.h"
+#include "spdk/util.h"
+#include "spdk/string.h"
+#include "spdk_internal/virtio.h"
+#include "spdk_internal/vhost_user.h"
+
+#include "fuzz_common.h"
+#include "vhost_fuzz.h"
+
+#include <linux/virtio_blk.h>
+#include <linux/virtio_scsi.h>
+
+/* Features desired/implemented by virtio blk. */
+#define VIRTIO_BLK_DEV_SUPPORTED_FEATURES \
+ (1ULL << VIRTIO_BLK_F_BLK_SIZE | \
+ 1ULL << VIRTIO_BLK_F_TOPOLOGY | \
+ 1ULL << VIRTIO_BLK_F_MQ | \
+ 1ULL << VIRTIO_BLK_F_RO | \
+ 1ULL << VIRTIO_BLK_F_DISCARD | \
+ 1ULL << VIRTIO_RING_F_EVENT_IDX | \
+ 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
+
+/* Features desired/implemented by virtio scsi. */
+#define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES \
+ (1ULL << VIRTIO_SCSI_F_INOUT | \
+ 1ULL << VIRTIO_SCSI_F_HOTPLUG | \
+ 1ULL << VIRTIO_RING_F_EVENT_IDX | \
+ 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
+
+#define VIRTIO_DEV_FIXED_QUEUES 2
+#define VIRTIO_SCSI_CONTROLQ 0
+#define VIRTIO_SCSI_EVENTQ 1
+#define VIRTIO_REQUESTQ 2
+#define FUZZ_MAX_QUEUES 3
+
+#define FUZZ_QUEUE_DEPTH 128
+
+#define BLK_IO_NAME "vhost_blk_cmd"
+#define SCSI_IO_NAME "vhost_scsi_cmd"
+#define SCSI_MGMT_NAME "vhost_scsi_mgmt_cmd"
+
+struct fuzz_vhost_iov_ctx {
+ struct iovec iov_req;
+ struct iovec iov_data;
+ struct iovec iov_resp;
+};
+
+struct fuzz_vhost_io_ctx {
+ struct fuzz_vhost_iov_ctx iovs;
+ union {
+ struct virtio_blk_outhdr blk_req;
+ struct virtio_scsi_cmd_req scsi_req;
+ struct virtio_scsi_ctrl_tmf_req scsi_tmf_req;
+ } req;
+ union {
+ uint8_t blk_resp;
+ struct virtio_scsi_cmd_resp scsi_resp;
+ union {
+ struct virtio_scsi_ctrl_tmf_resp scsi_tmf_resp;
+ struct virtio_scsi_ctrl_an_resp an_resp;
+ } scsi_tmf_resp;
+ } resp;
+
+ TAILQ_ENTRY(fuzz_vhost_io_ctx) link;
+};
+
+struct fuzz_vhost_dev_ctx {
+ struct virtio_dev virtio_dev;
+ struct spdk_thread *thread;
+ struct spdk_poller *poller;
+
+ struct fuzz_vhost_io_ctx *io_ctx_array;
+ TAILQ_HEAD(, fuzz_vhost_io_ctx) free_io_ctx;
+ TAILQ_HEAD(, fuzz_vhost_io_ctx) outstanding_io_ctx;
+
+ unsigned int random_seed;
+
+ uint64_t submitted_io;
+ uint64_t completed_io;
+ uint64_t successful_io;
+ uint64_t timeout_tsc;
+
+ bool socket_is_blk;
+ bool test_scsi_tmf;
+ bool valid_lun;
+ bool use_bogus_buffer;
+ bool use_valid_buffer;
+ bool timed_out;
+
+ TAILQ_ENTRY(fuzz_vhost_dev_ctx) link;
+};
+
+/* Global run state */
+uint64_t g_runtime_ticks;
+int g_runtime;
+int g_num_active_threads;
+bool g_run = true;
+bool g_verbose_mode = false;
+
+/* Global resources */
+TAILQ_HEAD(, fuzz_vhost_dev_ctx) g_dev_list = TAILQ_HEAD_INITIALIZER(g_dev_list);
+struct spdk_poller *g_run_poller;
+void *g_valid_buffer;
+unsigned int g_random_seed;
+
+
+/* Global parameters and resources for parsed commands */
+bool g_keep_iov_pointers = false;
+char *g_json_file = NULL;
+struct fuzz_vhost_io_ctx *g_blk_cmd_array = NULL;
+struct fuzz_vhost_io_ctx *g_scsi_cmd_array = NULL;
+struct fuzz_vhost_io_ctx *g_scsi_mgmt_cmd_array = NULL;
+
+size_t g_blk_cmd_array_size;
+size_t g_scsi_cmd_array_size;
+size_t g_scsi_mgmt_cmd_array_size;
+
+static void
+cleanup(void)
+{
+ struct fuzz_vhost_dev_ctx *dev_ctx, *tmp;
+ printf("Fuzzing completed.\n");
+ TAILQ_FOREACH_SAFE(dev_ctx, &g_dev_list, link, tmp) {
+ printf("device %p stats: Completed I/O: %lu, Successful I/O: %lu\n", dev_ctx,
+ dev_ctx->completed_io, dev_ctx->successful_io);
+ virtio_dev_release_queue(&dev_ctx->virtio_dev, VIRTIO_REQUESTQ);
+ if (!dev_ctx->socket_is_blk) {
+ virtio_dev_release_queue(&dev_ctx->virtio_dev, VIRTIO_SCSI_EVENTQ);
+ virtio_dev_release_queue(&dev_ctx->virtio_dev, VIRTIO_SCSI_CONTROLQ);
+ }
+ virtio_dev_stop(&dev_ctx->virtio_dev);
+ virtio_dev_destruct(&dev_ctx->virtio_dev);
+ if (dev_ctx->io_ctx_array) {
+ spdk_free(dev_ctx->io_ctx_array);
+ }
+ free(dev_ctx);
+ }
+
+ spdk_free(g_valid_buffer);
+
+ if (g_blk_cmd_array) {
+ free(g_blk_cmd_array);
+ }
+ if (g_scsi_cmd_array) {
+ free(g_scsi_cmd_array);
+ }
+ if (g_scsi_mgmt_cmd_array) {
+ free(g_scsi_mgmt_cmd_array);
+ }
+}
+
+/* Get a memory address that is random and not located in our hugepage memory. */
+static void *
+get_invalid_mem_address(uint64_t length)
+{
+ uint64_t chosen_address = 0x0;
+
+ while (true) {
+ chosen_address = rand();
+ chosen_address = (chosen_address << 32) | rand();
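+ /* If spdk_vtophys() has no translation for the address, it is not registered with SPDK and therefore not part of our hugepage memory. */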
+ if (spdk_vtophys((void *)chosen_address, &length) == SPDK_VTOPHYS_ERROR) {
+ return (void *)chosen_address;
+ }
+ }
+ return NULL;
+}
+
+/* dev initialization code begin. */
+static int
+virtio_dev_init(struct virtio_dev *vdev, const char *socket_path, uint64_t flags,
+ uint16_t max_queues)
+{
+ int rc;
+
+ rc = virtio_user_dev_init(vdev, "dev_ctx", socket_path, 1024);
+ if (rc != 0) {
+ fprintf(stderr, "Failed to initialize virtual bdev\n");
+ return rc;
+ }
+
+ rc = virtio_dev_reset(vdev, flags);
+ if (rc != 0) {
+ return rc;
+ }
+
+ rc = virtio_dev_start(vdev, max_queues, VIRTIO_DEV_FIXED_QUEUES);
+ if (rc != 0) {
+ return rc;
+ }
+
+ rc = virtio_dev_acquire_queue(vdev, VIRTIO_REQUESTQ);
+ if (rc < 0) {
+ fprintf(stderr, "Couldn't get an unused queue for the io_channel.\n");
+ virtio_dev_stop(vdev);
+ return rc;
+ }
+ return 0;
+}
+
+static int
+blk_dev_init(struct virtio_dev *vdev, const char *socket_path, uint16_t max_queues)
+{
+ uint16_t host_max_queues;
+ int rc;
+
+ if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_MQ)) {
+ rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, num_queues),
+ &host_max_queues, sizeof(host_max_queues));
+ if (rc) {
+ fprintf(stderr, "%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
+ return rc;
+ }
+ } else {
+ host_max_queues = 1;
+ }
+
+ if (max_queues == 0) {
+ fprintf(stderr, "%s: requested 0 request queues (%"PRIu16" available).\n",
+ vdev->name, host_max_queues);
+ return -EINVAL;
+ }
+
+ if (max_queues > host_max_queues) {
+ fprintf(stderr, "%s: requested %"PRIu16" request queues "
+ "but only %"PRIu16" available.\n",
+ vdev->name, max_queues, host_max_queues);
+ max_queues = host_max_queues;
+ }
+
+ return virtio_dev_init(vdev, socket_path, VIRTIO_BLK_DEV_SUPPORTED_FEATURES, max_queues);
+}
+
+static int
+scsi_dev_init(struct virtio_dev *vdev, const char *socket_path, uint16_t max_queues)
+{
+ int rc;
+
+ rc = virtio_dev_init(vdev, socket_path, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES, max_queues);
+ if (rc != 0) {
+ return rc;
+ }
+
+ rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
+ if (rc != 0) {
+ SPDK_ERRLOG("Failed to acquire the controlq.\n");
+ return rc;
+ }
+
+ rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
+ if (rc != 0) {
+ SPDK_ERRLOG("Failed to acquire the eventq.\n");
+ virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
+ return rc;
+ }
+
+ return 0;
+}
+
+int
+fuzz_vhost_dev_init(const char *socket_path, bool is_blk_dev, bool use_bogus_buffer,
+ bool use_valid_buffer, bool valid_lun, bool test_scsi_tmf)
+{
+ struct fuzz_vhost_dev_ctx *dev_ctx;
+ int rc = 0, i;
+
+ dev_ctx = calloc(1, sizeof(*dev_ctx));
+ if (dev_ctx == NULL) {
+ return -ENOMEM;
+ }
+
+ dev_ctx->socket_is_blk = is_blk_dev;
+ dev_ctx->use_bogus_buffer = use_bogus_buffer;
+ dev_ctx->use_valid_buffer = use_valid_buffer;
+ dev_ctx->valid_lun = valid_lun;
+ dev_ctx->test_scsi_tmf = test_scsi_tmf;
+
+ TAILQ_INIT(&dev_ctx->free_io_ctx);
+ TAILQ_INIT(&dev_ctx->outstanding_io_ctx);
+
+ assert(sizeof(*dev_ctx->io_ctx_array) <= UINT64_MAX / FUZZ_QUEUE_DEPTH);
+ dev_ctx->io_ctx_array = spdk_malloc(sizeof(*dev_ctx->io_ctx_array) * FUZZ_QUEUE_DEPTH, 0x0, NULL,
+ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
+ if (dev_ctx->io_ctx_array == NULL) {
+ free(dev_ctx);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < FUZZ_QUEUE_DEPTH; i++) {
+ TAILQ_INSERT_HEAD(&dev_ctx->free_io_ctx, &dev_ctx->io_ctx_array[i], link);
+ }
+
+ dev_ctx->thread = spdk_thread_create(NULL, NULL);
+ if (dev_ctx->thread == NULL) {
+ fprintf(stderr, "Unable to allocate a thread for a fuzz device.\n");
+ rc = -ENOMEM;
+ goto error_out;
+ }
+
+ if (is_blk_dev) {
+ rc = blk_dev_init(&dev_ctx->virtio_dev, socket_path, FUZZ_MAX_QUEUES);
+ } else {
+ rc = scsi_dev_init(&dev_ctx->virtio_dev, socket_path, FUZZ_MAX_QUEUES);
+ }
+
+ if (rc) {
+ fprintf(stderr, "Unable to prepare the device to perform I/O.\n");
+ goto error_out;
+ }
+
+ TAILQ_INSERT_TAIL(&g_dev_list, dev_ctx, link);
+ return 0;
+
+error_out:
+ spdk_free(dev_ctx->io_ctx_array);
+ free(dev_ctx);
+ return rc;
+}
+/* dev initialization code end */
+
+/* data dumping functions begin */
+static int
+dump_virtio_cmd(void *ctx, const void *data, size_t size)
+{
+ fprintf(stderr, "%s\n", (const char *)data);
+ return 0;
+}
+
+static void
+print_blk_io_data(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
+{
+ spdk_json_write_named_uint32(w, "type", io_ctx->req.blk_req.type);
+ spdk_json_write_named_uint32(w, "ioprio", io_ctx->req.blk_req.ioprio);
+ spdk_json_write_named_uint64(w, "sector", io_ctx->req.blk_req.sector);
+}
+
+static void
+print_scsi_tmf_io_data(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
+{
+ char *lun_data;
+
+ lun_data = fuzz_get_value_base_64_buffer(io_ctx->req.scsi_tmf_req.lun,
+ sizeof(io_ctx->req.scsi_tmf_req.lun));
+
+ spdk_json_write_named_uint32(w, "type", io_ctx->req.scsi_tmf_req.type);
+ spdk_json_write_named_uint32(w, "subtype", io_ctx->req.scsi_tmf_req.subtype);
+ spdk_json_write_named_string(w, "lun", lun_data);
+ spdk_json_write_named_uint64(w, "tag", io_ctx->req.scsi_tmf_req.tag);
+
+ free(lun_data);
+}
+
+static void
+print_scsi_io_data(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
+{
+ char *lun_data;
+ char *cdb_data;
+
+ lun_data = fuzz_get_value_base_64_buffer(io_ctx->req.scsi_req.lun,
+ sizeof(io_ctx->req.scsi_req.lun));
+ cdb_data = fuzz_get_value_base_64_buffer(io_ctx->req.scsi_req.cdb,
+ sizeof(io_ctx->req.scsi_req.cdb));
+
+ spdk_json_write_named_string(w, "lun", lun_data);
+ spdk_json_write_named_uint64(w, "tag", io_ctx->req.scsi_req.tag);
+ spdk_json_write_named_uint32(w, "task_attr", io_ctx->req.scsi_req.task_attr);
+ spdk_json_write_named_uint32(w, "prio", io_ctx->req.scsi_req.prio);
+ spdk_json_write_named_uint32(w, "crn", io_ctx->req.scsi_req.crn);
+ spdk_json_write_named_string(w, "cdb", cdb_data);
+
+ free(lun_data);
+ free(cdb_data);
+}
+
+static void
+print_iov_obj(struct spdk_json_write_ctx *w, const char *iov_name, struct iovec *iov)
+{
+ /* "0x" + up to 16 digits + null terminator */
+ char hex_addr[19];
+ int rc;
+
+ rc = snprintf(hex_addr, 19, "%lx", (uintptr_t)iov->iov_base);
+
+ /* default to 0. */
+ if (rc < 0 || rc >= 19) {
+ hex_addr[0] = '0';
+ hex_addr[1] = '\0';
+ }
+
+ spdk_json_write_named_object_begin(w, iov_name);
+ spdk_json_write_named_string(w, "iov_base", hex_addr);
+ spdk_json_write_named_uint64(w, "iov_len", iov->iov_len);
+ spdk_json_write_object_end(w);
+}
+
+static void
+print_iovs(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
+{
+ print_iov_obj(w, "req_iov", &io_ctx->iovs.iov_req);
+ print_iov_obj(w, "data_iov", &io_ctx->iovs.iov_data);
+ print_iov_obj(w, "resp_iov", &io_ctx->iovs.iov_resp);
+}
+
+static void
+print_req_obj(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
+{
+
+ struct spdk_json_write_ctx *w;
+
+ w = spdk_json_write_begin(dump_virtio_cmd, NULL, SPDK_JSON_WRITE_FLAG_FORMATTED);
+
+ if (dev_ctx->socket_is_blk) {
+ spdk_json_write_named_object_begin(w, BLK_IO_NAME);
+ print_iovs(w, io_ctx);
+ print_blk_io_data(w, io_ctx);
+ } else if (dev_ctx->test_scsi_tmf) {
+ spdk_json_write_named_object_begin(w, SCSI_MGMT_NAME);
+ print_iovs(w, io_ctx);
+ print_scsi_tmf_io_data(w, io_ctx);
+ } else {
+ spdk_json_write_named_object_begin(w, SCSI_IO_NAME);
+ print_iovs(w, io_ctx);
+ print_scsi_io_data(w, io_ctx);
+ }
+ spdk_json_write_object_end(w);
+ spdk_json_write_end(w);
+}
+
+static void
+dump_outstanding_io(struct fuzz_vhost_dev_ctx *dev_ctx)
+{
+ struct fuzz_vhost_io_ctx *io_ctx, *tmp;
+
+ TAILQ_FOREACH_SAFE(io_ctx, &dev_ctx->outstanding_io_ctx, link, tmp) {
+ print_req_obj(dev_ctx, io_ctx);
+ TAILQ_REMOVE(&dev_ctx->outstanding_io_ctx, io_ctx, link);
+ TAILQ_INSERT_TAIL(&dev_ctx->free_io_ctx, io_ctx, link);
+ }
+}
+/* data dumping functions end */
+
+/* data parsing functions begin */
+static int
+hex_value(uint8_t c)
+{
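+ /* The table below stores each digit's value plus one, so characters that are not hex digits default to 0 and the subtraction at return maps them to -1. */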
+#define V(x, y) [x] = y + 1
+ static const int8_t val[256] = {
+ V('0', 0), V('1', 1), V('2', 2), V('3', 3), V('4', 4),
+ V('5', 5), V('6', 6), V('7', 7), V('8', 8), V('9', 9),
+ V('A', 0xA), V('B', 0xB), V('C', 0xC), V('D', 0xD), V('E', 0xE), V('F', 0xF),
+ V('a', 0xA), V('b', 0xB), V('c', 0xC), V('d', 0xD), V('e', 0xE), V('f', 0xF),
+ };
+#undef V
+
+ return val[c] - 1;
+}
+
+static int
+fuzz_json_decode_hex_uint64(const struct spdk_json_val *val, void *out)
+{
+ uint64_t *out_val = out;
+ size_t i;
+ char *val_pointer = val->start;
+ int current_val;
+
+ if (val->len > 16) {
+ return -EINVAL;
+ }
+
+ *out_val = 0;
+ for (i = 0; i < val->len; i++) {
+ *out_val = *out_val << 4;
+ current_val = hex_value(*val_pointer);
+ if (current_val < 0) {
+ return -EINVAL;
+ }
+ *out_val += current_val;
+ val_pointer++;
+ }
+
+ return 0;
+}
+
+static const struct spdk_json_object_decoder fuzz_vhost_iov_decoders[] = {
+ {"iov_base", offsetof(struct iovec, iov_base), fuzz_json_decode_hex_uint64},
+ {"iov_len", offsetof(struct iovec, iov_len), spdk_json_decode_uint64},
+};
+
+static size_t
+parse_iov_struct(struct iovec *iovec, struct spdk_json_val *value)
+{
+ int rc;
+
+ if (value->type != SPDK_JSON_VAL_OBJECT_BEGIN) {
+ return -1;
+ }
+
+ rc = spdk_json_decode_object(value,
+ fuzz_vhost_iov_decoders,
+ SPDK_COUNTOF(fuzz_vhost_iov_decoders),
+ iovec);
+ if (rc) {
+ return -1;
+ }
+
+ while (value->type != SPDK_JSON_VAL_OBJECT_END) {
+ value++;
+ rc++;
+ }
+
+ /* The +1 instructs the calling function to skip over the OBJECT_END token. */
+ rc += 1;
+ return rc;
+}
+
+static bool
+parse_vhost_blk_cmds(void *item, struct spdk_json_val *value, size_t num_values)
+{
+ struct fuzz_vhost_io_ctx *io_ctx = item;
+ struct spdk_json_val *prev_value;
+ int nested_object_size;
+ uint64_t tmp_val;
+ size_t i = 0;
+
+ while (i < num_values) {
+ nested_object_size = 1;
+ if (value->type == SPDK_JSON_VAL_NAME) {
+ prev_value = value;
+ value++;
+ i++;
+ if (!strncmp(prev_value->start, "req_iov", prev_value->len)) {
+ nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_req, value);
+ } else if (!strncmp(prev_value->start, "data_iov", prev_value->len)) {
+ nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
+ } else if (!strncmp(prev_value->start, "resp_iov", prev_value->len)) {
+ nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_resp, value);
+ } else if (!strncmp(prev_value->start, "type", prev_value->len)) {
+ if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
+ nested_object_size = -1;
+ } else {
+ io_ctx->req.blk_req.type = tmp_val;
+ }
+ } else if (!strncmp(prev_value->start, "ioprio", prev_value->len)) {
+ if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
+ nested_object_size = -1;
+ } else {
+ io_ctx->req.blk_req.ioprio = tmp_val;
+ }
+ } else if (!strncmp(prev_value->start, "sector", prev_value->len)) {
+ if (fuzz_parse_json_num(value, UINT64_MAX, &tmp_val)) {
+ nested_object_size = -1;
+ } else {
+ io_ctx->req.blk_req.sector = tmp_val;
+ }
+ }
+ }
+ if (nested_object_size < 0) {
+ fprintf(stderr, "Invalid value supplied for io_ctx->%.*s: %.*s\n", prev_value->len,
+ (char *)prev_value->start, value->len, (char *)value->start);
+ return false;
+ }
+ value += nested_object_size;
+ i += nested_object_size;
+ }
+ return true;
+}
+
+static bool
+parse_vhost_scsi_cmds(void *item, struct spdk_json_val *value, size_t num_values)
+{
+ struct fuzz_vhost_io_ctx *io_ctx = item;
+ struct spdk_json_val *prev_value;
+ int nested_object_size;
+ uint64_t tmp_val;
+ size_t i = 0;
+
+ while (i < num_values) {
+ nested_object_size = 1;
+ if (value->type == SPDK_JSON_VAL_NAME) {
+ prev_value = value;
+ value++;
+ i++;
+ if (!strncmp(prev_value->start, "req_iov", prev_value->len)) {
+ nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_req, value);
+ } else if (!strncmp(prev_value->start, "data_iov", prev_value->len)) {
+ nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
+ } else if (!strncmp(prev_value->start, "resp_iov", prev_value->len)) {
+ nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_resp, value);
+ } else if (!strncmp(prev_value->start, "lun", prev_value->len)) {
+ if (fuzz_get_base_64_buffer_value(&io_ctx->req.scsi_req.lun,
+ sizeof(io_ctx->req.scsi_req.lun),
+ (char *)value->start,
+ value->len)) {
+ nested_object_size = -1;
+ }
+ } else if (!strncmp(prev_value->start, "tag", prev_value->len)) {
+ if (fuzz_parse_json_num(value, UINT64_MAX, &tmp_val)) {
+ nested_object_size = -1;
+ } else {
+ io_ctx->req.scsi_req.tag = tmp_val;
+ }
+ } else if (!strncmp(prev_value->start, "task_attr", prev_value->len)) {
+ if (fuzz_parse_json_num(value, UINT8_MAX, &tmp_val)) {
+ nested_object_size = -1;
+ } else {
+ io_ctx->req.scsi_req.task_attr = tmp_val;
+ }
+ } else if (!strncmp(prev_value->start, "prio", prev_value->len)) {
+ if (fuzz_parse_json_num(value, UINT8_MAX, &tmp_val)) {
+ nested_object_size = -1;
+ } else {
+ io_ctx->req.scsi_req.prio = tmp_val;
+ }
+ } else if (!strncmp(prev_value->start, "crn", prev_value->len)) {
+ if (fuzz_parse_json_num(value, UINT8_MAX, &tmp_val)) {
+ nested_object_size = -1;
+ } else {
+ io_ctx->req.scsi_req.crn = tmp_val;
+ }
+ } else if (!strncmp(prev_value->start, "cdb", prev_value->len)) {
+ if (fuzz_get_base_64_buffer_value(&io_ctx->req.scsi_req.cdb,
+ sizeof(io_ctx->req.scsi_req.cdb),
+ (char *)value->start,
+ value->len)) {
+ nested_object_size = -1;
+ }
+ }
+ }
+ if (nested_object_size < 0) {
+ fprintf(stderr, "Invalid value supplied for io_ctx->%.*s: %.*s\n", prev_value->len,
+ (char *)prev_value->start, value->len, (char *)value->start);
+ return false;
+ }
+ value += nested_object_size;
+ i += nested_object_size;
+ }
+ return true;
+
+}
+
+static bool
+parse_vhost_scsi_mgmt_cmds(void *item, struct spdk_json_val *value, size_t num_values)
+{
+ struct fuzz_vhost_io_ctx *io_ctx = item;
+ struct spdk_json_val *prev_value;
+ int nested_object_size;
+ uint64_t tmp_val;
+ size_t i = 0;
+
+ while (i < num_values) {
+ nested_object_size = 1;
+ if (value->type == SPDK_JSON_VAL_NAME) {
+ prev_value = value;
+ value++;
+ i++;
+ if (!strncmp(prev_value->start, "req_iov", prev_value->len)) {
+ nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_req, value);
+ } else if (!strncmp(prev_value->start, "data_iov", prev_value->len)) {
+ nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
+ } else if (!strncmp(prev_value->start, "resp_iov", prev_value->len)) {
+ nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_resp, value);
+ } else if (!strncmp(prev_value->start, "type", prev_value->len)) {
+ if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
+ nested_object_size = -1;
+ } else {
+ io_ctx->req.scsi_tmf_req.type = tmp_val;
+ }
+ } else if (!strncmp(prev_value->start, "subtype", prev_value->len)) {
+ if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
+ nested_object_size = -1;
+ } else {
+ io_ctx->req.scsi_tmf_req.subtype = tmp_val;
+ }
+ } else if (!strncmp(prev_value->start, "lun", prev_value->len)) {
+ if (fuzz_get_base_64_buffer_value(&io_ctx->req.scsi_tmf_req.lun,
+ sizeof(io_ctx->req.scsi_tmf_req.lun),
+ (char *)value->start,
+ value->len)) {
+ nested_object_size = -1;
+ }
+ } else if (!strncmp(prev_value->start, "tag", prev_value->len)) {
+ if (fuzz_parse_json_num(value, UINT64_MAX, &tmp_val)) {
+ nested_object_size = -1;
+ } else {
+ io_ctx->req.scsi_tmf_req.tag = tmp_val;
+ }
+ }
+ }
+ if (nested_object_size < 0) {
+ fprintf(stderr, "Invalid value supplied for io_ctx->%.*s: %.*s\n", prev_value->len,
+ (char *)prev_value->start, value->len, (char *)value->start);
+ return false;
+ }
+ value += nested_object_size;
+ i += nested_object_size;
+ }
+ return true;
+}
+/* data parsing functions end */
+
+/* build requests begin */
+static void
+craft_io_from_array(struct fuzz_vhost_io_ctx *src_ctx, struct fuzz_vhost_io_ctx *dest_ctx)
+{
+ if (g_keep_iov_pointers) {
+ dest_ctx->iovs = src_ctx->iovs;
+ }
+ dest_ctx->req = src_ctx->req;
+}
+
+static void
+craft_virtio_scsi_req(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
+{
+ io_ctx->iovs.iov_req.iov_len = sizeof(io_ctx->req.scsi_req);
+ io_ctx->iovs.iov_resp.iov_len = sizeof(io_ctx->resp.scsi_resp);
+ fuzz_fill_random_bytes((char *)&io_ctx->req.scsi_req, sizeof(io_ctx->req.scsi_req),
+ &dev_ctx->random_seed);
+ /* TODO: set up the logic to find all luns on the target. Right now we are just assuming the first is OK. */
+ if (dev_ctx->valid_lun) {
+ io_ctx->req.scsi_req.lun[0] = 1;
+ io_ctx->req.scsi_req.lun[1] = 0;
+ }
+}
+
+static void
+craft_virtio_scsi_tmf_req(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
+{
+ io_ctx->iovs.iov_req.iov_len = sizeof(io_ctx->req.scsi_tmf_req);
+ io_ctx->iovs.iov_resp.iov_len = sizeof(io_ctx->resp.scsi_tmf_resp);
+ fuzz_fill_random_bytes((char *)&io_ctx->req.scsi_tmf_req, sizeof(io_ctx->req.scsi_tmf_req),
+ &dev_ctx->random_seed);
+ /* TODO: set up the logic to find all luns on the target. Right now we are just assuming the first is OK. */
+ if (dev_ctx->valid_lun) {
+ io_ctx->req.scsi_tmf_req.lun[0] = 1;
+ io_ctx->req.scsi_tmf_req.lun[1] = 0;
+ }
+
+ /* Valid control queue commands have to be of type 0, 1, or 2. Any others just return immediately from the target. */
+ /* Try to only test the opcodes that will exercise extra paths on the target side, but allow for at least one invalid value. */
+ io_ctx->req.scsi_tmf_req.type = rand() % 4;
+}
+
+static void
+craft_virtio_blk_req(struct fuzz_vhost_io_ctx *io_ctx)
+{
+ io_ctx->iovs.iov_req.iov_len = sizeof(io_ctx->req.blk_req);
+ io_ctx->iovs.iov_resp.iov_len = sizeof(io_ctx->resp.blk_resp);
+ io_ctx->req.blk_req.type = rand();
+ io_ctx->req.blk_req.sector = rand();
+}
+
+static void
+craft_virtio_req_rsp_pair(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
+{
+ struct fuzz_vhost_iov_ctx *iovs = &io_ctx->iovs;
+
+ /*
+ * Always set these buffer values up front.
+ * If the user wants to override this with the json values,
+ * they can specify -k when starting the app.
+ */
+ iovs->iov_req.iov_base = &io_ctx->req;
+ if (dev_ctx->use_bogus_buffer) {
+ iovs->iov_data.iov_len = rand();
+ iovs->iov_data.iov_base = get_invalid_mem_address(iovs->iov_data.iov_len);
+ } else if (dev_ctx->use_valid_buffer) {
+ iovs->iov_data.iov_len = 1024;
+ iovs->iov_data.iov_base = g_valid_buffer;
+ }
+ iovs->iov_resp.iov_base = &io_ctx->resp;
+
+ if (dev_ctx->socket_is_blk && g_blk_cmd_array) {
+ craft_io_from_array(&g_blk_cmd_array[dev_ctx->submitted_io], io_ctx);
+ return;
+ } else if (dev_ctx->test_scsi_tmf && g_scsi_mgmt_cmd_array) {
+ craft_io_from_array(&g_scsi_mgmt_cmd_array[dev_ctx->submitted_io], io_ctx);
+ return;
+ } else if (g_scsi_cmd_array) {
+ craft_io_from_array(&g_scsi_cmd_array[dev_ctx->submitted_io], io_ctx);
+ return;
+ }
+
+ if (dev_ctx->socket_is_blk) {
+ craft_virtio_blk_req(io_ctx);
+ } else if (dev_ctx->test_scsi_tmf) {
+ craft_virtio_scsi_tmf_req(dev_ctx, io_ctx);
+ } else {
+ craft_virtio_scsi_req(dev_ctx, io_ctx);
+ }
+}
+/* build requests end */
+
+/* submit requests begin */
+static uint64_t
+get_max_num_io(struct fuzz_vhost_dev_ctx *dev_ctx)
+{
+ if (dev_ctx->socket_is_blk) {
+ return g_blk_cmd_array_size;
+ } else if (dev_ctx->test_scsi_tmf) {
+ return g_scsi_mgmt_cmd_array_size;
+ } else {
+ return g_scsi_cmd_array_size;
+ }
+}
+
+static int
+submit_virtio_req_rsp_pair(struct fuzz_vhost_dev_ctx *dev_ctx, struct virtqueue *vq,
+ struct fuzz_vhost_io_ctx *io_ctx)
+{
+ struct fuzz_vhost_iov_ctx *iovs = &io_ctx->iovs;
+ int num_iovs = 2, rc;
+
+ num_iovs += dev_ctx->use_bogus_buffer || dev_ctx->use_valid_buffer ? 1 : 0;
+
+ rc = virtqueue_req_start(vq, io_ctx, num_iovs);
+ if (rc) {
+ return rc;
+ }
+ virtqueue_req_add_iovs(vq, &iovs->iov_req, 1, SPDK_VIRTIO_DESC_RO);
+ /* blk and scsi requests favor different orders for the iov objects. */
+ if (dev_ctx->socket_is_blk) {
+ if (dev_ctx->use_bogus_buffer || dev_ctx->use_valid_buffer) {
+ virtqueue_req_add_iovs(vq, &iovs->iov_data, 1, SPDK_VIRTIO_DESC_WR);
+ }
+ virtqueue_req_add_iovs(vq, &iovs->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
+ } else {
+ virtqueue_req_add_iovs(vq, &iovs->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
+ if (dev_ctx->use_bogus_buffer || dev_ctx->use_valid_buffer) {
+ virtqueue_req_add_iovs(vq, &iovs->iov_data, 1, SPDK_VIRTIO_DESC_WR);
+ }
+ }
+ virtqueue_req_flush(vq);
+ return 0;
+}
+
+static void
+dev_submit_requests(struct fuzz_vhost_dev_ctx *dev_ctx, struct virtqueue *vq,
+ uint64_t max_io_to_submit)
+{
+ struct fuzz_vhost_io_ctx *io_ctx;
+ int rc;
+
+ while (!TAILQ_EMPTY(&dev_ctx->free_io_ctx) && dev_ctx->submitted_io < max_io_to_submit) {
+ io_ctx = TAILQ_FIRST(&dev_ctx->free_io_ctx);
+ craft_virtio_req_rsp_pair(dev_ctx, io_ctx);
+ rc = submit_virtio_req_rsp_pair(dev_ctx, vq, io_ctx);
+ if (rc == 0) {
+ TAILQ_REMOVE(&dev_ctx->free_io_ctx, io_ctx, link);
+ TAILQ_INSERT_TAIL(&dev_ctx->outstanding_io_ctx, io_ctx, link);
+ dev_ctx->submitted_io++;
+ } else if (rc == -ENOMEM) {
+ /* There are just not enough available buffers right now. Try again later. */
+ return;
+ } else if (rc == -EINVAL) {
+ /* The virtqueue must be broken. We know we can fit at least three descriptors. */
+ fprintf(stderr, "One of the virtqueues for dev %p is broken. Stopping all devices.\n", dev_ctx);
+ g_run = 0;
+ }
+ }
+}
+/* submit requests end */
+
+/* complete requests begin */
+static void
+check_successful_op(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
+{
+ bool is_successful = false;
+
+ if (dev_ctx->socket_is_blk) {
+ if (io_ctx->resp.blk_resp == 0) {
+ is_successful = true;
+ }
+ } else if (dev_ctx->test_scsi_tmf) {
+ if (io_ctx->resp.scsi_tmf_resp.scsi_tmf_resp.response == 0 &&
+ io_ctx->resp.scsi_tmf_resp.an_resp.response == 0) {
+ is_successful = true;
+ }
+ } else {
+ if (io_ctx->resp.scsi_resp.status == 0) {
+ is_successful = true;
+ }
+ }
+
+ if (is_successful) {
+ fprintf(stderr, "An I/O completed without an error status. This could be worth looking into.\n");
+ fprintf(stderr,
+ "There is also a good chance that the target just failed before setting a status.\n");
+ dev_ctx->successful_io++;
+ print_req_obj(dev_ctx, io_ctx);
+ } else if (g_verbose_mode) {
+ fprintf(stderr, "The following I/O failed as expected.\n");
+ print_req_obj(dev_ctx, io_ctx);
+ }
+}
+
+static void
+complete_io(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
+{
+ TAILQ_REMOVE(&dev_ctx->outstanding_io_ctx, io_ctx, link);
+ TAILQ_INSERT_HEAD(&dev_ctx->free_io_ctx, io_ctx, link);
+ check_successful_op(dev_ctx, io_ctx);
+ dev_ctx->completed_io++;
+ dev_ctx->timeout_tsc = fuzz_refresh_timeout();
+}
+
+static int
+poll_dev(void *ctx)
+{
+ struct fuzz_vhost_dev_ctx *dev_ctx = ctx;
+ struct virtqueue *vq;
+ struct fuzz_vhost_io_ctx *io_ctx[FUZZ_QUEUE_DEPTH];
+ int num_active_threads;
+ uint64_t max_io_to_complete = UINT64_MAX;
+ uint64_t current_ticks;
+ uint32_t len[FUZZ_QUEUE_DEPTH];
+ uint16_t num_cpl, i;
+
+ if (g_json_file) {
+ max_io_to_complete = get_max_num_io(dev_ctx);
+ }
+
+ if (!dev_ctx->socket_is_blk && dev_ctx->test_scsi_tmf) {
+ vq = dev_ctx->virtio_dev.vqs[VIRTIO_SCSI_CONTROLQ];
+ } else {
+ vq = dev_ctx->virtio_dev.vqs[VIRTIO_REQUESTQ];
+ }
+
+ num_cpl = virtio_recv_pkts(vq, (void **)io_ctx, len, FUZZ_QUEUE_DEPTH);
+
+ for (i = 0; i < num_cpl; i++) {
+ complete_io(dev_ctx, io_ctx[i]);
+ }
+
+ current_ticks = spdk_get_ticks();
+
+ if (current_ticks > dev_ctx->timeout_tsc) {
+ dev_ctx->timed_out = true;
+ g_run = false;
+ fprintf(stderr, "The VQ on device %p timed out. Dumping contents now.\n", dev_ctx);
+ dump_outstanding_io(dev_ctx);
+ }
+
+ if (current_ticks > g_runtime_ticks) {
+ g_run = 0;
+ }
+
+ if (!g_run || dev_ctx->completed_io >= max_io_to_complete) {
+ if (TAILQ_EMPTY(&dev_ctx->outstanding_io_ctx)) {
+ spdk_poller_unregister(&dev_ctx->poller);
+ num_active_threads = __sync_sub_and_fetch(&g_num_active_threads, 1);
+ if (num_active_threads == 0) {
+ g_run = 0;
+ }
+ spdk_thread_exit(dev_ctx->thread);
+ }
+ return 0;
+ }
+
+ dev_submit_requests(dev_ctx, vq, max_io_to_complete);
+ return 0;
+}
+/* complete requests end */
+
+static void
+start_io(void *ctx)
+{
+ struct fuzz_vhost_dev_ctx *dev_ctx = ctx;
+
+ if (g_random_seed) {
+ dev_ctx->random_seed = g_random_seed;
+ } else {
+ dev_ctx->random_seed = spdk_get_ticks();
+ }
+
+ dev_ctx->timeout_tsc = fuzz_refresh_timeout();
+
+ dev_ctx->poller = SPDK_POLLER_REGISTER(poll_dev, dev_ctx, 0);
+ if (dev_ctx->poller == NULL) {
+ return;
+ }
+}
+
+static int
+end_fuzz(void *ctx)
+{
+ if (!g_run && !g_num_active_threads) {
+ spdk_poller_unregister(&g_run_poller);
+ cleanup();
+ spdk_app_stop(0);
+ }
+ return 0;
+}
+
+static void
+begin_fuzz(void *ctx)
+{
+ struct fuzz_vhost_dev_ctx *dev_ctx;
+
+ g_runtime_ticks = spdk_get_ticks() + spdk_get_ticks_hz() * g_runtime;
+
+ g_valid_buffer = spdk_malloc(0x1000, 0x200, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
+ if (g_valid_buffer == NULL) {
+ fprintf(stderr, "Failed to allocate a valid buffer for I/O\n");
+ goto out;
+ }
+
+ g_run_poller = SPDK_POLLER_REGISTER(end_fuzz, NULL, 0);
+ if (g_run_poller == NULL) {
+ fprintf(stderr, "Failed to register a poller for test completion checking.\n");
+ }
+
+ TAILQ_FOREACH(dev_ctx, &g_dev_list, link) {
+ assert(dev_ctx->thread != NULL);
+ spdk_thread_send_msg(dev_ctx->thread, start_io, dev_ctx);
+ __sync_add_and_fetch(&g_num_active_threads, 1);
+ }
+
+ return;
+out:
+ cleanup();
+ spdk_app_stop(0);
+}
+
+static void
+fuzz_vhost_usage(void)
+{
+ fprintf(stderr, " -j <path> Path to a JSON file containing named objects.\n");
+ fprintf(stderr,
+ " -k Keep the iov pointer addresses from the JSON file. Only valid with -j.\n");
+ fprintf(stderr, " -S <integer> Seed value for test.\n");
+ fprintf(stderr, " -t <integer> Time in seconds to run the fuzz test.\n");
+ fprintf(stderr, " -V Enable logging of each submitted command.\n");
+}
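+
+/*
+ * Example (illustrative): "vhost_fuzz -t 30 -V -j example.json" runs the fuzzer for
+ * 30 seconds, logs every submitted command, and replays the named commands from
+ * example.json. Target sockets are registered through the fuzz_vhost_create_dev
+ * RPC (see vhost_fuzz_rpc.c).
+ */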
+
+static int
+fuzz_vhost_parse(int ch, char *arg)
+{
+ int64_t error_test;
+
+ switch (ch) {
+ case 'j':
+ g_json_file = optarg;
+ break;
+ case 'k':
+ g_keep_iov_pointers = true;
+ break;
+ case 'S':
+ error_test = spdk_strtol(arg, 10);
+ if (error_test < 0) {
+ fprintf(stderr, "Invalid value supplied for the random seed.\n");
+ return -1;
+ }
+ g_random_seed = error_test;
+ break;
+ case 't':
+ g_runtime = spdk_strtol(arg, 10);
+ if (g_runtime < 0 || g_runtime > MAX_RUNTIME_S) {
+ fprintf(stderr, "You must supply a positive runtime value less than 86401.\n");
+ return -1;
+ }
+ break;
+ case 'V':
+ g_verbose_mode = true;
+ break;
+ case '?':
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_app_opts opts = {};
+ int rc;
+
+ spdk_app_opts_init(&opts);
+ opts.name = "vhost_fuzz";
+ g_runtime = DEFAULT_RUNTIME;
+
+ rc = spdk_app_parse_args(argc, argv, &opts, "j:kS:t:V", NULL, fuzz_vhost_parse, fuzz_vhost_usage);
+ if (rc != SPDK_APP_PARSE_ARGS_SUCCESS) {
+ fprintf(stderr, "Unable to parse the application arguments.\n");
+ return -1;
+ }
+
+ if (g_json_file != NULL) {
+ g_blk_cmd_array_size = fuzz_parse_args_into_array(g_json_file,
+ (void **)&g_blk_cmd_array,
+ sizeof(struct fuzz_vhost_io_ctx),
+ BLK_IO_NAME, parse_vhost_blk_cmds);
+ g_scsi_cmd_array_size = fuzz_parse_args_into_array(g_json_file,
+ (void **)&g_scsi_cmd_array,
+ sizeof(struct fuzz_vhost_io_ctx),
+ SCSI_IO_NAME, parse_vhost_scsi_cmds);
+ g_scsi_mgmt_cmd_array_size = fuzz_parse_args_into_array(g_json_file,
+ (void **)&g_scsi_mgmt_cmd_array,
+ sizeof(struct fuzz_vhost_io_ctx),
+ SCSI_IO_NAME, parse_vhost_scsi_mgmt_cmds);
+ if (g_blk_cmd_array_size == 0 && g_scsi_cmd_array_size == 0 && g_scsi_mgmt_cmd_array_size == 0) {
+ fprintf(stderr, "The provided json file did not contain any valid commands. Exiting.\n");
+ return -EINVAL;
+ }
+ }
+
+ rc = spdk_app_start(&opts, begin_fuzz, NULL);
+
+ return rc;
+}
diff --git a/src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz.h b/src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz.h
new file mode 100644
index 000000000..df71a846d
--- /dev/null
+++ b/src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz.h
@@ -0,0 +1,41 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef VHOST_FUZZ_H
+#define VHOST_FUZZ_H
+
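+/*
+ * Registers a vhost-user socket with the fuzzer. Parameter sketch, inferred from
+ * the fuzz_vhost_create_dev RPC and the fuzzer code that consumes these flags:
+ *   socket_path      - path of the vhost-user socket to connect to
+ *   is_blk_dev       - treat the target as vhost-blk (otherwise vhost-scsi)
+ *   use_bogus_buffer - attach a data iov pointing at an invalid address
+ *   use_valid_buffer - attach a small valid data buffer instead
+ *   valid_lun        - presumably address a valid LUN rather than a random one
+ *   test_scsi_tmf    - submit task management requests on the SCSI control queue
+ */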
+int fuzz_vhost_dev_init(const char *socket_path, bool is_blk_dev, bool use_bogus_buffer,
+ bool use_valid_buffer, bool valid_lun, bool test_scsi_tmf);
+
+#endif
diff --git a/src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz_rpc.c b/src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz_rpc.c
new file mode 100644
index 000000000..b60e3f097
--- /dev/null
+++ b/src/spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz_rpc.c
@@ -0,0 +1,108 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2018 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/rpc.h"
+#include "spdk/util.h"
+
+#include "vhost_fuzz.h"
+
+struct rpc_fuzz_vhost_dev_create {
+ char *socket;
+ bool is_blk;
+ bool use_bogus_buffer;
+ bool use_valid_buffer;
+ bool valid_lun;
+ bool test_scsi_tmf;
+};
+
+static const struct spdk_json_object_decoder rpc_fuzz_vhost_dev_create_decoders[] = {
+ {"socket", offsetof(struct rpc_fuzz_vhost_dev_create, socket), spdk_json_decode_string},
+ {"is_blk", offsetof(struct rpc_fuzz_vhost_dev_create, is_blk), spdk_json_decode_bool, true},
+ {"use_bogus_buffer", offsetof(struct rpc_fuzz_vhost_dev_create, use_bogus_buffer), spdk_json_decode_bool, true},
+ {"use_valid_buffer", offsetof(struct rpc_fuzz_vhost_dev_create, use_valid_buffer), spdk_json_decode_bool, true},
+ {"valid_lun", offsetof(struct rpc_fuzz_vhost_dev_create, valid_lun), spdk_json_decode_bool, true},
+ {"test_scsi_tmf", offsetof(struct rpc_fuzz_vhost_dev_create, test_scsi_tmf), spdk_json_decode_bool, true},
+};
+
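+/*
+ * Example request body (illustrative; the socket path is hypothetical):
+ *
+ * {
+ *   "method": "fuzz_vhost_create_dev",
+ *   "params": {
+ *     "socket": "/tmp/vhost.0",
+ *     "is_blk": false,
+ *     "use_bogus_buffer": true,
+ *     "use_valid_buffer": false,
+ *     "valid_lun": false,
+ *     "test_scsi_tmf": false
+ *   }
+ * }
+ */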
+static void
+spdk_rpc_fuzz_vhost_create_dev(struct spdk_jsonrpc_request *request,
+ const struct spdk_json_val *params)
+{
+ struct spdk_json_write_ctx *w;
+ struct rpc_fuzz_vhost_dev_create req = {0};
+ int rc;
+
+ if (spdk_json_decode_object(params, rpc_fuzz_vhost_dev_create_decoders,
+ SPDK_COUNTOF(rpc_fuzz_vhost_dev_create_decoders), &req)) {
+ fprintf(stderr, "Unable to parse the request.\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+ "Unable to parse the object parameters.\n");
+ return;
+ }
+
+ if (strlen(req.socket) > PATH_MAX) {
+ fprintf(stderr, "Socket address is too long.\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+ "Unable to parse the object parameters.\n");
+ free(req.socket);
+ return;
+ }
+
+ rc = fuzz_vhost_dev_init(req.socket, req.is_blk, req.use_bogus_buffer, req.use_valid_buffer,
+ req.valid_lun, req.test_scsi_tmf);
+
+ if (rc != 0) {
+ if (rc == -ENOMEM) {
+ fprintf(stderr, "No valid memory for device initialization.\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+ "No memory returned from host.\n");
+ } else if (rc == -EINVAL) {
+ fprintf(stderr, "Invalid device parameters provided.\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+ "Parameters provided were invalid.\n");
+ } else {
+ fprintf(stderr, "unknown error from the guest.\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+ "Unexpected error code.\n");
+ }
+ } else {
+ w = spdk_jsonrpc_begin_result(request);
+ spdk_json_write_bool(w, true);
+ spdk_jsonrpc_end_result(request, w);
+ }
+
+ free(req.socket);
+ return;
+}
+SPDK_RPC_REGISTER("fuzz_vhost_create_dev", spdk_rpc_fuzz_vhost_create_dev, SPDK_RPC_STARTUP);
diff --git a/src/spdk/test/app/histogram_perf/.gitignore b/src/spdk/test/app/histogram_perf/.gitignore
new file mode 100644
index 000000000..c77b05312
--- /dev/null
+++ b/src/spdk/test/app/histogram_perf/.gitignore
@@ -0,0 +1 @@
+histogram_perf
diff --git a/src/spdk/test/app/histogram_perf/Makefile b/src/spdk/test/app/histogram_perf/Makefile
new file mode 100644
index 000000000..b60c704a8
--- /dev/null
+++ b/src/spdk/test/app/histogram_perf/Makefile
@@ -0,0 +1,43 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+APP = histogram_perf
+
+C_SRCS = histogram_perf.c
+
+SPDK_LIB_LIST = thread util log
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/app/histogram_perf/histogram_perf.c b/src/spdk/test/app/histogram_perf/histogram_perf.c
new file mode 100644
index 000000000..5d9de5274
--- /dev/null
+++ b/src/spdk/test/app/histogram_perf/histogram_perf.c
@@ -0,0 +1,102 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/env.h"
+#include "spdk/util.h"
+#include "spdk/histogram_data.h"
+
+/*
+ * This application is a simple test app used to test the performance of
+ * tallying datapoints with struct spdk_histogram_data. It can be used
+ * to measure the effect of changes to the spdk_histogram_data implementation.
+ *
+ * There are no command line parameters currently - it just tallies
+ * datapoints for 10 seconds in a default-sized histogram structure and
+ * then prints out the number of tallies performed.
+ */
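+
+/*
+ * Sketch (assumption, not used by this app): if the bucket contents were of
+ * interest, they could be dumped with spdk_histogram_data_iterate(), whose
+ * callback receives each bucket's range and count, e.g.:
+ *
+ *   static void
+ *   print_bucket(void *ctx, uint64_t start, uint64_t end, uint64_t count,
+ *                uint64_t total, uint64_t so_far)
+ *   {
+ *           if (count != 0) {
+ *                   printf("%ju-%ju: %ju\n", start, end, count);
+ *           }
+ *   }
+ *
+ *   spdk_histogram_data_iterate(h, print_bucket, NULL);
+ */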
+
+static void
+usage(const char *prog)
+{
+ printf("usage: %s\n", prog);
+ printf("Options:\n");
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_histogram_data *h;
+ struct spdk_env_opts opts;
+ uint64_t tsc[128], t, end_tsc, count;
+ uint32_t i;
+ int ch;
+ int rc = 0;
+
+ while ((ch = getopt(argc, argv, "")) != -1) {
+ switch (ch) {
+ default:
+ usage(argv[0]);
+ return 1;
+ }
+ }
+
+ spdk_env_opts_init(&opts);
+ if (spdk_env_init(&opts)) {
+ printf("Err: Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ for (i = 0; i < SPDK_COUNTOF(tsc); i++) {
+ tsc[i] = spdk_get_ticks();
+ }
+
+ end_tsc = spdk_get_ticks() + (10 * spdk_get_ticks_hz());
+ count = 0;
+ h = spdk_histogram_data_alloc();
+
+ while (true) {
+ t = spdk_get_ticks();
+ spdk_histogram_data_tally(h, t - tsc[count % SPDK_COUNTOF(tsc)]);
+ count++;
+ if (t > end_tsc) {
+ break;
+ }
+ }
+
+ printf("count = %ju\n", count);
+ spdk_histogram_data_free(h);
+
+ return rc;
+}
diff --git a/src/spdk/test/app/jsoncat/.gitignore b/src/spdk/test/app/jsoncat/.gitignore
new file mode 100644
index 000000000..3e6db4f0e
--- /dev/null
+++ b/src/spdk/test/app/jsoncat/.gitignore
@@ -0,0 +1 @@
+jsoncat
diff --git a/src/spdk/test/app/jsoncat/Makefile b/src/spdk/test/app/jsoncat/Makefile
new file mode 100644
index 000000000..2635e501b
--- /dev/null
+++ b/src/spdk/test/app/jsoncat/Makefile
@@ -0,0 +1,43 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+APP = jsoncat
+
+C_SRCS = jsoncat.c
+
+SPDK_LIB_LIST = json thread util log
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/app/jsoncat/jsoncat.c b/src/spdk/test/app/jsoncat/jsoncat.c
new file mode 100644
index 000000000..e932b54bd
--- /dev/null
+++ b/src/spdk/test/app/jsoncat/jsoncat.c
@@ -0,0 +1,192 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Simple JSON "cat" utility */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/json.h"
+#include "spdk/file.h"
+
+static void
+usage(const char *prog)
+{
+ printf("usage: %s [-c] [-f] file.json\n", prog);
+ printf("Options:\n");
+ printf("-c\tallow comments in input (non-standard)\n");
+ printf("-f\tformatted output (default: compact output)\n");
+}
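+
+/*
+ * Example invocations (illustrative):
+ *   jsoncat config.json          compact re-print of config.json
+ *   jsoncat -f config.json       pretty-printed output
+ *   cat config.json | jsoncat    read the document from stdin
+ */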
+
+static void
+print_json_error(FILE *pf, int rc, const char *filename)
+{
+ switch (rc) {
+ case SPDK_JSON_PARSE_INVALID:
+ fprintf(pf, "%s: invalid JSON\n", filename);
+ break;
+ case SPDK_JSON_PARSE_INCOMPLETE:
+ fprintf(pf, "%s: incomplete JSON\n", filename);
+ break;
+ case SPDK_JSON_PARSE_MAX_DEPTH_EXCEEDED:
+ fprintf(pf, "%s: maximum nesting depth exceeded\n", filename);
+ break;
+ default:
+ fprintf(pf, "%s: unknown JSON parse error\n", filename);
+ break;
+ }
+}
+
+static int
+json_write_cb(void *cb_ctx, const void *data, size_t size)
+{
+ FILE *f = cb_ctx;
+ size_t rc;
+
+ rc = fwrite(data, 1, size, f);
+ return rc == size ? 0 : -1;
+}
+
+static int
+process_file(const char *filename, FILE *f, uint32_t parse_flags, uint32_t write_flags)
+{
+ size_t size;
+ void *buf, *end;
+ ssize_t rc;
+ struct spdk_json_val *values;
+ size_t num_values;
+ struct spdk_json_write_ctx *w;
+
+ buf = spdk_posix_file_load(f, &size);
+ if (buf == NULL) {
+ fprintf(stderr, "%s: file read error\n", filename);
+ return 1;
+ }
+
+ rc = spdk_json_parse(buf, size, NULL, 0, NULL, parse_flags);
+ if (rc <= 0) {
+ print_json_error(stderr, rc, filename);
+ free(buf);
+ return 1;
+ }
+
+ num_values = (size_t)rc;
+ values = calloc(num_values, sizeof(*values));
+ if (values == NULL) {
+ perror("values calloc");
+ free(buf);
+ return 1;
+ }
+
+ rc = spdk_json_parse(buf, size, values, num_values, &end,
+ parse_flags | SPDK_JSON_PARSE_FLAG_DECODE_IN_PLACE);
+ if (rc <= 0) {
+ print_json_error(stderr, rc, filename);
+ free(values);
+ free(buf);
+ return 1;
+ }
+
+ w = spdk_json_write_begin(json_write_cb, stdout, write_flags);
+ if (w == NULL) {
+ fprintf(stderr, "json_write_begin failed\n");
+ free(values);
+ free(buf);
+ return 1;
+ }
+
+ spdk_json_write_val(w, values);
+ spdk_json_write_end(w);
+ printf("\n");
+
+ if (end != buf + size) {
+ fprintf(stderr, "%s: garbage at end of file\n", filename);
+ free(values);
+ free(buf);
+ return 1;
+ }
+
+ free(values);
+ free(buf);
+ return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+ FILE *f;
+ int ch;
+ int rc;
+ uint32_t parse_flags = 0, write_flags = 0;
+ const char *filename;
+
+ while ((ch = getopt(argc, argv, "cf")) != -1) {
+ switch (ch) {
+ case 'c':
+ parse_flags |= SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS;
+ break;
+ case 'f':
+ write_flags |= SPDK_JSON_WRITE_FLAG_FORMATTED;
+ break;
+ default:
+ usage(argv[0]);
+ return 1;
+ }
+ }
+
+ if (optind == argc) {
+ filename = "-";
+ } else if (optind == argc - 1) {
+ filename = argv[optind];
+ } else {
+ usage(argv[0]);
+ return 1;
+ }
+
+ if (strcmp(filename, "-") == 0) {
+ f = stdin;
+ } else {
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ perror("fopen");
+ return 1;
+ }
+ }
+
+ rc = process_file(filename, f, parse_flags, write_flags);
+
+ if (f != stdin) {
+ fclose(f);
+ }
+
+ return rc;
+}
diff --git a/src/spdk/test/app/match/match b/src/spdk/test/app/match/match
new file mode 100755
index 000000000..63fee5203
--- /dev/null
+++ b/src/spdk/test/app/match/match
@@ -0,0 +1,332 @@
+#!/usr/bin/env perl
+#
+# Copyright 2014-2017, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#
+# match -- compare an output file with expected results
+#
+# usage: match [-adoqv] [match-file]...
+#
+# this script compares the output from a test run, stored in a file, with
+# the expected output. comparison is done line-by-line until either all
+# lines compare correctly (exit code 0) or a miscompare is found (exit
+# code nonzero).
+#
+# expected output is stored in a ".match" file, which contains a copy of
+# the expected output with embedded tokens for things that should not be
+# exact matches. the supported tokens are:
+#
+# $(N) an integer (i.e. one or more decimal digits)
+# $(NC) one or more decimal digits with comma separators
+# $(FP) a floating point number
+# $(S) ascii string
+# $(X) hex number
+# $(XX) hex number prefixed with 0x
+# $(W) whitespace
+# $(nW) non-whitespace
+# $(*) any string
+# $(DD) output of a "dd" run
+# $(OPT) line is optional (may be missing, matched if found)
+# $(OPX) ends a contiguous list of $(OPT)...$(OPX) lines, at least
+# one of which must match
+#
+# Additionally, if a file named "X.ignore" exists, each of its lines is treated
+# as a substring pattern: any line of the corresponding output file "X" that
+# contains one of those substrings is ignored entirely (making it easy to skip
+# whole output lines).
+#
+# arguments are:
+#
+# -a find all files of the form "X.match" in the current
+# directory and match them against the corresponding file "X".
+#
+# -o custom output filename - only one match file can be given
+#
+# -d debug -- show lots of debug output
+#
+# -q don't print log files on mismatch
+#
+# -v verbose -- show every line as it is being matched
+#
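+# Example (illustrative, file names hypothetical): given an output file
+# "run.log" containing
+#
+#     Starting test at 10:32:07
+#     wrote 1,048,576 bytes in 0.25 seconds
+#
+# a matching "run.log.match" file could look like
+#
+#     Starting test at $(N):$(N):$(N)
+#     wrote $(NC) bytes in $(FP) seconds
+#
+# and "match run.log.match" (or "match -a" from the same directory) would
+# report success.
+#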
+
+use strict;
+use Getopt::Std;
+use Encode;
+use v5.16;
+
+select STDERR;
+binmode(STDOUT, ":utf8");
+binmode(STDERR, ":utf8");
+
+my $Me = $0;
+$Me =~ s,.*/,,;
+
+our ($opt_a, $opt_d, $opt_q, $opt_v, $opt_o);
+
+$SIG{HUP} = $SIG{INT} = $SIG{TERM} = $SIG{__DIE__} = sub {
+ die @_ if $^S;
+ my $errstr = shift;
+ die "FAIL: $Me: $errstr";
+};
+
+sub usage {
+ my $msg = shift;
+
+ warn "$Me: $msg\n" if $msg;
+ warn "Usage: $Me [-adqv] [match-file]...\n";
+ warn " or: $Me [-dqv] -o output-file match-file...\n";
+ exit 1;
+}
+
+getopts('adoqv') or usage;
+
+my %match2file;
+
+if ($opt_a) {
+ usage("-a and filename arguments are mutually exclusive")
+ if $#ARGV != -1;
+ opendir(DIR, '.') or die "opendir: .: $!\n";
+ my @matchfiles = grep { /(.*)\.match$/ && -f $1 } readdir(DIR);
+ closedir(DIR);
+ die "no files found to process\n" unless @matchfiles;
+ foreach my $mfile (@matchfiles) {
+ die "$mfile: $!\n" unless open(F, $mfile);
+ close(F);
+ my $ofile = $mfile;
+ $ofile =~ s/\.match$//;
+ die "$mfile found but cannot open $ofile: $!\n"
+ unless open(F, $ofile);
+ close(F);
+ $match2file{$mfile} = $ofile;
+ }
+} elsif ($opt_o) {
+ usage("-o argument requires two paths") if $#ARGV != 1;
+
+ $match2file{$ARGV[1]} = $ARGV[0];
+} else {
+ usage("no match-file arguments found") if $#ARGV == -1;
+
+ # to improve the failure case, check all filename args exist and
+ # are provided in pairs now, before going through and processing them
+ foreach my $mfile (@ARGV) {
+ my $ofile = $mfile;
+ usage("$mfile: not a .match file") unless
+ $ofile =~ s/\.match$//;
+ usage("$mfile: $!") unless open(F, $mfile);
+ close(F);
+ usage("$ofile: $!") unless open(F, $ofile);
+ close(F);
+ $match2file{$mfile} = $ofile;
+ }
+}
+
+my $mfile;
+my $ofile;
+my $ifile;
+print "Files to be processed:\n" if $opt_v;
+foreach $mfile (sort keys %match2file) {
+ $ofile = $match2file{$mfile};
+ $ifile = $ofile . ".ignore";
+ $ifile = undef unless (-f $ifile);
+ if ($opt_v) {
+ print " match-file \"$mfile\" output-file \"$ofile\"";
+ if ($ifile) {
+ print " ignore-file $ifile\n";
+ } else {
+ print "\n";
+ }
+ }
+ match($mfile, $ofile, $ifile);
+}
+
+exit 0;
+
+#
+# strip_it - user can optionally ignore lines from files that contain
+# any number of substrings listed in a file called "X.ignore" where X
+# is the name of the output file.
+#
+sub strip_it {
+ my ($ifile, $file, $input) = @_;
+ # if there is no ignore file just return unaltered input
+ return $input unless $ifile;
+ my @lines_in = split /^/, $input;
+ my $output;
+ my $line_in;
+ my @i_file = split /^/, snarf($ifile);
+ my $i_line;
+ my $ignore_it = 0;
+
+ foreach $line_in (@lines_in) {
+ my @i_lines = @i_file;
+ foreach $i_line (@i_lines) {
+ # Check if both ignore and input lines are new lines
+ if ($i_line eq "\n" && $line_in eq "\n") {
+ $ignore_it = 1;
+ last;
+ }
+ # Find the ignore string in input line
+ chomp($i_line);
+ if (index($line_in, $i_line) != -1 && length($i_line) != 0) {
+ $ignore_it = 1;
+ last;
+ }
+ }
+ if ($ignore_it == 0) {
+ $output .= $line_in;
+ } elsif($opt_v) {
+ print "Ignoring (from $file): $line_in";
+ }
+ $ignore_it = 0;
+ }
+ return $output;
+}
+
+#
+# match -- process a match-file, output-file pair
+#
+sub match {
+ my ($mfile, $ofile, $ifile) = @_;
+ my $pat;
+ my $output = snarf($ofile);
+ $output = strip_it($ifile, $ofile, $output);
+ my $all_lines = $output;
+ my $line_pat = 0;
+ my $line_out = 0;
+ my $opt = 0;
+ my $opx = 0;
+ my $opt_found = 0;
+ my $fstr = snarf($mfile);
+ $fstr = strip_it($ifile, $mfile, $fstr);
+ for (split /^/, $fstr) {
+ $pat = $_;
+ $line_pat++;
+ $line_out++;
+ s/([*+?|{}.\\^\$\[()])/\\$1/g;
+ s/\\\$\\\(FP\\\)/[-+]?\\d*\\.?\\d+([eE][-+]?\\d+)?/g;
+ s/\\\$\\\(N\\\)/[-+]?\\d+/g;
+ s/\\\$\\\(NC\\\)/[-+]?\\d+(,[0-9]+)*/g;
+ s/\\\$\\\(\\\*\\\)/\\p{Print}*/g;
+ s/\\\$\\\(S\\\)/\\P{IsC}+/g;
+ s/\\\$\\\(X\\\)/\\p{XPosixXDigit}+/g;
+ s/\\\$\\\(XX\\\)/0x\\p{XPosixXDigit}+/g;
+ s/\\\$\\\(W\\\)/\\p{Blank}*/g;
+ s/\\\$\\\(nW\\\)/\\p{Graph}*/g;
+ s/\\\$\\\(DD\\\)/\\d+\\+\\d+ records in\n\\d+\\+\\d+ records out\n\\d+ bytes \\\(\\d+ .B\\\) copied, [.0-9e-]+[^,]*, [.0-9]+ .B.s/g;
+ if (s/\\\$\\\(OPT\\\)//) {
+ $opt = 1;
+ } elsif (s/\\\$\\\(OPX\\\)//) {
+ $opx = 1;
+ } else {
+ $opt_found = 0;
+ }
+
+ if ($opt_v) {
+ my @lines = split /\n/, $output;
+ my $line;
+ if (@lines) {
+ $line = $lines[0];
+ } else {
+ $line = "[EOF]";
+ }
+
+ printf("%s:%-3d %s%s:%-3d %s\n", $mfile, $line_pat, $pat, $ofile, $line_out, $line);
+ }
+
+ print " => /$_/\n" if $opt_d;
+ print " [$output]\n" if $opt_d;
+ unless ($output =~ s/^$_//) {
+ if ($opt || ($opx && $opt_found)) {
+ printf("%s:%-3d [skipping optional line]\n", $ofile, $line_out) if $opt_v;
+ $line_out--;
+ $opt = 0;
+ } else {
+ if (!$opt_v) {
+ if ($opt_q) {
+ print "[MATCHING FAILED]\n";
+ } else {
+ print "[MATCHING FAILED, COMPLETE FILE ($ofile) BELOW]\n$all_lines\n[EOF]\n";
+ }
+ $opt_v = 1;
+ match($mfile, $ofile, $ifile);
+ }
+
+ die "$mfile:$line_pat did not match pattern\n";
+ }
+ } elsif ($opt) {
+ $opt_found = 1;
+ }
+ $opx = 0;
+ }
+
+ if ($output ne '') {
+ if (!$opt_v) {
+ if ($opt_q) {
+ print "[MATCHING FAILED]\n";
+ } else {
+ print "[MATCHING FAILED, COMPLETE FILE ($ofile) BELOW]\n$all_lines\n[EOF]\n";
+ }
+ }
+
+ # make it a little more print-friendly...
+ $output =~ s/\n/\\n/g;
+ die "line $line_pat: unexpected output: \"$output\"\n";
+ }
+}
+
+
+#
+# snarf -- slurp an entire file into memory
+#
+sub snarf {
+ my ($file) = @_;
+ my $fh;
+ open($fh, '<', $file) or die "$file $!\n";
+
+ local $/;
+ $_ = <$fh>;
+ close $fh;
+
+ # check known encodings or die
+ my $decoded;
+ my @encodings = ("UTF-8", "UTF-16", "UTF-16LE", "UTF-16BE");
+
+ foreach my $enc (@encodings) {
+ eval { $decoded = decode( $enc, $_, Encode::FB_CROAK ) };
+
+ if (!$@) {
+ $decoded =~ s/\R/\n/g;
+ return $decoded;
+ }
+ }
+
+ die "$Me: ERROR: Unknown file encoding";
+}
diff --git a/src/spdk/test/app/stub/.gitignore b/src/spdk/test/app/stub/.gitignore
new file mode 100644
index 000000000..39802f642
--- /dev/null
+++ b/src/spdk/test/app/stub/.gitignore
@@ -0,0 +1 @@
+stub
diff --git a/src/spdk/test/app/stub/Makefile b/src/spdk/test/app/stub/Makefile
new file mode 100644
index 000000000..79ca8a912
--- /dev/null
+++ b/src/spdk/test/app/stub/Makefile
@@ -0,0 +1,49 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = stub
+
+C_SRCS := stub.c
+
+SPDK_LIB_LIST = $(SOCK_MODULES_LIST)
+SPDK_LIB_LIST += event conf nvme log trace rpc jsonrpc json thread util sock notify
+
+ifeq ($(CONFIG_RDMA),y)
+SPDK_LIB_LIST += rdma
+endif
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/app/stub/stub.c b/src/spdk/test/app/stub/stub.c
new file mode 100644
index 000000000..83d9f706f
--- /dev/null
+++ b/src/spdk/test/app/stub/stub.c
@@ -0,0 +1,203 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/event.h"
+#include "spdk/nvme.h"
+#include "spdk/string.h"
+#include "spdk/thread.h"
+
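+/*
+ * Brief sketch of what this app does (inferred from the code below): it probes
+ * and claims all local NVMe controllers, creates a sentinel file named
+ * /var/run/spdk_stub<shm_id> so test scripts can tell the stub is ready, and then
+ * idles in a one-second-sleep poller until it is shut down. Other test apps can
+ * then attach to the already-initialized controllers quickly by starting with the
+ * same shared memory ID.
+ */
+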
+static char g_path[256];
+static struct spdk_poller *g_poller;
+
+struct ctrlr_entry {
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct ctrlr_entry *next;
+};
+
+static struct ctrlr_entry *g_controllers = NULL;
+
+static void
+cleanup(void)
+{
+ struct ctrlr_entry *ctrlr_entry = g_controllers;
+
+ while (ctrlr_entry) {
+ struct ctrlr_entry *next = ctrlr_entry->next;
+
+ spdk_nvme_detach(ctrlr_entry->ctrlr);
+ free(ctrlr_entry);
+ ctrlr_entry = next;
+ }
+}
+
+static void
+usage(char *executable_name)
+{
+ printf("%s [options]\n", executable_name);
+ printf("options:\n");
+ printf(" -i shared memory ID [required]\n");
+ printf(" -m mask core mask for DPDK\n");
+ printf(" -n channel number of memory channels used for DPDK\n");
+ printf(" -p core master (primary) core for DPDK\n");
+ printf(" -s size memory size in MB for DPDK\n");
+ printf(" -H show this usage\n");
+}
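+
+/*
+ * Example (illustrative): "stub -i 0 -m 0x3 -s 2048" starts the stub with shared
+ * memory ID 0 on cores 0-1 with a 2048 MB DPDK memory pool.
+ */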
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ /*
+ * Set the io_queue_size to UINT16_MAX to initialize
+ * the controller with the possible largest queue size.
+ */
+ opts->io_queue_size = UINT16_MAX;
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ struct ctrlr_entry *entry;
+
+ entry = malloc(sizeof(struct ctrlr_entry));
+ if (entry == NULL) {
+ fprintf(stderr, "Malloc error\n");
+ exit(1);
+ }
+
+ entry->ctrlr = ctrlr;
+ entry->next = g_controllers;
+ g_controllers = entry;
+}
+
+static int
+stub_sleep(void *arg)
+{
+ usleep(1000 * 1000);
+ return 0;
+}
+
+static void
+stub_start(void *arg1)
+{
+ int shm_id = (intptr_t)arg1;
+
+ spdk_unaffinitize_thread();
+
+ if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
+ fprintf(stderr, "spdk_nvme_probe() failed\n");
+ exit(1);
+ }
+
+ snprintf(g_path, sizeof(g_path), "/var/run/spdk_stub%d", shm_id);
+ if (mknod(g_path, S_IFREG, 0) != 0) {
+ fprintf(stderr, "could not create sentinel file %s\n", g_path);
+ exit(1);
+ }
+
+ g_poller = SPDK_POLLER_REGISTER(stub_sleep, NULL, 0);
+}
+
+static void
+stub_shutdown(void)
+{
+ spdk_poller_unregister(&g_poller);
+ unlink(g_path);
+ spdk_app_stop(0);
+}
+
+int
+main(int argc, char **argv)
+{
+ int ch;
+ struct spdk_app_opts opts = {};
+ long int val;
+
+ /* default value in opts structure */
+ spdk_app_opts_init(&opts);
+
+ opts.name = "stub";
+ opts.rpc_addr = NULL;
+
+ while ((ch = getopt(argc, argv, "i:m:n:p:s:H")) != -1) {
+ if (ch == 'm') {
+ opts.reactor_mask = optarg;
+ } else if (ch == '?') {
+ usage(argv[0]);
+ exit(1);
+ } else {
+ val = spdk_strtol(optarg, 10);
+ if (val < 0) {
+ fprintf(stderr, "Converting a string to integer failed\n");
+ exit(1);
+ }
+ switch (ch) {
+ case 'i':
+ opts.shm_id = val;
+ break;
+ case 'n':
+ opts.mem_channel = val;
+ break;
+ case 'p':
+ opts.master_core = val;
+ break;
+ case 's':
+ opts.mem_size = val;
+ break;
+ case 'H':
+ default:
+ usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ }
+ }
+ }
+
+ if (opts.shm_id < 0) {
+ fprintf(stderr, "%s: -i shared memory ID must be specified\n", argv[0]);
+ usage(argv[0]);
+ exit(1);
+ }
+
+ opts.shutdown_cb = stub_shutdown;
+
+ ch = spdk_app_start(&opts, stub_start, (void *)(intptr_t)opts.shm_id);
+
+ cleanup();
+ spdk_app_fini();
+
+ return ch;
+}
diff --git a/src/spdk/test/bdev/Makefile b/src/spdk/test/bdev/Makefile
new file mode 100644
index 000000000..cb15bd49a
--- /dev/null
+++ b/src/spdk/test/bdev/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdevio bdevperf
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/bdev/bdev_raid.sh b/src/spdk/test/bdev/bdev_raid.sh
new file mode 100755
index 000000000..c85d33f6e
--- /dev/null
+++ b/src/spdk/test/bdev/bdev_raid.sh
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+rpc_server=/var/tmp/spdk-raid.sock
+rpc_py="$rootdir/scripts/rpc.py -s $rpc_server"
+tmp_file=$SPDK_TEST_STORAGE/raidrandtest
+
+source $rootdir/test/common/autotest_common.sh
+source $testdir/nbd_common.sh
+
+function raid_unmap_data_verify() {
+ if hash blkdiscard; then
+ local nbd=$1
+ local rpc_server=$2
+ local blksize
+ blksize=$(lsblk -o LOG-SEC $nbd | grep -v LOG-SEC | cut -d ' ' -f 5)
+ local rw_blk_num=4096
+ local rw_len=$((blksize * rw_blk_num))
+ local unmap_blk_offs=(0 1028 321)
+ local unmap_blk_nums=(128 2035 456)
+ local unmap_off
+ local unmap_len
+
+ # data write
+ dd if=/dev/urandom of=$tmp_file bs=$blksize count=$rw_blk_num
+ dd if=$tmp_file of=$nbd bs=$blksize count=$rw_blk_num oflag=direct
+ blockdev --flushbufs $nbd
+
+ # confirm random data is written correctly in raid0 device
+ cmp -b -n $rw_len $tmp_file $nbd
+
+ for ((i = 0; i < ${#unmap_blk_offs[@]}; i++)); do
+ unmap_off=$((blksize * ${unmap_blk_offs[$i]}))
+ unmap_len=$((blksize * ${unmap_blk_nums[$i]}))
+
+ # data unmap on tmp_file
+ dd if=/dev/zero of=$tmp_file bs=$blksize seek=${unmap_blk_offs[$i]} count=${unmap_blk_nums[$i]} conv=notrunc
+
+ # data unmap on raid bdev
+ blkdiscard -o $unmap_off -l $unmap_len $nbd
+ blockdev --flushbufs $nbd
+
+ # data verify after unmap
+ cmp -b -n $rw_len $tmp_file $nbd
+ done
+ fi
+
+ return 0
+}
+
+function on_error_exit() {
+ if [ -n "$raid_pid" ]; then
+ killprocess $raid_pid
+ fi
+
+ rm -f $tmp_file
+ print_backtrace
+ exit 1
+}
+
+function configure_raid_bdev() {
+ rm -rf $testdir/rpcs.txt
+
+ cat <<- EOL >> $testdir/rpcs.txt
+ bdev_malloc_create 32 512 -b Base_1
+ bdev_malloc_create 32 512 -b Base_2
+ bdev_raid_create -z 64 -r 0 -b "Base_1 Base_2" -n raid0
+ EOL
+ $rpc_py < $testdir/rpcs.txt
+
+ rm -rf $testdir/rpcs.txt
+}
+
+function raid_function_test() {
+ if [ $(uname -s) = Linux ] && modprobe -n nbd; then
+ local nbd=/dev/nbd0
+ local raid_bdev
+
+ modprobe nbd
+ $rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 -L bdev_raid &
+ raid_pid=$!
+ echo "Process raid pid: $raid_pid"
+ waitforlisten $raid_pid $rpc_server
+
+ configure_raid_bdev
+ raid_bdev=$($rpc_py bdev_raid_get_bdevs online | cut -d ' ' -f 1)
+ if [ "$raid_bdev" = "" ]; then
+ echo "No raid0 device in SPDK app"
+ return 1
+ fi
+
+ nbd_start_disks $rpc_server $raid_bdev $nbd
+ count=$(nbd_get_count $rpc_server)
+ if [ $count -ne 1 ]; then
+ return 1
+ fi
+
+ raid_unmap_data_verify $nbd $rpc_server
+
+ nbd_stop_disks $rpc_server $nbd
+ count=$(nbd_get_count $rpc_server)
+ if [ $count -ne 0 ]; then
+ return 1
+ fi
+
+ killprocess $raid_pid
+ else
+ echo "skipping bdev raid tests."
+ fi
+
+ return 0
+}
+
+trap 'on_error_exit;' ERR
+
+raid_function_test
+
+rm -f $tmp_file
diff --git a/src/spdk/test/bdev/bdevio/.gitignore b/src/spdk/test/bdev/bdevio/.gitignore
new file mode 100644
index 000000000..1bb55429d
--- /dev/null
+++ b/src/spdk/test/bdev/bdevio/.gitignore
@@ -0,0 +1 @@
+bdevio
diff --git a/src/spdk/test/bdev/bdevio/Makefile b/src/spdk/test/bdev/bdevio/Makefile
new file mode 100644
index 000000000..83aca58ca
--- /dev/null
+++ b/src/spdk/test/bdev/bdevio/Makefile
@@ -0,0 +1,48 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = bdevio
+
+C_SRCS := bdevio.c
+
+SPDK_LIB_LIST = $(ALL_MODULES_LIST)
+SPDK_LIB_LIST += $(EVENT_BDEV_SUBSYSTEM)
+SPDK_LIB_LIST += app_rpc bdev bdev_rpc accel event trace log conf thread util rpc jsonrpc json sock notify
+
+LIBS += -lcunit
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/bdev/bdevio/bdevio.c b/src/spdk/test/bdev/bdevio/bdevio.c
new file mode 100644
index 000000000..54d1712e3
--- /dev/null
+++ b/src/spdk/test/bdev/bdevio/bdevio.c
@@ -0,0 +1,1433 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/bdev.h"
+#include "spdk/accel_engine.h"
+#include "spdk/env.h"
+#include "spdk/log.h"
+#include "spdk/thread.h"
+#include "spdk/event.h"
+#include "spdk/rpc.h"
+#include "spdk/util.h"
+#include "spdk/string.h"
+
+#include "CUnit/Basic.h"
+
+#define BUFFER_IOVS 1024
+#define BUFFER_SIZE (260 * 1024)
+#define BDEV_TASK_ARRAY_SIZE 2048
+
+pthread_mutex_t g_test_mutex;
+pthread_cond_t g_test_cond;
+
+static struct spdk_thread *g_thread_init;
+static struct spdk_thread *g_thread_ut;
+static struct spdk_thread *g_thread_io;
+static bool g_wait_for_tests = false;
+static int g_num_failures = 0;
+
+struct io_target {
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *bdev_desc;
+ struct spdk_io_channel *ch;
+ struct io_target *next;
+};
+
+struct bdevio_request {
+ char *buf;
+ char *fused_buf;
+ int data_len;
+ uint64_t offset;
+ struct iovec iov[BUFFER_IOVS];
+ int iovcnt;
+ struct iovec fused_iov[BUFFER_IOVS];
+ int fused_iovcnt;
+ struct io_target *target;
+};
+
+struct io_target *g_io_targets = NULL;
+struct io_target *g_current_io_target = NULL;
+static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);
+
+static void
+execute_spdk_function(spdk_msg_fn fn, void *arg)
+{
+ pthread_mutex_lock(&g_test_mutex);
+ spdk_thread_send_msg(g_thread_io, fn, arg);
+ pthread_cond_wait(&g_test_cond, &g_test_mutex);
+ pthread_mutex_unlock(&g_test_mutex);
+}
+
+static void
+wake_ut_thread(void)
+{
+ pthread_mutex_lock(&g_test_mutex);
+ pthread_cond_signal(&g_test_cond);
+ pthread_mutex_unlock(&g_test_mutex);
+}
+
+static void
+__get_io_channel(void *arg)
+{
+ struct io_target *target = arg;
+
+ target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
+ assert(target->ch);
+ wake_ut_thread();
+}
+
+static int
+bdevio_construct_target(struct spdk_bdev *bdev)
+{
+ struct io_target *target;
+ int rc;
+ uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
+ uint32_t block_size = spdk_bdev_get_block_size(bdev);
+
+ target = malloc(sizeof(struct io_target));
+ if (target == NULL) {
+ return -ENOMEM;
+ }
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
+ if (rc != 0) {
+ free(target);
+ SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
+ return rc;
+ }
+
+ printf(" %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
+ spdk_bdev_get_name(bdev),
+ num_blocks, block_size,
+ (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));
+
+ target->bdev = bdev;
+ target->next = g_io_targets;
+ execute_spdk_function(__get_io_channel, target);
+ g_io_targets = target;
+
+ return 0;
+}
+
+static int
+bdevio_construct_targets(void)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ printf("I/O targets:\n");
+
+ bdev = spdk_bdev_first_leaf();
+ while (bdev != NULL) {
+ rc = bdevio_construct_target(bdev);
+ if (rc < 0) {
+ SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
+ return rc;
+ }
+ bdev = spdk_bdev_next_leaf(bdev);
+ }
+
+ if (g_io_targets == NULL) {
+ SPDK_ERRLOG("No bdevs to perform tests on\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+__put_io_channel(void *arg)
+{
+ struct io_target *target = arg;
+
+ spdk_put_io_channel(target->ch);
+ wake_ut_thread();
+}
+
+static void
+bdevio_cleanup_targets(void)
+{
+ struct io_target *target;
+
+ target = g_io_targets;
+ while (target != NULL) {
+ execute_spdk_function(__put_io_channel, target);
+ spdk_bdev_close(target->bdev_desc);
+ g_io_targets = target->next;
+ free(target);
+ target = g_io_targets;
+ }
+}
+
+static bool g_completion_success;
+
+static void
+initialize_buffer(char **buf, int pattern, int size)
+{
+ *buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ memset(*buf, pattern, size);
+}
+
+static void
+quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
+{
+ g_completion_success = success;
+ spdk_bdev_free_io(bdev_io);
+ wake_ut_thread();
+}
+
+static void
+__blockdev_write(void *arg)
+{
+ struct bdevio_request *req = arg;
+ struct io_target *target = req->target;
+ int rc;
+
+ if (req->iovcnt) {
+ rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
+ req->data_len, quick_test_complete, NULL);
+ } else {
+ rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
+ req->data_len, quick_test_complete, NULL);
+ }
+
+ if (rc) {
+ g_completion_success = false;
+ wake_ut_thread();
+ }
+}
+
+static void
+__blockdev_write_zeroes(void *arg)
+{
+ struct bdevio_request *req = arg;
+ struct io_target *target = req->target;
+ int rc;
+
+ rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
+ req->data_len, quick_test_complete, NULL);
+ if (rc) {
+ g_completion_success = false;
+ wake_ut_thread();
+ }
+}
+
+static void
+__blockdev_compare_and_write(void *arg)
+{
+ struct bdevio_request *req = arg;
+ struct io_target *target = req->target;
+ int rc;
+
+ rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
+ req->fused_iov, req->fused_iovcnt, req->offset, req->data_len, quick_test_complete, NULL);
+
+ if (rc) {
+ g_completion_success = false;
+ wake_ut_thread();
+ }
+}
+
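+/*
+ * Split the request's flat buffer into at most BUFFER_IOVS iovecs of iov_len
+ * bytes each (the final one may be shorter). An iov_len of 0 leaves iovcnt at
+ * 0, which makes the submit paths fall back to the non-vectored read/write APIs.
+ */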
+static void
+sgl_chop_buffer(struct bdevio_request *req, int iov_len)
+{
+ int data_len = req->data_len;
+ char *buf = req->buf;
+
+ req->iovcnt = 0;
+ if (!iov_len) {
+ return;
+ }
+
+ for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
+ if (data_len < iov_len) {
+ iov_len = data_len;
+ }
+
+ req->iov[req->iovcnt].iov_base = buf;
+ req->iov[req->iovcnt].iov_len = iov_len;
+
+ buf += iov_len;
+ data_len -= iov_len;
+ }
+
+ CU_ASSERT_EQUAL_FATAL(data_len, 0);
+}
+
+static void
+sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
+{
+ int data_len = req->data_len;
+ char *buf = req->fused_buf;
+
+ req->fused_iovcnt = 0;
+ if (!iov_len) {
+ return;
+ }
+
+ for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
+ if (data_len < iov_len) {
+ iov_len = data_len;
+ }
+
+ req->fused_iov[req->fused_iovcnt].iov_base = buf;
+ req->fused_iov[req->fused_iovcnt].iov_len = iov_len;
+
+ buf += iov_len;
+ data_len -= iov_len;
+ }
+
+ CU_ASSERT_EQUAL_FATAL(data_len, 0);
+}
+
+static void
+blockdev_write(struct io_target *target, char *tx_buf,
+ uint64_t offset, int data_len, int iov_len)
+{
+ struct bdevio_request req;
+
+ req.target = target;
+ req.buf = tx_buf;
+ req.data_len = data_len;
+ req.offset = offset;
+ sgl_chop_buffer(&req, iov_len);
+
+ g_completion_success = false;
+
+ execute_spdk_function(__blockdev_write, &req);
+}
+
+static void
+_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
+ uint64_t offset, int data_len, int iov_len)
+{
+ struct bdevio_request req;
+
+ req.target = target;
+ req.buf = cmp_buf;
+ req.fused_buf = write_buf;
+ req.data_len = data_len;
+ req.offset = offset;
+ sgl_chop_buffer(&req, iov_len);
+ sgl_chop_fused_buffer(&req, iov_len);
+
+ g_completion_success = false;
+
+ execute_spdk_function(__blockdev_compare_and_write, &req);
+}
+
+static void
+blockdev_write_zeroes(struct io_target *target, char *tx_buf,
+ uint64_t offset, int data_len)
+{
+ struct bdevio_request req;
+
+ req.target = target;
+ req.buf = tx_buf;
+ req.data_len = data_len;
+ req.offset = offset;
+
+ g_completion_success = false;
+
+ execute_spdk_function(__blockdev_write_zeroes, &req);
+}
+
+static void
+__blockdev_read(void *arg)
+{
+ struct bdevio_request *req = arg;
+ struct io_target *target = req->target;
+ int rc;
+
+ if (req->iovcnt) {
+ rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
+ req->data_len, quick_test_complete, NULL);
+ } else {
+ rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
+ req->data_len, quick_test_complete, NULL);
+ }
+
+ if (rc) {
+ g_completion_success = false;
+ wake_ut_thread();
+ }
+}
+
+static void
+blockdev_read(struct io_target *target, char *rx_buf,
+ uint64_t offset, int data_len, int iov_len)
+{
+ struct bdevio_request req;
+
+ req.target = target;
+ req.buf = rx_buf;
+ req.data_len = data_len;
+ req.offset = offset;
+ req.iovcnt = 0;
+ sgl_chop_buffer(&req, iov_len);
+
+ g_completion_success = false;
+
+ execute_spdk_function(__blockdev_read, &req);
+}
+
+static int
+blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
+{
+ int rc;
+ rc = memcmp(rx_buf, tx_buf, data_length);
+
+ spdk_free(rx_buf);
+ spdk_free(tx_buf);
+
+ return rc;
+}
+
+static bool
+blockdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t data_length)
+{
+ if (data_length < spdk_bdev_get_block_size(bdev) ||
+ data_length % spdk_bdev_get_block_size(bdev) ||
+ data_length / spdk_bdev_get_block_size(bdev) > spdk_bdev_get_num_blocks(bdev)) {
+ return false;
+ }
+
+ return true;
+}
+
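+/*
+ * Write the pattern (or issue write-zeroes when write_zeroes is set), read the
+ * same range back, check both completions against expected_rc and, on success,
+ * verify that the data read back matches what was written.
+ */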
+static void
+blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
+ int expected_rc, bool write_zeroes)
+{
+ struct io_target *target;
+ char *tx_buf = NULL;
+ char *rx_buf = NULL;
+ int rc;
+
+ target = g_current_io_target;
+
+ if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
+ return;
+ }
+
+ if (!write_zeroes) {
+ initialize_buffer(&tx_buf, pattern, data_length);
+ initialize_buffer(&rx_buf, 0, data_length);
+
+ blockdev_write(target, tx_buf, offset, data_length, iov_len);
+ } else {
+ initialize_buffer(&tx_buf, 0, data_length);
+ initialize_buffer(&rx_buf, pattern, data_length);
+
+ blockdev_write_zeroes(target, tx_buf, offset, data_length);
+ }
+
+
+ if (expected_rc == 0) {
+ CU_ASSERT_EQUAL(g_completion_success, true);
+ } else {
+ CU_ASSERT_EQUAL(g_completion_success, false);
+ }
+ blockdev_read(target, rx_buf, offset, data_length, iov_len);
+
+ if (expected_rc == 0) {
+ CU_ASSERT_EQUAL(g_completion_success, true);
+ } else {
+ CU_ASSERT_EQUAL(g_completion_success, false);
+ }
+
+ if (g_completion_success) {
+ rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
+ /* Assert the write by comparing it with values read
+ * from each blockdev */
+ CU_ASSERT_EQUAL(rc, 0);
+ }
+}
+
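+/*
+ * Exercise the fused compare-and-write path: the first compare-and-write
+ * succeeds because the medium still holds tx_buf, the second must fail with a
+ * miscompare because the first one already replaced the data with write_buf.
+ */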
+static void
+blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
+{
+ struct io_target *target;
+ char *tx_buf = NULL;
+ char *write_buf = NULL;
+ char *rx_buf = NULL;
+ int rc;
+
+ target = g_current_io_target;
+
+ if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
+ return;
+ }
+
+ initialize_buffer(&tx_buf, 0xAA, data_length);
+ initialize_buffer(&rx_buf, 0, data_length);
+ initialize_buffer(&write_buf, 0xBB, data_length);
+
+ blockdev_write(target, tx_buf, offset, data_length, iov_len);
+ CU_ASSERT_EQUAL(g_completion_success, true);
+
+ _blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
+ CU_ASSERT_EQUAL(g_completion_success, true);
+
+ _blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
+ CU_ASSERT_EQUAL(g_completion_success, false);
+
+ blockdev_read(target, rx_buf, offset, data_length, iov_len);
+ CU_ASSERT_EQUAL(g_completion_success, true);
+ rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
+ /* Assert the write by comparing it with values read
+ * from each blockdev */
+ CU_ASSERT_EQUAL(rc, 0);
+}
+
+static void
+blockdev_write_read_4k(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 4K */
+ data_length = 4096;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_write_zeroes_read_4k(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 4K */
+ data_length = 4096;
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write_zeroes and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
+}
+
+/*
+ * This i/o will not have to split at the bdev layer.
+ */
+static void
+blockdev_write_zeroes_read_1m(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 1M */
+ data_length = 1048576;
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write_zeroes and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
+}
+
+/*
+ * This i/o will have to split at the bdev layer if
+ * write-zeroes is not supported by the bdev.
+ */
+static void
+blockdev_write_zeroes_read_3m(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 3M */
+ data_length = 3145728;
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write_zeroes and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
+}
+
+/*
+ * This i/o will have to split at the bdev layer if
+ * write-zeroes is not supported by the bdev. It also
+ * tests a write size that is not an even multiple of
+ * the bdev layer zero buffer size.
+ */
+static void
+blockdev_write_zeroes_read_3m_500k(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 3.5M */
+ data_length = 3670016;
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write_zeroes and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
+}
+
+static void
+blockdev_writev_readv_4k(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 4K */
+ data_length = 4096;
+ iov_len = 4096;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_comparev_and_writev(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+
+ data_length = 1;
+ iov_len = 1;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 0;
+
+ blockdev_compare_and_write(data_length, iov_len, offset);
+}
+
+static void
+blockdev_writev_readv_30x4k(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 30 x 4K = 120K */
+ data_length = 4096 * 30;
+ iov_len = 4096;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_write_read_512Bytes(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 512 */
+ data_length = 512;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_writev_readv_512Bytes(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 512 */
+ data_length = 512;
+ iov_len = 512;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_write_read_size_gt_128k(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 132K */
+ data_length = 135168;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_writev_readv_size_gt_128k(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 132K */
+ data_length = 135168;
+ iov_len = 135168;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_writev_readv_size_gt_128k_two_iov(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 132K */
+ data_length = 135168;
+ iov_len = 128 * 1024;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_write_read_invalid_size(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size is not a multiple of the block size */
+ data_length = 0x1015;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are invalid, hence the expected return value
+ * of write and read for all blockdevs is < 0 */
+ expected_rc = -1;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
+{
+ struct io_target *target;
+ struct spdk_bdev *bdev;
+ char *tx_buf = NULL;
+ char *rx_buf = NULL;
+ uint64_t offset;
+ uint32_t block_size;
+ int rc;
+
+ target = g_current_io_target;
+ bdev = target->bdev;
+
+ block_size = spdk_bdev_get_block_size(bdev);
+
+ /* The start offset is set to the last block, so that
+ * offset + nbytes == total size of the blockdev. */
+ offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);
+
+ initialize_buffer(&tx_buf, 0xA3, block_size);
+ initialize_buffer(&rx_buf, 0, block_size);
+
+ blockdev_write(target, tx_buf, offset, block_size, 0);
+ CU_ASSERT_EQUAL(g_completion_success, true);
+
+ blockdev_read(target, rx_buf, offset, block_size, 0);
+ CU_ASSERT_EQUAL(g_completion_success, true);
+
+ rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
+ /* Assert the write by comparing it with values read
+ * from each blockdev */
+ CU_ASSERT_EQUAL(rc, 0);
+}
+
+static void
+blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
+{
+ struct io_target *target;
+ struct spdk_bdev *bdev;
+ char *tx_buf = NULL;
+ char *rx_buf = NULL;
+ int data_length;
+ uint64_t offset;
+ int pattern;
+
+ /* Tests the overflow condition of the blockdevs. */
+ data_length = 4096;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ pattern = 0xA3;
+
+ target = g_current_io_target;
+ bdev = target->bdev;
+
+ /* The start offset is valid, but offset + nbytes exceeds
+ * the total size of the blockdev, so the I/O must fail. */
+ offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);
+
+ initialize_buffer(&tx_buf, pattern, data_length);
+ initialize_buffer(&rx_buf, 0, data_length);
+
+ blockdev_write(target, tx_buf, offset, data_length, 0);
+ CU_ASSERT_EQUAL(g_completion_success, false);
+
+ blockdev_read(target, rx_buf, offset, data_length, 0);
+ CU_ASSERT_EQUAL(g_completion_success, false);
+}
+
+static void
+blockdev_write_read_max_offset(void)
+{
+ int data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ data_length = 4096;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ /* The start offset has been set to UINT64_MAX such that
+ * adding nbytes wraps around and points to an invalid address. */
+ offset = UINT64_MAX;
+ pattern = 0xA3;
+ /* Params are invalid, hence the expected return value
+ * of write and read for all blockdevs is < 0 */
+ expected_rc = -1;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_overlapped_write_read_8k(void)
+{
+ int data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 8K */
+ data_length = 8192;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+ /* Assert the write by comparing it with values read
+ * from the same offset for each blockdev */
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+
+ /* Overwrite 8K with the pattern 0xBB at an offset that overlaps the range
+ * written above, then assert the new value across the overlapped range */
+ pattern = 0xBB;
+ /* Offset = 4096 overlaps the 0-8K range written above */
+ offset = 4096;
+ /* Assert the write by comparing it with values read
+ * from the overlapped offset for each blockdev */
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+__blockdev_reset(void *arg)
+{
+ struct bdevio_request *req = arg;
+ struct io_target *target = req->target;
+ int rc;
+
+ rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
+ if (rc < 0) {
+ g_completion_success = false;
+ wake_ut_thread();
+ }
+}
+
+static void
+blockdev_test_reset(void)
+{
+ struct bdevio_request req;
+ struct io_target *target;
+
+ target = g_current_io_target;
+ req.target = target;
+
+ g_completion_success = false;
+
+ execute_spdk_function(__blockdev_reset, &req);
+
+ /* Workaround: NVMe-oF target doesn't support reset yet - so for now
+ * don't fail the test if it's an NVMe bdev.
+ */
+ if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
+ CU_ASSERT_EQUAL(g_completion_success, true);
+ }
+}
+
+struct bdevio_passthrough_request {
+ struct spdk_nvme_cmd cmd;
+ void *buf;
+ uint32_t len;
+ struct io_target *target;
+ int sct;
+ int sc;
+ uint32_t cdw0;
+};
+
+static void
+nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
+{
+ struct bdevio_passthrough_request *pt_req = arg;
+
+ spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
+ spdk_bdev_free_io(bdev_io);
+ wake_ut_thread();
+}
+
+static void
+__blockdev_nvme_passthru(void *arg)
+{
+ struct bdevio_passthrough_request *pt_req = arg;
+ struct io_target *target = pt_req->target;
+ int rc;
+
+ rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
+ &pt_req->cmd, pt_req->buf, pt_req->len,
+ nvme_pt_test_complete, pt_req);
+ if (rc) {
+ wake_ut_thread();
+ }
+}
+
+static void
+blockdev_test_nvme_passthru_rw(void)
+{
+ struct bdevio_passthrough_request pt_req;
+ void *write_buf, *read_buf;
+ struct io_target *target;
+
+ target = g_current_io_target;
+
+ if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
+ return;
+ }
+
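+ /* Build a raw NVMe write of one logical block at starting LBA 4:
+ * cdw10/cdw11 carry the SLBA and cdw12 bits 15:0 carry the 0-based block count. */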
+ memset(&pt_req, 0, sizeof(pt_req));
+ pt_req.target = target;
+ pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
+ pt_req.cmd.nsid = 1;
+ *(uint64_t *)&pt_req.cmd.cdw10 = 4;
+ pt_req.cmd.cdw12 = 0;
+
+ pt_req.len = spdk_bdev_get_block_size(target->bdev);
+ write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ memset(write_buf, 0xA5, pt_req.len);
+ pt_req.buf = write_buf;
+
+ pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
+ execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
+ CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
+
+ pt_req.cmd.opc = SPDK_NVME_OPC_READ;
+ read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ pt_req.buf = read_buf;
+
+ pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
+ execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
+ CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
+
+ CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
+ spdk_free(read_buf);
+ spdk_free(write_buf);
+}
+
+static void
+blockdev_test_nvme_passthru_vendor_specific(void)
+{
+ struct bdevio_passthrough_request pt_req;
+ struct io_target *target;
+
+ target = g_current_io_target;
+
+ if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
+ return;
+ }
+
+ memset(&pt_req, 0, sizeof(pt_req));
+ pt_req.target = target;
+ pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
+ pt_req.cmd.nsid = 1;
+
+ pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ pt_req.sc = SPDK_NVME_SC_SUCCESS;
+ pt_req.cdw0 = 0xbeef;
+ execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
+ CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
+ CU_ASSERT(pt_req.cdw0 == 0x0);
+}
+
+static void
+__blockdev_nvme_admin_passthru(void *arg)
+{
+ struct bdevio_passthrough_request *pt_req = arg;
+ struct io_target *target = pt_req->target;
+ int rc;
+
+ rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
+ &pt_req->cmd, pt_req->buf, pt_req->len,
+ nvme_pt_test_complete, pt_req);
+ if (rc) {
+ wake_ut_thread();
+ }
+}
+
+static void
+blockdev_test_nvme_admin_passthru(void)
+{
+ struct io_target *target;
+ struct bdevio_passthrough_request pt_req;
+
+ target = g_current_io_target;
+
+ if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
+ return;
+ }
+
+ memset(&pt_req, 0, sizeof(pt_req));
+ pt_req.target = target;
+ pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
+ pt_req.cmd.nsid = 0;
+ *(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;
+
+ pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
+ pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+
+ pt_req.sct = SPDK_NVME_SCT_GENERIC;
+ pt_req.sc = SPDK_NVME_SC_SUCCESS;
+ execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
+ CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
+}
+
+static void
+__stop_init_thread(void *arg)
+{
+ unsigned num_failures = g_num_failures;
+ struct spdk_jsonrpc_request *request = arg;
+
+ g_num_failures = 0;
+
+ bdevio_cleanup_targets();
+ if (g_wait_for_tests) {
+ /* Do not stop the app yet, wait for another RPC */
+ rpc_perform_tests_cb(num_failures, request);
+ return;
+ }
+ spdk_app_stop(num_failures);
+}
+
+static void
+stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
+{
+ g_num_failures = num_failures;
+
+ spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
+}
+
+static int
+suite_init(void)
+{
+ if (g_current_io_target == NULL) {
+ g_current_io_target = g_io_targets;
+ }
+ return 0;
+}
+
+static int
+suite_fini(void)
+{
+ g_current_io_target = g_current_io_target->next;
+ return 0;
+}
+
+#define SUITE_NAME_MAX 64
+
+static int
+__setup_ut_on_single_target(struct io_target *target)
+{
+ unsigned rc = 0;
+ CU_pSuite suite = NULL;
+ char name[SUITE_NAME_MAX];
+
+ snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
+ suite = CU_add_suite(name, suite_init, suite_fini);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ rc = CU_get_error();
+ return -rc;
+ }
+
+ if (
+ CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
+ || CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
+ || CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
+ || CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
+ || CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
+ || CU_add_test(suite, "blockdev reset",
+ blockdev_test_reset) == NULL
+ || CU_add_test(suite, "blockdev write read 512 bytes",
+ blockdev_write_read_512Bytes) == NULL
+ || CU_add_test(suite, "blockdev write read size > 128k",
+ blockdev_write_read_size_gt_128k) == NULL
+ || CU_add_test(suite, "blockdev write read invalid size",
+ blockdev_write_read_invalid_size) == NULL
+ || CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
+ blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
+ || CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
+ blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
+ || CU_add_test(suite, "blockdev write read max offset",
+ blockdev_write_read_max_offset) == NULL
+ || CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
+ blockdev_overlapped_write_read_8k) == NULL
+ || CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
+ || CU_add_test(suite, "blockdev writev readv 30 x 4k",
+ blockdev_writev_readv_30x4k) == NULL
+ || CU_add_test(suite, "blockdev writev readv 512 bytes",
+ blockdev_writev_readv_512Bytes) == NULL
+ || CU_add_test(suite, "blockdev writev readv size > 128k",
+ blockdev_writev_readv_size_gt_128k) == NULL
+ || CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
+ blockdev_writev_readv_size_gt_128k_two_iov) == NULL
+ || CU_add_test(suite, "blockdev comparev and writev", blockdev_comparev_and_writev) == NULL
+ || CU_add_test(suite, "blockdev nvme passthru rw",
+ blockdev_test_nvme_passthru_rw) == NULL
+ || CU_add_test(suite, "blockdev nvme passthru vendor specific",
+ blockdev_test_nvme_passthru_vendor_specific) == NULL
+ || CU_add_test(suite, "blockdev nvme admin passthru",
+ blockdev_test_nvme_admin_passthru) == NULL
+ ) {
+ CU_cleanup_registry();
+ rc = CU_get_error();
+ return -rc;
+ }
+ return 0;
+}
+
+static void
+__run_ut_thread(void *arg)
+{
+ struct spdk_jsonrpc_request *request = arg;
+ int rc = 0;
+ struct io_target *target;
+ unsigned num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ /* CUnit error, probably won't recover */
+ rc = CU_get_error();
+ stop_init_thread(-rc, request);
+ }
+
+ target = g_io_targets;
+ while (target != NULL) {
+ rc = __setup_ut_on_single_target(target);
+ if (rc < 0) {
+ /* CUnit error, probably won't recover */
+ stop_init_thread(-rc, request);
+ }
+ target = target->next;
+ }
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ stop_init_thread(num_failures, request);
+}
+
+static void
+__construct_targets(void *arg)
+{
+ if (bdevio_construct_targets() < 0) {
+ spdk_app_stop(-1);
+ return;
+ }
+
+ spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
+}
+
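+/*
+ * bdevio needs at least three cores: the init thread stays on the current
+ * core, while the unit-test thread and the I/O thread are each pinned to
+ * another core from the application core mask (reactor_mask defaults to 0x7).
+ */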
+static void
+test_main(void *arg1)
+{
+ struct spdk_cpuset tmpmask = {}, *appmask;
+ uint32_t cpu, init_cpu;
+
+ pthread_mutex_init(&g_test_mutex, NULL);
+ pthread_cond_init(&g_test_cond, NULL);
+
+ appmask = spdk_app_get_core_mask();
+
+ if (spdk_cpuset_count(appmask) < 3) {
+ spdk_app_stop(-1);
+ return;
+ }
+
+ init_cpu = spdk_env_get_current_core();
+ g_thread_init = spdk_get_thread();
+
+ for (cpu = 0; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
+ if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
+ spdk_cpuset_zero(&tmpmask);
+ spdk_cpuset_set_cpu(&tmpmask, cpu, true);
+ g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
+ break;
+ }
+ }
+
+ if (cpu == SPDK_ENV_LCORE_ID_ANY) {
+ spdk_app_stop(-1);
+ return;
+ }
+
+ for (cpu++; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
+ if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
+ spdk_cpuset_zero(&tmpmask);
+ spdk_cpuset_set_cpu(&tmpmask, cpu, true);
+ g_thread_io = spdk_thread_create("io_thread", &tmpmask);
+ break;
+ }
+ }
+
+ if (cpu == SPDK_ENV_LCORE_ID_ANY) {
+ spdk_app_stop(-1);
+ return;
+ }
+
+ if (g_wait_for_tests) {
+ /* Do not perform any tests until RPC is received */
+ return;
+ }
+
+ spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
+}
+
+static void
+bdevio_usage(void)
+{
+ printf(" -w start bdevio app and wait for RPC to start the tests\n");
+}
+
+static int
+bdevio_parse_arg(int ch, char *arg)
+{
+ switch (ch) {
+ case 'w':
+ g_wait_for_tests = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+struct rpc_perform_tests {
+ char *name;
+};
+
+static void
+free_rpc_perform_tests(struct rpc_perform_tests *r)
+{
+ free(r->name);
+}
+
+static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
+ {"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
+};
+
+static void
+rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
+{
+ struct spdk_json_write_ctx *w;
+
+ if (num_failures == 0) {
+ w = spdk_jsonrpc_begin_result(request);
+ spdk_json_write_uint32(w, num_failures);
+ spdk_jsonrpc_end_result(request, w);
+ } else {
+ spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+ "%d test cases failed", num_failures);
+ }
+}
+
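+/*
+ * RPC entry point used together with -w: construct targets for the named bdev
+ * (or for all leaf bdevs), run the CUnit suites on the unit-test thread, and
+ * report the number of failures back over JSON-RPC via rpc_perform_tests_cb().
+ */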
+static void
+rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
+{
+ struct rpc_perform_tests req = {NULL};
+ struct spdk_bdev *bdev;
+ int rc;
+
+ if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
+ SPDK_COUNTOF(rpc_perform_tests_decoders),
+ &req)) {
+ SPDK_ERRLOG("spdk_json_decode_object failed\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
+ goto invalid;
+ }
+
+ if (req.name) {
+ bdev = spdk_bdev_get_by_name(req.name);
+ if (bdev == NULL) {
+ SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
+ spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+ "Bdev '%s' does not exist: %s",
+ req.name, spdk_strerror(ENODEV));
+ goto invalid;
+ }
+ rc = bdevio_construct_target(bdev);
+ if (rc < 0) {
+ SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
+ spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+ "Could not construct target for bdev '%s': %s",
+ spdk_bdev_get_name(bdev), spdk_strerror(-rc));
+ goto invalid;
+ }
+ } else {
+ rc = bdevio_construct_targets();
+ if (rc < 0) {
+ SPDK_ERRLOG("Could not construct targets for all bdevs\n");
+ spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+ "Could not construct targets for all bdevs: %s",
+ spdk_strerror(-rc));
+ goto invalid;
+ }
+ }
+ free_rpc_perform_tests(&req);
+
+ spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);
+
+ return;
+
+invalid:
+ free_rpc_perform_tests(&req);
+}
+SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
+
+int
+main(int argc, char **argv)
+{
+ int rc;
+ struct spdk_app_opts opts = {};
+
+ spdk_app_opts_init(&opts);
+ opts.name = "bdevio";
+ opts.reactor_mask = "0x7";
+
+ if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
+ bdevio_parse_arg, bdevio_usage)) !=
+ SPDK_APP_PARSE_ARGS_SUCCESS) {
+ return rc;
+ }
+
+ rc = spdk_app_start(&opts, test_main, NULL);
+ spdk_app_fini();
+
+ return rc;
+}
diff --git a/src/spdk/test/bdev/bdevio/tests.py b/src/spdk/test/bdev/bdevio/tests.py
new file mode 100755
index 000000000..8b46061d0
--- /dev/null
+++ b/src/spdk/test/bdev/bdevio/tests.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+import logging
+import argparse
+import sys
+import shlex
+
+try:
+ from rpc.client import print_dict, JSONRPCException
+ import rpc
+except ImportError:
+ print("SPDK RPC library missing. Please add spdk/scripts/ directory to PYTHONPATH:")
+ print("'export PYTHONPATH=$PYTHONPATH:./spdk/scripts/'")
+ exit(1)
+
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
+
+def print_array(a):
+ print(" ".join((quote(v) for v in a)))
+
+
+def perform_tests_func(client, name=None):
+ """
+
+ Args:
+ name: bdev name to perform bdevio tests on (optional; if omitted, test all bdevs)
+
+ Returns:
+ Number of failures in tests. 0 means no errors found.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+ return client.call('perform_tests', params)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description='SPDK RPC command line interface. NOTE: spdk/scripts/ is expected in PYTHONPATH')
+ parser.add_argument('-s', dest='server_addr',
+ help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock')
+ parser.add_argument('-p', dest='port',
+ help='RPC port number (if server_addr is IP address)',
+ default=5260, type=int)
+ parser.add_argument('-t', dest='timeout',
+ help='Timeout in seconds (floating point) to wait for a response. Default: 60.0',
+ default=60.0, type=float)
+ parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
+ help='Set verbose mode to INFO', default="ERROR")
+ parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
+ help="""Set verbose level. """)
+ subparsers = parser.add_subparsers(help='RPC methods')
+
+ def perform_tests(args):
+ print_dict(perform_tests_func(args.client, name=args.name))
+
+ p = subparsers.add_parser('perform_tests', help='Perform all bdevio tests on the selected bdev (or on all bdevs if -b is omitted)')
+ p.add_argument('-b', '--name', help="Name of the Blockdev. Example: Nvme0n1")
+ p.set_defaults(func=perform_tests)
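+ # Example invocation: ./tests.py -s /var/tmp/spdk.sock perform_tests -b Nvme0n1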
+
+ def call_rpc_func(args):
+ try:
+ args.func(args)
+ except JSONRPCException as ex:
+ print(ex.message)
+ exit(1)
+
+ def execute_script(parser, client, fd):
+ for rpc_call in map(str.rstrip, fd):
+ if not rpc_call.strip():
+ continue
+ args = parser.parse_args(shlex.split(rpc_call))
+ args.client = client
+ call_rpc_func(args)
+
+ args = parser.parse_args()
+ args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper()))
+ if hasattr(args, 'func'):
+ call_rpc_func(args)
+ elif sys.stdin.isatty():
+ # No arguments and no data piped through stdin
+ parser.print_help()
+ exit(1)
+ else:
+ execute_script(parser, args.client, sys.stdin)
diff --git a/src/spdk/test/bdev/bdevperf/.gitignore b/src/spdk/test/bdev/bdevperf/.gitignore
new file mode 100644
index 000000000..e14ddd841
--- /dev/null
+++ b/src/spdk/test/bdev/bdevperf/.gitignore
@@ -0,0 +1 @@
+bdevperf
diff --git a/src/spdk/test/bdev/bdevperf/Makefile b/src/spdk/test/bdev/bdevperf/Makefile
new file mode 100644
index 000000000..689d7fe10
--- /dev/null
+++ b/src/spdk/test/bdev/bdevperf/Makefile
@@ -0,0 +1,55 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = bdevperf
+
+C_SRCS := bdevperf.c
+
+SPDK_LIB_LIST = $(ALL_MODULES_LIST)
+SPDK_LIB_LIST += $(EVENT_BDEV_SUBSYSTEM)
+SPDK_LIB_LIST += bdev accel event trace log conf thread util sock notify
+SPDK_LIB_LIST += rpc jsonrpc json app_rpc log_rpc bdev_rpc
+
+ifeq ($(OS),Linux)
+SPDK_LIB_LIST += event_nbd nbd
+endif
+
+ifeq ($(SPDK_ROOT_DIR)/lib/env_dpdk,$(CONFIG_ENV))
+SPDK_LIB_LIST += env_dpdk_rpc
+endif
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/bdev/bdevperf/bdevperf.c b/src/spdk/test/bdev/bdevperf/bdevperf.c
new file mode 100644
index 000000000..adcdf31cb
--- /dev/null
+++ b/src/spdk/test/bdev/bdevperf/bdevperf.c
@@ -0,0 +1,2137 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/bdev.h"
+#include "spdk/accel_engine.h"
+#include "spdk/endian.h"
+#include "spdk/env.h"
+#include "spdk/event.h"
+#include "spdk/log.h"
+#include "spdk/util.h"
+#include "spdk/thread.h"
+#include "spdk/string.h"
+#include "spdk/rpc.h"
+#include "spdk/bit_array.h"
+#include "spdk/conf.h"
+
+#define BDEVPERF_CONFIG_MAX_FILENAME 1024
+#define BDEVPERF_CONFIG_UNDEFINED -1
+#define BDEVPERF_CONFIG_ERROR -2
+
+struct bdevperf_task {
+ struct iovec iov;
+ struct bdevperf_job *job;
+ struct spdk_bdev_io *bdev_io;
+ void *buf;
+ void *md_buf;
+ uint64_t offset_blocks;
+ struct bdevperf_task *task_to_abort;
+ enum spdk_bdev_io_type io_type;
+ TAILQ_ENTRY(bdevperf_task) link;
+ struct spdk_bdev_io_wait_entry bdev_io_wait;
+};
+
+static const char *g_workload_type = NULL;
+static int g_io_size = 0;
+/* initialize to invalid value so we can detect if user overrides it. */
+static int g_rw_percentage = -1;
+static bool g_verify = false;
+static bool g_reset = false;
+static bool g_continue_on_failure = false;
+static bool g_abort = false;
+static int g_queue_depth = 0;
+static uint64_t g_time_in_usec;
+static int g_show_performance_real_time = 0;
+static uint64_t g_show_performance_period_in_usec = 1000000;
+static uint64_t g_show_performance_period_num = 0;
+static uint64_t g_show_performance_ema_period = 0;
+static int g_run_rc = 0;
+static bool g_shutdown = false;
+static uint64_t g_shutdown_tsc;
+static bool g_zcopy = true;
+static struct spdk_thread *g_master_thread;
+static int g_time_in_sec = 0;
+static bool g_mix_specified = false;
+static const char *g_job_bdev_name;
+static bool g_wait_for_tests = false;
+static struct spdk_jsonrpc_request *g_request = NULL;
+static bool g_multithread_mode = false;
+static int g_timeout_in_sec;
+static struct spdk_conf *g_bdevperf_conf = NULL;
+static const char *g_bdevperf_conf_file = NULL;
+
+static struct spdk_cpuset g_all_cpuset;
+static struct spdk_poller *g_perf_timer = NULL;
+
+static void bdevperf_submit_single(struct bdevperf_job *job, struct bdevperf_task *task);
+static void rpc_perform_tests_cb(void);
+
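+/*
+ * Per-job state: each job owns a bdev descriptor and an I/O channel, keeps its
+ * idle tasks in task_list, and runs on the SPDK thread recorded in 'thread'.
+ */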
+struct bdevperf_job {
+ char *name;
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *bdev_desc;
+ struct spdk_io_channel *ch;
+ TAILQ_ENTRY(bdevperf_job) link;
+ struct spdk_thread *thread;
+
+ const char *workload_type;
+ int io_size;
+ int rw_percentage;
+ bool is_random;
+ bool verify;
+ bool reset;
+ bool continue_on_failure;
+ bool unmap;
+ bool write_zeroes;
+ bool flush;
+ bool abort;
+ int queue_depth;
+
+ uint64_t io_completed;
+ uint64_t io_failed;
+ uint64_t io_timeout;
+ uint64_t prev_io_completed;
+ double ema_io_per_second;
+ int current_queue_depth;
+ uint64_t size_in_ios;
+ uint64_t ios_base;
+ uint64_t offset_in_ios;
+ uint64_t io_size_blocks;
+ uint64_t buf_size;
+ uint32_t dif_check_flags;
+ bool is_draining;
+ struct spdk_poller *run_timer;
+ struct spdk_poller *reset_timer;
+ struct spdk_bit_array *outstanding;
+ TAILQ_HEAD(, bdevperf_task) task_list;
+};
+
+struct spdk_bdevperf {
+ TAILQ_HEAD(, bdevperf_job) jobs;
+ uint32_t running_jobs;
+};
+
+static struct spdk_bdevperf g_bdevperf = {
+ .jobs = TAILQ_HEAD_INITIALIZER(g_bdevperf.jobs),
+ .running_jobs = 0,
+};
+
+enum job_config_rw {
+ JOB_CONFIG_RW_READ = 0,
+ JOB_CONFIG_RW_WRITE,
+ JOB_CONFIG_RW_RANDREAD,
+ JOB_CONFIG_RW_RANDWRITE,
+ JOB_CONFIG_RW_RW,
+ JOB_CONFIG_RW_RANDRW,
+ JOB_CONFIG_RW_VERIFY,
+ JOB_CONFIG_RW_RESET,
+ JOB_CONFIG_RW_UNMAP,
+ JOB_CONFIG_RW_FLUSH,
+ JOB_CONFIG_RW_WRITE_ZEROES,
+};
+
+/* Storing values from a section of job config file */
+struct job_config {
+ const char *name;
+ const char *filename;
+ struct spdk_cpuset cpumask;
+ int bs;
+ int iodepth;
+ int rwmixread;
+ int offset;
+ int length;
+ enum job_config_rw rw;
+ TAILQ_ENTRY(job_config) link;
+};
+
+TAILQ_HEAD(, job_config) job_config_list
+ = TAILQ_HEAD_INITIALIZER(job_config_list);
+
+static bool g_performance_dump_active = false;
+
+struct bdevperf_aggregate_stats {
+ struct bdevperf_job *current_job;
+ uint64_t io_time_in_usec;
+ uint64_t ema_period;
+ double total_io_per_second;
+ double total_mb_per_second;
+ double total_failed_per_second;
+ double total_timeout_per_second;
+};
+
+static struct bdevperf_aggregate_stats g_stats = {};
+
+/*
+ * Cumulative Moving Average (CMA): average of all data points seen so far
+ * Exponential Moving Average (EMA): weighted mean of the previous n data points, with more weight given to recent ones
+ * Simple Moving Average (SMA): unweighted mean of the previous n data points
+ *
+ * Bdevperf supports CMA and EMA.
+ */
+static double
+get_cma_io_per_second(struct bdevperf_job *job, uint64_t io_time_in_usec)
+{
+ return (double)job->io_completed * 1000000 / io_time_in_usec;
+}
+
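+/* Standard EMA with smoothing factor 2 / (ema_period + 1), updated with the
+ * IOPS measured over the most recent performance-dump period. */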
+static double
+get_ema_io_per_second(struct bdevperf_job *job, uint64_t ema_period)
+{
+ double io_completed, io_per_second;
+
+ io_completed = job->io_completed;
+ io_per_second = (double)(io_completed - job->prev_io_completed) * 1000000
+ / g_show_performance_period_in_usec;
+ job->prev_io_completed = io_completed;
+
+ job->ema_io_per_second += (io_per_second - job->ema_io_per_second) * 2
+ / (ema_period + 1);
+ return job->ema_io_per_second;
+}
+
+static void
+performance_dump_job(struct bdevperf_aggregate_stats *stats, struct bdevperf_job *job)
+{
+ double io_per_second, mb_per_second, failed_per_second, timeout_per_second;
+
+ printf("\r Thread name: %s\n", spdk_thread_get_name(job->thread));
+ printf("\r Core Mask: 0x%s\n", spdk_cpuset_fmt(spdk_thread_get_cpumask(job->thread)));
+
+ if (stats->ema_period == 0) {
+ io_per_second = get_cma_io_per_second(job, stats->io_time_in_usec);
+ } else {
+ io_per_second = get_ema_io_per_second(job, stats->ema_period);
+ }
+ mb_per_second = io_per_second * job->io_size / (1024 * 1024);
+ failed_per_second = (double)job->io_failed * 1000000 / stats->io_time_in_usec;
+ timeout_per_second = (double)job->io_timeout * 1000000 / stats->io_time_in_usec;
+
+ printf("\r %-20s: %10.2f IOPS %10.2f MiB/s\n",
+ job->name, io_per_second, mb_per_second);
+ if (failed_per_second != 0) {
+ printf("\r %-20s: %10.2f Fail/s %8.2f TO/s\n",
+ "", failed_per_second, timeout_per_second);
+ }
+ stats->total_io_per_second += io_per_second;
+ stats->total_mb_per_second += mb_per_second;
+ stats->total_failed_per_second += failed_per_second;
+ stats->total_timeout_per_second += timeout_per_second;
+}
+
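+/* Fill every block of buf (and its metadata) with the seed byte. A NULL md_buf
+ * means the metadata is interleaved at the end of each block; otherwise it is
+ * written to the separate metadata buffer. */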
+static void
+generate_data(void *buf, int buf_len, int block_size, void *md_buf, int md_size,
+ int num_blocks, int seed)
+{
+ int offset_blocks = 0, md_offset, data_block_size;
+
+ if (buf_len < num_blocks * block_size) {
+ return;
+ }
+
+ if (md_buf == NULL) {
+ data_block_size = block_size - md_size;
+ md_buf = (char *)buf + data_block_size;
+ md_offset = block_size;
+ } else {
+ data_block_size = block_size;
+ md_offset = md_size;
+ }
+
+ while (offset_blocks < num_blocks) {
+ memset(buf, seed, data_block_size);
+ memset(md_buf, seed, md_size);
+ buf += block_size;
+ md_buf += md_offset;
+ offset_blocks++;
+ }
+}
+
+static bool
+copy_data(void *wr_buf, int wr_buf_len, void *rd_buf, int rd_buf_len, int block_size,
+ void *wr_md_buf, void *rd_md_buf, int md_size, int num_blocks)
+{
+ if (wr_buf_len < num_blocks * block_size || rd_buf_len < num_blocks * block_size) {
+ return false;
+ }
+
+ assert((wr_md_buf != NULL) == (rd_md_buf != NULL));
+
+ memcpy(wr_buf, rd_buf, block_size * num_blocks);
+
+ if (wr_md_buf != NULL) {
+ memcpy(wr_md_buf, rd_md_buf, md_size * num_blocks);
+ }
+
+ return true;
+}
+
+static bool
+verify_data(void *wr_buf, int wr_buf_len, void *rd_buf, int rd_buf_len, int block_size,
+ void *wr_md_buf, void *rd_md_buf, int md_size, int num_blocks, bool md_check)
+{
+ int offset_blocks = 0, md_offset, data_block_size;
+
+ if (wr_buf_len < num_blocks * block_size || rd_buf_len < num_blocks * block_size) {
+ return false;
+ }
+
+ assert((wr_md_buf != NULL) == (rd_md_buf != NULL));
+
+ if (wr_md_buf == NULL) {
+ data_block_size = block_size - md_size;
+ wr_md_buf = (char *)wr_buf + data_block_size;
+ rd_md_buf = (char *)rd_buf + data_block_size;
+ md_offset = block_size;
+ } else {
+ data_block_size = block_size;
+ md_offset = md_size;
+ }
+
+ while (offset_blocks < num_blocks) {
+ if (memcmp(wr_buf, rd_buf, data_block_size) != 0) {
+ return false;
+ }
+
+ wr_buf += block_size;
+ rd_buf += block_size;
+
+ if (md_check) {
+ if (memcmp(wr_md_buf, rd_md_buf, md_size) != 0) {
+ return false;
+ }
+
+ wr_md_buf += md_offset;
+ rd_md_buf += md_offset;
+ }
+
+ offset_blocks++;
+ }
+
+ return true;
+}
+
+static void
+free_job_config(void)
+{
+ struct job_config *config, *tmp;
+
+ spdk_conf_free(g_bdevperf_conf);
+ g_bdevperf_conf = NULL;
+
+ TAILQ_FOREACH_SAFE(config, &job_config_list, link, tmp) {
+ TAILQ_REMOVE(&job_config_list, config, link);
+ free(config);
+ }
+}
+
+static void
+bdevperf_test_done(void *ctx)
+{
+ struct bdevperf_job *job, *jtmp;
+ struct bdevperf_task *task, *ttmp;
+
+ if (g_time_in_usec && !g_run_rc) {
+ g_stats.io_time_in_usec = g_time_in_usec;
+
+ if (g_performance_dump_active) {
+ spdk_thread_send_msg(spdk_get_thread(), bdevperf_test_done, NULL);
+ return;
+ }
+ } else {
+ printf("Job run time less than one microsecond, no performance data will be shown\n");
+ }
+
+ if (g_show_performance_real_time) {
+ spdk_poller_unregister(&g_perf_timer);
+ }
+
+ if (g_shutdown) {
+ g_time_in_usec = g_shutdown_tsc * 1000000 / spdk_get_ticks_hz();
+ printf("Received shutdown signal, test time was about %.6f seconds\n",
+ (double)g_time_in_usec / 1000000);
+ }
+
+ TAILQ_FOREACH_SAFE(job, &g_bdevperf.jobs, link, jtmp) {
+ TAILQ_REMOVE(&g_bdevperf.jobs, job, link);
+
+ performance_dump_job(&g_stats, job);
+
+ TAILQ_FOREACH_SAFE(task, &job->task_list, link, ttmp) {
+ TAILQ_REMOVE(&job->task_list, task, link);
+ spdk_free(task->buf);
+ spdk_free(task->md_buf);
+ free(task);
+ }
+
+ if (job->verify) {
+ spdk_bit_array_free(&job->outstanding);
+ }
+
+ free(job->name);
+ free(job);
+ }
+
+ printf("\r =====================================================\n");
+ printf("\r %-20s: %10.2f IOPS %10.2f MiB/s\n",
+ "Total", g_stats.total_io_per_second, g_stats.total_mb_per_second);
+ if (g_stats.total_failed_per_second != 0 || g_stats.total_timeout_per_second != 0) {
+ printf("\r %-20s: %10.2f Fail/s %8.2f TO/s\n",
+ "", g_stats.total_failed_per_second, g_stats.total_timeout_per_second);
+ }
+ fflush(stdout);
+
+ if (g_request && !g_shutdown) {
+ rpc_perform_tests_cb();
+ } else {
+ spdk_app_stop(g_run_rc);
+ }
+}
+
+static void
+bdevperf_job_end(void *ctx)
+{
+ assert(g_master_thread == spdk_get_thread());
+
+ if (--g_bdevperf.running_jobs == 0) {
+ bdevperf_test_done(NULL);
+ }
+}
+
+static void
+bdevperf_queue_io_wait_with_cb(struct bdevperf_task *task, spdk_bdev_io_wait_cb cb_fn)
+{
+ struct bdevperf_job *job = task->job;
+
+ task->bdev_io_wait.bdev = job->bdev;
+ task->bdev_io_wait.cb_fn = cb_fn;
+ task->bdev_io_wait.cb_arg = task;
+ spdk_bdev_queue_io_wait(job->bdev, job->ch, &task->bdev_io_wait);
+}
+
+static int
+bdevperf_job_drain(void *ctx)
+{
+ struct bdevperf_job *job = ctx;
+
+ spdk_poller_unregister(&job->run_timer);
+ if (job->reset) {
+ spdk_poller_unregister(&job->reset_timer);
+ }
+
+ job->is_draining = true;
+
+ return -1;
+}
+
+static void
+bdevperf_abort_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ struct bdevperf_task *task = cb_arg;
+ struct bdevperf_job *job = task->job;
+
+ job->current_queue_depth--;
+
+ if (success) {
+ job->io_completed++;
+ } else {
+ job->io_failed++;
+ if (!job->continue_on_failure) {
+ bdevperf_job_drain(job);
+ g_run_rc = -1;
+ }
+ }
+
+ spdk_bdev_free_io(bdev_io);
+
+ /* Return task to free list because abort is submitted on demand. */
+ TAILQ_INSERT_TAIL(&job->task_list, task, link);
+
+ if (job->is_draining) {
+ if (job->current_queue_depth == 0) {
+ spdk_put_io_channel(job->ch);
+ spdk_bdev_close(job->bdev_desc);
+ spdk_thread_send_msg(g_master_thread, bdevperf_job_end, NULL);
+ }
+ }
+}
+
+static void
+bdevperf_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ struct bdevperf_job *job;
+ struct bdevperf_task *task = cb_arg;
+ struct iovec *iovs;
+ int iovcnt;
+ bool md_check;
+ uint64_t offset_in_ios;
+
+ job = task->job;
+ md_check = spdk_bdev_get_dif_type(job->bdev) == SPDK_DIF_DISABLE;
+
+ if (!success) {
+ if (!job->reset && !job->continue_on_failure) {
+ bdevperf_job_drain(job);
+ g_run_rc = -1;
+ printf("task offset: %lu on job bdev=%s fails\n",
+ task->offset_blocks, job->name);
+ }
+ } else if (job->verify || job->reset) {
+ spdk_bdev_io_get_iovec(bdev_io, &iovs, &iovcnt);
+ assert(iovcnt == 1);
+ assert(iovs != NULL);
+ if (!verify_data(task->buf, job->buf_size, iovs[0].iov_base, iovs[0].iov_len,
+ spdk_bdev_get_block_size(job->bdev),
+ task->md_buf, spdk_bdev_io_get_md_buf(bdev_io),
+ spdk_bdev_get_md_size(job->bdev),
+ job->io_size_blocks, md_check)) {
+ printf("Buffer mismatch! Target: %s Disk Offset: %lu\n", job->name, task->offset_blocks);
+ printf(" First dword expected 0x%x got 0x%x\n", *(int *)task->buf, *(int *)iovs[0].iov_base);
+ bdevperf_job_drain(job);
+ g_run_rc = -1;
+ }
+ }
+
+ job->current_queue_depth--;
+
+ if (success) {
+ job->io_completed++;
+ } else {
+ job->io_failed++;
+ }
+
+ if (job->verify) {
+ assert(task->offset_blocks / job->io_size_blocks >= job->ios_base);
+ offset_in_ios = task->offset_blocks / job->io_size_blocks - job->ios_base;
+
+ assert(spdk_bit_array_get(job->outstanding, offset_in_ios) == true);
+ spdk_bit_array_clear(job->outstanding, offset_in_ios);
+ }
+
+ spdk_bdev_free_io(bdev_io);
+
+ /*
+ * is_draining indicates when time has expired for the test run
+ * and we are just waiting for the previously submitted I/O
+ * to complete. In this case, do not submit a new I/O to replace
+ * the one just completed.
+ */
+ if (!job->is_draining) {
+ bdevperf_submit_single(job, task);
+ } else {
+ TAILQ_INSERT_TAIL(&job->task_list, task, link);
+ if (job->current_queue_depth == 0) {
+ spdk_put_io_channel(job->ch);
+ spdk_bdev_close(job->bdev_desc);
+ spdk_thread_send_msg(g_master_thread, bdevperf_job_end, NULL);
+ }
+ }
+}
+
+static void
+bdevperf_verify_submit_read(void *cb_arg)
+{
+ struct bdevperf_job *job;
+ struct bdevperf_task *task = cb_arg;
+ int rc;
+
+ job = task->job;
+
+ /* Read the data back in */
+ if (spdk_bdev_is_md_separate(job->bdev)) {
+ rc = spdk_bdev_read_blocks_with_md(job->bdev_desc, job->ch, NULL, NULL,
+ task->offset_blocks, job->io_size_blocks,
+ bdevperf_complete, task);
+ } else {
+ rc = spdk_bdev_read_blocks(job->bdev_desc, job->ch, NULL,
+ task->offset_blocks, job->io_size_blocks,
+ bdevperf_complete, task);
+ }
+
+ if (rc == -ENOMEM) {
+ bdevperf_queue_io_wait_with_cb(task, bdevperf_verify_submit_read);
+ } else if (rc != 0) {
+ printf("Failed to submit read: %d\n", rc);
+ bdevperf_job_drain(job);
+ g_run_rc = rc;
+ }
+}
+
+static void
+bdevperf_verify_write_complete(struct spdk_bdev_io *bdev_io, bool success,
+ void *cb_arg)
+{
+ if (success) {
+ spdk_bdev_free_io(bdev_io);
+ bdevperf_verify_submit_read(cb_arg);
+ } else {
+ bdevperf_complete(bdev_io, success, cb_arg);
+ }
+}
+
+static void
+bdevperf_zcopy_populate_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ if (!success) {
+ bdevperf_complete(bdev_io, success, cb_arg);
+ return;
+ }
+
+ spdk_bdev_zcopy_end(bdev_io, false, bdevperf_complete, cb_arg);
+}
+
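+/* Generate DIF (interleaved metadata) or DIX (separate metadata) protection
+ * information for the task's write buffer before it is submitted. */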
+static int
+bdevperf_generate_dif(struct bdevperf_task *task)
+{
+ struct bdevperf_job *job = task->job;
+ struct spdk_bdev *bdev = job->bdev;
+ struct spdk_dif_ctx dif_ctx;
+ int rc;
+
+ rc = spdk_dif_ctx_init(&dif_ctx,
+ spdk_bdev_get_block_size(bdev),
+ spdk_bdev_get_md_size(bdev),
+ spdk_bdev_is_md_interleaved(bdev),
+ spdk_bdev_is_dif_head_of_md(bdev),
+ spdk_bdev_get_dif_type(bdev),
+ job->dif_check_flags,
+ task->offset_blocks, 0, 0, 0, 0);
+ if (rc != 0) {
+ fprintf(stderr, "Initialization of DIF context failed\n");
+ return rc;
+ }
+
+ if (spdk_bdev_is_md_interleaved(bdev)) {
+ rc = spdk_dif_generate(&task->iov, 1, job->io_size_blocks, &dif_ctx);
+ } else {
+ struct iovec md_iov = {
+ .iov_base = task->md_buf,
+ .iov_len = spdk_bdev_get_md_size(bdev) * job->io_size_blocks,
+ };
+
+ rc = spdk_dix_generate(&task->iov, 1, &md_iov, job->io_size_blocks, &dif_ctx);
+ }
+
+ if (rc != 0) {
+ fprintf(stderr, "Generation of DIF/DIX failed\n");
+ }
+
+ return rc;
+}
+
+static void
+bdevperf_submit_task(void *arg)
+{
+ struct bdevperf_task *task = arg;
+ struct bdevperf_job *job = task->job;
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *ch;
+ spdk_bdev_io_completion_cb cb_fn;
+ uint64_t offset_in_ios;
+ int rc = 0;
+
+ desc = job->bdev_desc;
+ ch = job->ch;
+
+ switch (task->io_type) {
+ case SPDK_BDEV_IO_TYPE_WRITE:
+ if (spdk_bdev_get_md_size(job->bdev) != 0 && job->dif_check_flags != 0) {
+ rc = bdevperf_generate_dif(task);
+ }
+ if (rc == 0) {
+ cb_fn = (job->verify || job->reset) ? bdevperf_verify_write_complete : bdevperf_complete;
+
+ if (g_zcopy) {
+ spdk_bdev_zcopy_end(task->bdev_io, true, cb_fn, task);
+ return;
+ } else {
+ if (spdk_bdev_is_md_separate(job->bdev)) {
+ rc = spdk_bdev_writev_blocks_with_md(desc, ch, &task->iov, 1,
+ task->md_buf,
+ task->offset_blocks,
+ job->io_size_blocks,
+ cb_fn, task);
+ } else {
+ rc = spdk_bdev_writev_blocks(desc, ch, &task->iov, 1,
+ task->offset_blocks,
+ job->io_size_blocks,
+ cb_fn, task);
+ }
+ }
+ }
+ break;
+ case SPDK_BDEV_IO_TYPE_FLUSH:
+ rc = spdk_bdev_flush_blocks(desc, ch, task->offset_blocks,
+ job->io_size_blocks, bdevperf_complete, task);
+ break;
+ case SPDK_BDEV_IO_TYPE_UNMAP:
+ rc = spdk_bdev_unmap_blocks(desc, ch, task->offset_blocks,
+ job->io_size_blocks, bdevperf_complete, task);
+ break;
+ case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
+ rc = spdk_bdev_write_zeroes_blocks(desc, ch, task->offset_blocks,
+ job->io_size_blocks, bdevperf_complete, task);
+ break;
+ case SPDK_BDEV_IO_TYPE_READ:
+ if (g_zcopy) {
+ rc = spdk_bdev_zcopy_start(desc, ch, task->offset_blocks, job->io_size_blocks,
+ true, bdevperf_zcopy_populate_complete, task);
+ } else {
+ if (spdk_bdev_is_md_separate(job->bdev)) {
+ rc = spdk_bdev_read_blocks_with_md(desc, ch, task->buf, task->md_buf,
+ task->offset_blocks,
+ job->io_size_blocks,
+ bdevperf_complete, task);
+ } else {
+ rc = spdk_bdev_read_blocks(desc, ch, task->buf, task->offset_blocks,
+ job->io_size_blocks, bdevperf_complete, task);
+ }
+ }
+ break;
+ case SPDK_BDEV_IO_TYPE_ABORT:
+ rc = spdk_bdev_abort(desc, ch, task->task_to_abort, bdevperf_abort_complete, task);
+ break;
+ default:
+ assert(false);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc == -ENOMEM) {
+ bdevperf_queue_io_wait_with_cb(task, bdevperf_submit_task);
+ return;
+ } else if (rc != 0) {
+ printf("Failed to submit bdev_io: %d\n", rc);
+ if (job->verify) {
+ assert(task->offset_blocks / job->io_size_blocks >= job->ios_base);
+ offset_in_ios = task->offset_blocks / job->io_size_blocks - job->ios_base;
+
+ assert(spdk_bit_array_get(job->outstanding, offset_in_ios) == true);
+ spdk_bit_array_clear(job->outstanding, offset_in_ios);
+ }
+ bdevperf_job_drain(job);
+ g_run_rc = rc;
+ return;
+ }
+
+ job->current_queue_depth++;
+}
+
+static void
+bdevperf_zcopy_get_buf_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ struct bdevperf_task *task = cb_arg;
+ struct bdevperf_job *job = task->job;
+ struct iovec *iovs;
+ int iovcnt;
+
+ if (!success) {
+ bdevperf_job_drain(job);
+ g_run_rc = -1;
+ return;
+ }
+
+ task->bdev_io = bdev_io;
+ task->io_type = SPDK_BDEV_IO_TYPE_WRITE;
+
+ if (job->verify || job->reset) {
+ /* When job->verify or job->reset is enabled, task->buf is used for
+ * verification of read after write. For write I/O, when zcopy APIs
+ * are used, task->buf cannot be used, and data must be written to
+ * the data buffer allocated underneath the bdev layer instead.
+ * Hence we copy task->buf to the allocated data buffer here.
+ */
+ spdk_bdev_io_get_iovec(bdev_io, &iovs, &iovcnt);
+ assert(iovcnt == 1);
+ assert(iovs != NULL);
+
+ copy_data(iovs[0].iov_base, iovs[0].iov_len, task->buf, job->buf_size,
+ spdk_bdev_get_block_size(job->bdev),
+ spdk_bdev_io_get_md_buf(bdev_io), task->md_buf,
+ spdk_bdev_get_md_size(job->bdev), job->io_size_blocks);
+ }
+
+ bdevperf_submit_task(task);
+}
+
+static void
+bdevperf_prep_zcopy_write_task(void *arg)
+{
+ struct bdevperf_task *task = arg;
+ struct bdevperf_job *job = task->job;
+ int rc;
+
+ rc = spdk_bdev_zcopy_start(job->bdev_desc, job->ch,
+ task->offset_blocks, job->io_size_blocks,
+ false, bdevperf_zcopy_get_buf_complete, task);
+ if (rc != 0) {
+ assert(rc == -ENOMEM);
+ bdevperf_queue_io_wait_with_cb(task, bdevperf_prep_zcopy_write_task);
+ return;
+ }
+
+ job->current_queue_depth++;
+}
+
+static struct bdevperf_task *
+bdevperf_job_get_task(struct bdevperf_job *job)
+{
+ struct bdevperf_task *task;
+
+ task = TAILQ_FIRST(&job->task_list);
+ if (!task) {
+ printf("Task allocation failed\n");
+ abort();
+ }
+
+ TAILQ_REMOVE(&job->task_list, task, link);
+ return task;
+}
+
+static __thread unsigned int seed = 0;
+
+static void
+bdevperf_submit_single(struct bdevperf_job *job, struct bdevperf_task *task)
+{
+ uint64_t offset_in_ios;
+
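+ /* Pick the next offset: random jobs choose a uniformly random slot within the
+ * job's range, while sequential jobs advance through the range and wrap around.
+ */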
+ if (job->is_random) {
+ offset_in_ios = rand_r(&seed) % job->size_in_ios;
+ } else {
+ offset_in_ios = job->offset_in_ios++;
+ if (job->offset_in_ios == job->size_in_ios) {
+ job->offset_in_ios = 0;
+ }
+
+ /* Increment offset_in_ios if there's already an outstanding I/O
+ * to that location. This is only needed with job->verify, as random
+ * offsets are not supported with job->verify at this time.
+ */
+ if (job->verify) {
+ assert(spdk_bit_array_find_first_clear(job->outstanding, 0) != UINT32_MAX);
+
+ while (spdk_bit_array_get(job->outstanding, offset_in_ios)) {
+ offset_in_ios = job->offset_in_ios++;
+ if (job->offset_in_ios == job->size_in_ios) {
+ job->offset_in_ios = 0;
+ }
+ }
+ spdk_bit_array_set(job->outstanding, offset_in_ios);
+ }
+ }
+
+ /* When multiple threads target the same bdev, offset_in_ios is relative
+ * to the LBA range assigned to this job, while task->offset_blocks
+ * is absolute (entire bdev LBA range).
+ */
+ task->offset_blocks = (offset_in_ios + job->ios_base) * job->io_size_blocks;
+
+ if (job->verify || job->reset) {
+ generate_data(task->buf, job->buf_size,
+ spdk_bdev_get_block_size(job->bdev),
+ task->md_buf, spdk_bdev_get_md_size(job->bdev),
+ job->io_size_blocks, rand_r(&seed) % 256);
+ if (g_zcopy) {
+ bdevperf_prep_zcopy_write_task(task);
+ return;
+ } else {
+ task->iov.iov_base = task->buf;
+ task->iov.iov_len = job->buf_size;
+ task->io_type = SPDK_BDEV_IO_TYPE_WRITE;
+ }
+ } else if (job->flush) {
+ task->io_type = SPDK_BDEV_IO_TYPE_FLUSH;
+ } else if (job->unmap) {
+ task->io_type = SPDK_BDEV_IO_TYPE_UNMAP;
+ } else if (job->write_zeroes) {
+ task->io_type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
+ } else if ((job->rw_percentage == 100) ||
+ (job->rw_percentage != 0 && ((rand_r(&seed) % 100) < job->rw_percentage))) {
+ task->io_type = SPDK_BDEV_IO_TYPE_READ;
+ } else {
+ if (g_zcopy) {
+ bdevperf_prep_zcopy_write_task(task);
+ return;
+ } else {
+ task->iov.iov_base = task->buf;
+ task->iov.iov_len = job->buf_size;
+ task->io_type = SPDK_BDEV_IO_TYPE_WRITE;
+ }
+ }
+
+ bdevperf_submit_task(task);
+}
+
+static int reset_job(void *arg);
+
+static void
+reset_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ struct bdevperf_task *task = cb_arg;
+ struct bdevperf_job *job = task->job;
+
+ if (!success) {
+ printf("Reset blockdev=%s failed\n", spdk_bdev_get_name(job->bdev));
+ bdevperf_job_drain(job);
+ g_run_rc = -1;
+ }
+
+ TAILQ_INSERT_TAIL(&job->task_list, task, link);
+ spdk_bdev_free_io(bdev_io);
+
+ job->reset_timer = SPDK_POLLER_REGISTER(reset_job, job,
+ 10 * 1000000);
+}
+
+static int
+reset_job(void *arg)
+{
+ struct bdevperf_job *job = arg;
+ struct bdevperf_task *task;
+ int rc;
+
+ spdk_poller_unregister(&job->reset_timer);
+
+ /* Do reset. */
+ task = bdevperf_job_get_task(job);
+ rc = spdk_bdev_reset(job->bdev_desc, job->ch,
+ reset_cb, task);
+ if (rc) {
+ printf("Reset failed: %d\n", rc);
+ bdevperf_job_drain(job);
+ g_run_rc = -1;
+ }
+
+ return -1;
+}
+
+static void
+bdevperf_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
+{
+ struct bdevperf_job *job = cb_arg;
+ struct bdevperf_task *task;
+
+ job->io_timeout++;
+
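+ /* The timeout has been counted above. Additionally submit an abort for the
+ * timed-out I/O when abort (-A) is enabled and the bdev supports it.
+ */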
+ if (job->is_draining || !job->abort ||
+ !spdk_bdev_io_type_supported(job->bdev, SPDK_BDEV_IO_TYPE_ABORT)) {
+ return;
+ }
+
+ task = bdevperf_job_get_task(job);
+ if (task == NULL) {
+ return;
+ }
+
+ task->task_to_abort = spdk_bdev_io_get_cb_arg(bdev_io);
+ task->io_type = SPDK_BDEV_IO_TYPE_ABORT;
+
+ bdevperf_submit_task(task);
+}
+
+static void
+bdevperf_job_run(void *ctx)
+{
+ struct bdevperf_job *job = ctx;
+ struct bdevperf_task *task;
+ int i;
+
+ /* Submit initial I/O for this job. Each time one
+ * completes, another will be submitted. */
+
+ /* Start a timer to stop this I/O chain when the run is over */
+ job->run_timer = SPDK_POLLER_REGISTER(bdevperf_job_drain, job, g_time_in_usec);
+ if (job->reset) {
+ job->reset_timer = SPDK_POLLER_REGISTER(reset_job, job,
+ 10 * 1000000);
+ }
+
+ spdk_bdev_set_timeout(job->bdev_desc, g_timeout_in_sec, bdevperf_timeout_cb, job);
+
+ for (i = 0; i < job->queue_depth; i++) {
+ task = bdevperf_job_get_task(job);
+ bdevperf_submit_single(job, task);
+ }
+}
+
+static void
+_performance_dump_done(void *ctx)
+{
+ struct bdevperf_aggregate_stats *stats = ctx;
+
+ printf("\r =====================================================\n");
+ printf("\r %-20s: %10.2f IOPS %10.2f MiB/s\n",
+ "Total", stats->total_io_per_second, stats->total_mb_per_second);
+ if (stats->total_failed_per_second != 0 || stats->total_timeout_per_second != 0) {
+ printf("\r %-20s: %10.2f Fail/s %8.2f TO/s\n",
+ "", stats->total_failed_per_second, stats->total_timeout_per_second);
+ }
+ fflush(stdout);
+
+ g_performance_dump_active = false;
+
+ free(stats);
+}
+
+static void
+_performance_dump(void *ctx)
+{
+ struct bdevperf_aggregate_stats *stats = ctx;
+
+ performance_dump_job(stats, stats->current_job);
+
+ /* This assumes the jobs list is static after start up time.
+ * That's true right now, but if that ever changed this would need a lock. */
+ stats->current_job = TAILQ_NEXT(stats->current_job, link);
+ if (stats->current_job == NULL) {
+ spdk_thread_send_msg(g_master_thread, _performance_dump_done, stats);
+ } else {
+ spdk_thread_send_msg(stats->current_job->thread, _performance_dump, stats);
+ }
+}
+
+static int
+performance_statistics_thread(void *arg)
+{
+ struct bdevperf_aggregate_stats *stats;
+
+ if (g_performance_dump_active) {
+ return -1;
+ }
+
+ g_performance_dump_active = true;
+
+ stats = calloc(1, sizeof(*stats));
+ if (stats == NULL) {
+ return -1;
+ }
+
+ g_show_performance_period_num++;
+
+ stats->io_time_in_usec = g_show_performance_period_num * g_show_performance_period_in_usec;
+ stats->ema_period = g_show_performance_ema_period;
+
+ /* Iterate all of the jobs to gather stats.
+ * These jobs will not get removed until a final performance dump is run,
+ * so this should be safe without locking.
+ */
+ stats->current_job = TAILQ_FIRST(&g_bdevperf.jobs);
+ if (stats->current_job == NULL) {
+ spdk_thread_send_msg(g_master_thread, _performance_dump_done, stats);
+ } else {
+ spdk_thread_send_msg(stats->current_job->thread, _performance_dump, stats);
+ }
+
+ return -1;
+}
+
+static void
+bdevperf_test(void)
+{
+ struct bdevperf_job *job;
+
+ printf("Running I/O for %" PRIu64 " seconds...\n", g_time_in_usec / 1000000);
+ fflush(stdout);
+
+ /* Start a timer to dump performance numbers */
+ g_shutdown_tsc = spdk_get_ticks();
+ if (g_show_performance_real_time) {
+ g_perf_timer = SPDK_POLLER_REGISTER(performance_statistics_thread, NULL,
+ g_show_performance_period_in_usec);
+ }
+
+ /* Iterate jobs to start all I/O */
+ TAILQ_FOREACH(job, &g_bdevperf.jobs, link) {
+ g_bdevperf.running_jobs++;
+ spdk_thread_send_msg(job->thread, bdevperf_job_run, job);
+ }
+}
+
+static void
+bdevperf_bdev_removed(void *arg)
+{
+ struct bdevperf_job *job = arg;
+
+ bdevperf_job_drain(job);
+}
+
+static uint32_t g_construct_job_count = 0;
+
+static void
+_bdevperf_construct_job_done(void *ctx)
+{
+ if (--g_construct_job_count == 0) {
+
+ if (g_run_rc != 0) {
+ /* Something failed. */
+ bdevperf_test_done(NULL);
+ return;
+ }
+
+ /* Ready to run the test */
+ bdevperf_test();
+ }
+}
+
+/* The format checker does not allow using an inlined type here,
+ so this typedef is a workaround. */
+typedef struct spdk_thread *spdk_thread_t;
+
+static spdk_thread_t
+construct_job_thread(struct spdk_cpuset *cpumask, const char *tag)
+{
+ char thread_name[32];
+ struct spdk_cpuset tmp;
+
+ /* This function runs on the master thread. */
+ assert(g_master_thread == spdk_get_thread());
+
+ /* Handle default mask */
+ if (spdk_cpuset_count(cpumask) == 0) {
+ cpumask = &g_all_cpuset;
+ }
+
+ /* Warn user that mask might need to be changed */
+ spdk_cpuset_copy(&tmp, cpumask);
+ spdk_cpuset_or(&tmp, &g_all_cpuset);
+ if (!spdk_cpuset_equal(&tmp, &g_all_cpuset)) {
+ fprintf(stderr, "cpumask for '%s' is too big\n", tag);
+ }
+
+ snprintf(thread_name, sizeof(thread_name), "%s_%s",
+ tag,
+ spdk_cpuset_fmt(cpumask));
+
+ return spdk_thread_create(thread_name, cpumask);
+}
+
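+/* Return CPU cores in round-robin order, wrapping back to the first core.
+ * Used to pin each CLI-constructed job to its own core.
+ */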
+static uint32_t
+_get_next_core(void)
+{
+ static uint32_t current_core = SPDK_ENV_LCORE_ID_ANY;
+
+ if (current_core == SPDK_ENV_LCORE_ID_ANY) {
+ current_core = spdk_env_get_first_core();
+ return current_core;
+ }
+
+ current_core = spdk_env_get_next_core(current_core);
+ if (current_core == SPDK_ENV_LCORE_ID_ANY) {
+ current_core = spdk_env_get_first_core();
+ }
+
+ return current_core;
+}
+
+static void
+_bdevperf_construct_job(void *ctx)
+{
+ struct bdevperf_job *job = ctx;
+ int rc;
+
+ rc = spdk_bdev_open(job->bdev, true, bdevperf_bdev_removed, job, &job->bdev_desc);
+ if (rc != 0) {
+ SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(job->bdev), rc);
+ g_run_rc = -EINVAL;
+ goto end;
+ }
+
+ job->ch = spdk_bdev_get_io_channel(job->bdev_desc);
+ if (!job->ch) {
+ SPDK_ERRLOG("Could not get io_channel for device %s, error=%d\n", spdk_bdev_get_name(job->bdev),
+ rc);
+ g_run_rc = -ENOMEM;
+ goto end;
+ }
+
+end:
+ spdk_thread_send_msg(g_master_thread, _bdevperf_construct_job_done, NULL);
+}
+
+static void
+job_init_rw(struct bdevperf_job *job, enum job_config_rw rw)
+{
+ switch (rw) {
+ case JOB_CONFIG_RW_READ:
+ job->rw_percentage = 100;
+ break;
+ case JOB_CONFIG_RW_WRITE:
+ job->rw_percentage = 0;
+ break;
+ case JOB_CONFIG_RW_RANDREAD:
+ job->is_random = true;
+ job->rw_percentage = 100;
+ break;
+ case JOB_CONFIG_RW_RANDWRITE:
+ job->is_random = true;
+ job->rw_percentage = 0;
+ break;
+ case JOB_CONFIG_RW_RW:
+ job->is_random = false;
+ break;
+ case JOB_CONFIG_RW_RANDRW:
+ job->is_random = true;
+ break;
+ case JOB_CONFIG_RW_VERIFY:
+ job->verify = true;
+ job->rw_percentage = 50;
+ break;
+ case JOB_CONFIG_RW_RESET:
+ job->reset = true;
+ job->verify = true;
+ job->rw_percentage = 50;
+ break;
+ case JOB_CONFIG_RW_UNMAP:
+ job->unmap = true;
+ break;
+ case JOB_CONFIG_RW_FLUSH:
+ job->flush = true;
+ break;
+ case JOB_CONFIG_RW_WRITE_ZEROES:
+ job->write_zeroes = true;
+ break;
+ }
+}
+
+static int
+bdevperf_construct_job(struct spdk_bdev *bdev, struct job_config *config,
+ struct spdk_thread *thread)
+{
+ struct bdevperf_job *job;
+ struct bdevperf_task *task;
+ int block_size, data_block_size;
+ int rc;
+ int task_num, n;
+
+ block_size = spdk_bdev_get_block_size(bdev);
+ data_block_size = spdk_bdev_get_data_block_size(bdev);
+
+ job = calloc(1, sizeof(struct bdevperf_job));
+ if (!job) {
+ fprintf(stderr, "Unable to allocate memory for new job.\n");
+ return -ENOMEM;
+ }
+
+ job->name = strdup(spdk_bdev_get_name(bdev));
+ if (!job->name) {
+ fprintf(stderr, "Unable to allocate memory for job name.\n");
+ free(job);
+ return -ENOMEM;
+ }
+
+ job->workload_type = g_workload_type;
+ job->io_size = config->bs;
+ job->rw_percentage = config->rwmixread;
+ job->continue_on_failure = g_continue_on_failure;
+ job->queue_depth = config->iodepth;
+ job->bdev = bdev;
+ job->io_size_blocks = job->io_size / data_block_size;
+ job->buf_size = job->io_size_blocks * block_size;
+ job_init_rw(job, config->rw);
+
+ if ((job->io_size % data_block_size) != 0) {
+ SPDK_ERRLOG("IO size (%d) is not multiples of data block size of bdev %s (%"PRIu32")\n",
+ job->io_size, spdk_bdev_get_name(bdev), data_block_size);
+ free(job->name);
+ free(job);
+ return -ENOTSUP;
+ }
+
+ if (job->unmap && !spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
+ printf("Skipping %s because it does not support unmap\n", spdk_bdev_get_name(bdev));
+ free(job->name);
+ free(job);
+ return -ENOTSUP;
+ }
+
+ if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_REFTAG)) {
+ job->dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
+ }
+ if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_GUARD)) {
+ job->dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
+ }
+
+ job->offset_in_ios = 0;
+
+ if (config->length != 0) {
+ /* Use subset of disk */
+ job->size_in_ios = config->length / job->io_size_blocks;
+ job->ios_base = config->offset / job->io_size_blocks;
+ } else {
+ /* Use whole disk */
+ job->size_in_ios = spdk_bdev_get_num_blocks(bdev) / job->io_size_blocks;
+ job->ios_base = 0;
+ }
+
+ if (job->verify) {
+ job->outstanding = spdk_bit_array_create(job->size_in_ios);
+ if (job->outstanding == NULL) {
+ SPDK_ERRLOG("Could not create outstanding array bitmap for bdev %s\n",
+ spdk_bdev_get_name(bdev));
+ free(job->name);
+ free(job);
+ return -ENOMEM;
+ }
+ }
+
+ TAILQ_INIT(&job->task_list);
+
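+ /* Pre-allocate tasks: one per queue slot, plus one for the periodic reset and,
+ * when abort is enabled, one extra per queue slot for abort requests.
+ */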
+ task_num = job->queue_depth;
+ if (job->reset) {
+ task_num += 1;
+ }
+ if (job->abort) {
+ task_num += job->queue_depth;
+ }
+
+ TAILQ_INSERT_TAIL(&g_bdevperf.jobs, job, link);
+
+ for (n = 0; n < task_num; n++) {
+ task = calloc(1, sizeof(struct bdevperf_task));
+ if (!task) {
+ fprintf(stderr, "Failed to allocate task from memory\n");
+ return -ENOMEM;
+ }
+
+ task->buf = spdk_zmalloc(job->buf_size, spdk_bdev_get_buf_align(job->bdev), NULL,
+ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ if (!task->buf) {
+ fprintf(stderr, "Cannot allocate buf for task=%p\n", task);
+ free(task);
+ return -ENOMEM;
+ }
+
+ if (spdk_bdev_is_md_separate(job->bdev)) {
+ task->md_buf = spdk_zmalloc(job->io_size_blocks *
+ spdk_bdev_get_md_size(job->bdev), 0, NULL,
+ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ if (!task->md_buf) {
+ fprintf(stderr, "Cannot allocate md buf for task=%p\n", task);
+ spdk_free(task->buf);
+ free(task);
+ return -ENOMEM;
+ }
+ }
+
+ task->job = job;
+ TAILQ_INSERT_TAIL(&job->task_list, task, link);
+ }
+
+ job->thread = thread;
+
+ g_construct_job_count++;
+
+ rc = spdk_thread_send_msg(thread, _bdevperf_construct_job, job);
+ assert(rc == 0);
+
+ return rc;
+}
+
+static int
+parse_rw(const char *str, enum job_config_rw ret)
+{
+ if (str == NULL) {
+ return ret;
+ }
+
+ if (!strcmp(str, "read")) {
+ ret = JOB_CONFIG_RW_READ;
+ } else if (!strcmp(str, "randread")) {
+ ret = JOB_CONFIG_RW_RANDREAD;
+ } else if (!strcmp(str, "write")) {
+ ret = JOB_CONFIG_RW_WRITE;
+ } else if (!strcmp(str, "randwrite")) {
+ ret = JOB_CONFIG_RW_RANDWRITE;
+ } else if (!strcmp(str, "verify")) {
+ ret = JOB_CONFIG_RW_VERIFY;
+ } else if (!strcmp(str, "reset")) {
+ ret = JOB_CONFIG_RW_RESET;
+ } else if (!strcmp(str, "unmap")) {
+ ret = JOB_CONFIG_RW_UNMAP;
+ } else if (!strcmp(str, "write_zeroes")) {
+ ret = JOB_CONFIG_RW_WRITE_ZEROES;
+ } else if (!strcmp(str, "flush")) {
+ ret = JOB_CONFIG_RW_FLUSH;
+ } else if (!strcmp(str, "rw")) {
+ ret = JOB_CONFIG_RW_RW;
+ } else if (!strcmp(str, "randrw")) {
+ ret = JOB_CONFIG_RW_RANDRW;
+ } else {
+ fprintf(stderr, "rw must be one of\n"
+ "(read, write, randread, randwrite, rw, randrw, verify, reset, unmap, flush)\n");
+ ret = BDEVPERF_CONFIG_ERROR;
+ }
+
+ return ret;
+}
+
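+/* Copy the next ':'-separated filename from 'filename' into 'out', skipping
+ * spaces and tabs, and return a pointer to the unparsed remainder of the list.
+ */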
+static const char *
+config_filename_next(const char *filename, char *out)
+{
+ int i, k;
+
+ if (filename == NULL) {
+ out[0] = '\0';
+ return NULL;
+ }
+
+ if (filename[0] == ':') {
+ filename++;
+ }
+
+ for (i = 0, k = 0;
+ filename[i] != '\0' &&
+ filename[i] != ':' &&
+ i < BDEVPERF_CONFIG_MAX_FILENAME;
+ i++) {
+ if (filename[i] == ' ' || filename[i] == '\t') {
+ continue;
+ }
+
+ out[k++] = filename[i];
+ }
+ out[k] = 0;
+
+ return filename + i;
+}
+
+static void
+bdevperf_construct_config_jobs(void)
+{
+ char filename[BDEVPERF_CONFIG_MAX_FILENAME];
+ struct spdk_thread *thread;
+ struct job_config *config;
+ struct spdk_bdev *bdev;
+ const char *filenames;
+ int rc;
+
+ TAILQ_FOREACH(config, &job_config_list, link) {
+ filenames = config->filename;
+
+ thread = construct_job_thread(&config->cpumask, config->name);
+ assert(thread);
+
+ while (filenames) {
+ filenames = config_filename_next(filenames, filename);
+ if (strlen(filename) == 0) {
+ break;
+ }
+
+ bdev = spdk_bdev_get_by_name(filename);
+ if (!bdev) {
+ fprintf(stderr, "Unable to find bdev '%s'\n", filename);
+ g_run_rc = -EINVAL;
+ return;
+ }
+
+ rc = bdevperf_construct_job(bdev, config, thread);
+ if (rc < 0) {
+ g_run_rc = rc;
+ return;
+ }
+ }
+ }
+}
+
+static int
+make_cli_job_config(const char *filename, int offset, int range)
+{
+ struct job_config *config = calloc(1, sizeof(*config));
+
+ if (config == NULL) {
+ fprintf(stderr, "Unable to allocate memory for job config\n");
+ return -ENOMEM;
+ }
+
+ config->name = filename;
+ config->filename = filename;
+ spdk_cpuset_zero(&config->cpumask);
+ spdk_cpuset_set_cpu(&config->cpumask, _get_next_core(), true);
+ config->bs = g_io_size;
+ config->iodepth = g_queue_depth;
+ config->rwmixread = g_rw_percentage;
+ config->offset = offset;
+ config->length = range;
+ config->rw = parse_rw(g_workload_type, BDEVPERF_CONFIG_ERROR);
+ if ((int)config->rw == BDEVPERF_CONFIG_ERROR) {
+ return -EINVAL;
+ }
+
+ TAILQ_INSERT_TAIL(&job_config_list, config, link);
+ return 0;
+}
+
+static void
+bdevperf_construct_multithread_jobs(void)
+{
+ struct spdk_bdev *bdev;
+ uint32_t i;
+ uint32_t num_cores;
+ uint32_t blocks_per_job;
+ uint32_t offset;
+
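+ /* Multithread (-C) mode: split each bdev's LBA range evenly across all cores
+ * and create one job config per core, each with its own offset.
+ */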
+ num_cores = 0;
+ SPDK_ENV_FOREACH_CORE(i) {
+ num_cores++;
+ }
+
+ if (num_cores == 0) {
+ g_run_rc = -EINVAL;
+ return;
+ }
+
+ if (g_job_bdev_name != NULL) {
+ bdev = spdk_bdev_get_by_name(g_job_bdev_name);
+ if (!bdev) {
+ fprintf(stderr, "Unable to find bdev '%s'\n", g_job_bdev_name);
+ return;
+ }
+
+ blocks_per_job = spdk_bdev_get_num_blocks(bdev) / num_cores;
+ offset = 0;
+
+ SPDK_ENV_FOREACH_CORE(i) {
+ g_run_rc = make_cli_job_config(g_job_bdev_name, offset, blocks_per_job);
+ if (g_run_rc) {
+ return;
+ }
+
+ offset += blocks_per_job;
+ }
+ } else {
+ bdev = spdk_bdev_first_leaf();
+ while (bdev != NULL) {
+ blocks_per_job = spdk_bdev_get_num_blocks(bdev) / num_cores;
+ offset = 0;
+
+ SPDK_ENV_FOREACH_CORE(i) {
+ g_run_rc = make_cli_job_config(spdk_bdev_get_name(bdev),
+ offset, blocks_per_job);
+ if (g_run_rc) {
+ return;
+ }
+
+ offset += blocks_per_job;
+ }
+
+ bdev = spdk_bdev_next_leaf(bdev);
+ }
+ }
+}
+
+static void
+bdevperf_construct_jobs(void)
+{
+ struct spdk_bdev *bdev;
+
+ /* There are three different modes for allocating jobs. Standard mode
+ * (the default) creates one spdk_thread per bdev and runs the I/O job there.
+ *
+ * The -C flag places bdevperf into "multithread" mode, meaning it creates
+ * one spdk_thread per bdev PER CORE, and runs a copy of the job on each.
+ * This runs multiple threads per bdev, effectively.
+ *
+ * The -j flag implies "FIO" mode, which tries to mimic the semantics of FIO jobs.
+ * In "FIO" mode, threads are spawned per-job instead of per-bdev.
+ * Each FIO job can be parameterized individually by filename, cpu mask, etc.,
+ * unlike the other modes, which only support global options.
+ */
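+ /* Illustrative example of a -j job config (parameter names as parsed by
+ * read_job_config() below; the values shown are only an example):
+ *
+ *   [global]
+ *   filename=Malloc0
+ *   rw=randread
+ *
+ *   [job0]
+ *   bs=4096
+ *   iodepth=128
+ */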
+
+ /* Increment initial construct_jobs count so that it will never reach 0 in the middle
+ * of iteration.
+ */
+ g_construct_job_count = 1;
+
+ if (g_bdevperf_conf) {
+ goto end;
+ } else if (g_multithread_mode) {
+ bdevperf_construct_multithread_jobs();
+ goto end;
+ }
+
+ if (g_job_bdev_name != NULL) {
+ bdev = spdk_bdev_get_by_name(g_job_bdev_name);
+ if (bdev) {
+ /* Construct the job */
+ g_run_rc = make_cli_job_config(g_job_bdev_name, 0, 0);
+ } else {
+ fprintf(stderr, "Unable to find bdev '%s'\n", g_job_bdev_name);
+ }
+ } else {
+ bdev = spdk_bdev_first_leaf();
+
+ while (bdev != NULL) {
+ /* Construct the job */
+ g_run_rc = make_cli_job_config(spdk_bdev_get_name(bdev), 0, 0);
+ if (g_run_rc) {
+ break;
+ }
+
+ bdev = spdk_bdev_next_leaf(bdev);
+ }
+ }
+
+end:
+ if (g_run_rc == 0) {
+ bdevperf_construct_config_jobs();
+ }
+
+ if (--g_construct_job_count == 0) {
+ if (g_run_rc != 0) {
+ /* Something failed. */
+ bdevperf_test_done(NULL);
+ return;
+ }
+
+ bdevperf_test();
+ }
+}
+
+static int
+parse_uint_option(struct spdk_conf_section *s, const char *name, int def)
+{
+ const char *job_name;
+ int tmp;
+
+ tmp = spdk_conf_section_get_intval(s, name);
+ if (tmp == -1) {
+ /* Field was not found, so check the default value.
+ * In the [global] section it is OK to have undefined values,
+ * but for other sections it is not. */
+ if (def == BDEVPERF_CONFIG_UNDEFINED) {
+ job_name = spdk_conf_section_get_name(s);
+ if (strcmp(job_name, "global") == 0) {
+ return def;
+ }
+
+ fprintf(stderr,
+ "Job '%s' has no '%s' assigned\n",
+ job_name, name);
+ return BDEVPERF_CONFIG_ERROR;
+ }
+ return def;
+ }
+
+ /* NOTE: get_intval returns nonnegative on success */
+ if (tmp < 0) {
+ fprintf(stderr, "Job '%s' has bad '%s' value.\n",
+ spdk_conf_section_get_name(s), name);
+ return BDEVPERF_CONFIG_ERROR;
+ }
+
+ return tmp;
+}
+
+/* CLI arguments override parameters for global sections */
+static void
+config_set_cli_args(struct job_config *config)
+{
+ if (g_job_bdev_name) {
+ config->filename = g_job_bdev_name;
+ }
+ if (g_io_size > 0) {
+ config->bs = g_io_size;
+ }
+ if (g_queue_depth > 0) {
+ config->iodepth = g_queue_depth;
+ }
+ if (g_rw_percentage > 0) {
+ config->rwmixread = g_rw_percentage;
+ }
+ if (g_workload_type) {
+ config->rw = parse_rw(g_workload_type, config->rw);
+ }
+}
+
+static int
+read_job_config(void)
+{
+ struct job_config global_default_config;
+ struct job_config global_config;
+ struct spdk_conf_section *s;
+ struct job_config *config;
+ const char *cpumask;
+ const char *rw;
+ bool is_global;
+ int n = 0;
+
+ if (g_bdevperf_conf_file == NULL) {
+ return 0;
+ }
+
+ g_bdevperf_conf = spdk_conf_allocate();
+ if (g_bdevperf_conf == NULL) {
+ fprintf(stderr, "Could not allocate job config structure\n");
+ return 1;
+ }
+
+ spdk_conf_disable_sections_merge(g_bdevperf_conf);
+ if (spdk_conf_read(g_bdevperf_conf, g_bdevperf_conf_file)) {
+ fprintf(stderr, "Invalid job config");
+ return 1;
+ }
+
+ /* Initialize global defaults */
+ global_default_config.filename = NULL;
+ /* A zero mask is treated the same as g_all_cpuset.
+ * g_all_cpuset is not initialized yet,
+ * so use the zero mask as the default instead. */
+ spdk_cpuset_zero(&global_default_config.cpumask);
+ global_default_config.bs = BDEVPERF_CONFIG_UNDEFINED;
+ global_default_config.iodepth = BDEVPERF_CONFIG_UNDEFINED;
+ /* bdevperf has no default for -M option but in FIO the default is 50 */
+ global_default_config.rwmixread = 50;
+ global_default_config.offset = 0;
+ /* length 0 means 100% */
+ global_default_config.length = 0;
+ global_default_config.rw = BDEVPERF_CONFIG_UNDEFINED;
+ config_set_cli_args(&global_default_config);
+
+ if ((int)global_default_config.rw == BDEVPERF_CONFIG_ERROR) {
+ return 1;
+ }
+
+ /* There is only a single instance of the global job_config.
+ * We reset its value whenever we encounter a new [global] section. */
+ global_config = global_default_config;
+
+ for (s = spdk_conf_first_section(g_bdevperf_conf);
+ s != NULL;
+ s = spdk_conf_next_section(s)) {
+ config = calloc(1, sizeof(*config));
+ if (config == NULL) {
+ fprintf(stderr, "Unable to allocate memory for job config\n");
+ return 1;
+ }
+
+ config->name = spdk_conf_section_get_name(s);
+ is_global = strcmp(config->name, "global") == 0;
+
+ if (is_global) {
+ global_config = global_default_config;
+ }
+
+ config->filename = spdk_conf_section_get_val(s, "filename");
+ if (config->filename == NULL) {
+ config->filename = global_config.filename;
+ }
+ if (!is_global) {
+ if (config->filename == NULL) {
+ fprintf(stderr, "Job '%s' expects 'filename' parameter\n", config->name);
+ goto error;
+ } else if (strnlen(config->filename, BDEVPERF_CONFIG_MAX_FILENAME)
+ >= BDEVPERF_CONFIG_MAX_FILENAME) {
+ fprintf(stderr,
+ "filename for '%s' job is too long. Max length is %d\n",
+ config->name, BDEVPERF_CONFIG_MAX_FILENAME);
+ goto error;
+ }
+ }
+
+ cpumask = spdk_conf_section_get_val(s, "cpumask");
+ if (cpumask == NULL) {
+ config->cpumask = global_config.cpumask;
+ } else if (spdk_cpuset_parse(&config->cpumask, cpumask)) {
+ fprintf(stderr, "Job '%s' has bad 'cpumask' value\n", config->name);
+ goto error;
+ }
+
+ config->bs = parse_uint_option(s, "bs", global_config.bs);
+ if (config->bs == BDEVPERF_CONFIG_ERROR) {
+ goto error;
+ } else if (config->bs == 0) {
+ fprintf(stderr, "'bs' of job '%s' must be greater than 0\n", config->name);
+ goto error;
+ }
+
+ config->iodepth = parse_uint_option(s, "iodepth", global_config.iodepth);
+ if (config->iodepth == BDEVPERF_CONFIG_ERROR) {
+ goto error;
+ } else if (config->iodepth == 0) {
+ fprintf(stderr,
+ "'iodepth' of job '%s' must be greater than 0\n",
+ config->name);
+ goto error;
+ }
+
+ config->rwmixread = parse_uint_option(s, "rwmixread", global_config.rwmixread);
+ if (config->rwmixread == BDEVPERF_CONFIG_ERROR) {
+ goto error;
+ } else if (config->rwmixread > 100) {
+ fprintf(stderr,
+ "'rwmixread' value of '%s' job is not in 0-100 range\n",
+ config->name);
+ goto error;
+ }
+
+ config->offset = parse_uint_option(s, "offset", global_config.offset);
+ if (config->offset == BDEVPERF_CONFIG_ERROR) {
+ goto error;
+ }
+
+ config->length = parse_uint_option(s, "length", global_config.length);
+ if (config->length == BDEVPERF_CONFIG_ERROR) {
+ goto error;
+ }
+
+ rw = spdk_conf_section_get_val(s, "rw");
+ config->rw = parse_rw(rw, global_config.rw);
+ if ((int)config->rw == BDEVPERF_CONFIG_ERROR) {
+ fprintf(stderr, "Job '%s' has bad 'rw' value\n", config->name);
+ goto error;
+ } else if (!is_global && (int)config->rw == BDEVPERF_CONFIG_UNDEFINED) {
+ fprintf(stderr, "Job '%s' has no 'rw' assigned\n", config->name);
+ goto error;
+ }
+
+ if (is_global) {
+ config_set_cli_args(config);
+ global_config = *config;
+ free(config);
+ } else {
+ TAILQ_INSERT_TAIL(&job_config_list, config, link);
+ n++;
+ }
+ }
+
+ printf("Using job config with %d jobs\n", n);
+ return 0;
+error:
+ free(config);
+ return 1;
+}
+
+static void
+bdevperf_run(void *arg1)
+{
+ uint32_t i;
+
+ g_master_thread = spdk_get_thread();
+
+ spdk_cpuset_zero(&g_all_cpuset);
+ SPDK_ENV_FOREACH_CORE(i) {
+ spdk_cpuset_set_cpu(&g_all_cpuset, i, true);
+ }
+
+ if (g_wait_for_tests) {
+ /* Do not perform any tests until RPC is received */
+ return;
+ }
+
+ bdevperf_construct_jobs();
+}
+
+static void
+rpc_perform_tests_cb(void)
+{
+ struct spdk_json_write_ctx *w;
+ struct spdk_jsonrpc_request *request = g_request;
+
+ g_request = NULL;
+
+ if (g_run_rc == 0) {
+ w = spdk_jsonrpc_begin_result(request);
+ spdk_json_write_uint32(w, g_run_rc);
+ spdk_jsonrpc_end_result(request, w);
+ } else {
+ spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+ "bdevperf failed with error %s", spdk_strerror(-g_run_rc));
+ }
+
+ /* Reset g_run_rc to 0 for the next test run. */
+ g_run_rc = 0;
+}
+
+static void
+rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
+{
+ if (params != NULL) {
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+ "perform_tests method requires no parameters");
+ return;
+ }
+ if (g_request != NULL) {
+ fprintf(stderr, "Another test is already in progress.\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+ spdk_strerror(-EINPROGRESS));
+ return;
+ }
+ g_request = request;
+
+ bdevperf_construct_jobs();
+}
+SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
+
+static void
+_bdevperf_job_drain(void *ctx)
+{
+ bdevperf_job_drain(ctx);
+}
+
+static void
+spdk_bdevperf_shutdown_cb(void)
+{
+ struct bdevperf_job *job, *tmp;
+
+ g_shutdown = true;
+
+ if (g_bdevperf.running_jobs == 0) {
+ bdevperf_test_done(NULL);
+ return;
+ }
+
+ g_shutdown_tsc = spdk_get_ticks() - g_shutdown_tsc;
+
+ /* Iterate jobs to stop all I/O */
+ TAILQ_FOREACH_SAFE(job, &g_bdevperf.jobs, link, tmp) {
+ spdk_thread_send_msg(job->thread, _bdevperf_job_drain, job);
+ }
+}
+
+static int
+bdevperf_parse_arg(int ch, char *arg)
+{
+ long long tmp;
+
+ if (ch == 'w') {
+ g_workload_type = optarg;
+ } else if (ch == 'T') {
+ g_job_bdev_name = optarg;
+ } else if (ch == 'z') {
+ g_wait_for_tests = true;
+ } else if (ch == 'x') {
+ g_zcopy = false;
+ } else if (ch == 'A') {
+ g_abort = true;
+ } else if (ch == 'C') {
+ g_multithread_mode = true;
+ } else if (ch == 'f') {
+ g_continue_on_failure = true;
+ } else if (ch == 'j') {
+ g_bdevperf_conf_file = optarg;
+ } else {
+ tmp = spdk_strtoll(optarg, 10);
+ if (tmp < 0) {
+ fprintf(stderr, "Parse failed for the option %c.\n", ch);
+ return tmp;
+ } else if (tmp >= INT_MAX) {
+ fprintf(stderr, "Parsed option was too large %c.\n", ch);
+ return -ERANGE;
+ }
+
+ switch (ch) {
+ case 'q':
+ g_queue_depth = tmp;
+ break;
+ case 'o':
+ g_io_size = tmp;
+ break;
+ case 't':
+ g_time_in_sec = tmp;
+ break;
+ case 'k':
+ g_timeout_in_sec = tmp;
+ break;
+ case 'M':
+ g_rw_percentage = tmp;
+ g_mix_specified = true;
+ break;
+ case 'P':
+ g_show_performance_ema_period = tmp;
+ break;
+ case 'S':
+ g_show_performance_real_time = 1;
+ g_show_performance_period_in_usec = tmp * 1000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void
+bdevperf_usage(void)
+{
+ printf(" -q <depth> io depth\n");
+ printf(" -o <size> io size in bytes\n");
+ printf(" -w <type> io pattern type, must be one of (read, write, randread, randwrite, rw, randrw, verify, reset, unmap, flush)\n");
+ printf(" -t <time> time in seconds\n");
+ printf(" -k <timeout> timeout in seconds to detect starved I/O (default is 0 and disabled)\n");
+ printf(" -M <percent> rwmixread (100 for reads, 0 for writes)\n");
+ printf(" -P <num> number of moving average period\n");
+ printf("\t\t(If set to n, show weighted mean of the previous n IO/s in real time)\n");
+ printf("\t\t(Formula: M = 2 / (n + 1), EMA[i+1] = IO/s * M + (1 - M) * EMA[i])\n");
+ printf("\t\t(only valid with -S)\n");
+ printf(" -S <period> show performance result in real time every <period> seconds\n");
+ printf(" -T <bdev> bdev to run against. Default: all available bdevs.\n");
+ printf(" -f continue processing I/O even after failures\n");
+ printf(" -x disable using zcopy bdev API for read or write I/O\n");
+ printf(" -z start bdevperf, but wait for RPC to start tests\n");
+ printf(" -A abort the timeout I/O\n");
+ printf(" -C enable every core to send I/Os to each bdev\n");
+ printf(" -j use job config file");
+}
+
+static int
+verify_test_params(struct spdk_app_opts *opts)
+{
+ /* When RPC is used for starting tests and
+ * no rpc_addr was configured for the app,
+ * use the default address. */
+ if (g_wait_for_tests && opts->rpc_addr == NULL) {
+ opts->rpc_addr = SPDK_DEFAULT_RPC_ADDR;
+ }
+
+ if (!g_bdevperf_conf_file && g_queue_depth <= 0) {
+ spdk_app_usage();
+ bdevperf_usage();
+ return 1;
+ }
+ if (!g_bdevperf_conf_file && g_io_size <= 0) {
+ spdk_app_usage();
+ bdevperf_usage();
+ return 1;
+ }
+ if (!g_bdevperf_conf_file && !g_workload_type) {
+ spdk_app_usage();
+ bdevperf_usage();
+ return 1;
+ }
+ if (g_time_in_sec <= 0) {
+ spdk_app_usage();
+ bdevperf_usage();
+ return 1;
+ }
+ g_time_in_usec = g_time_in_sec * 1000000LL;
+
+ if (g_timeout_in_sec < 0) {
+ spdk_app_usage();
+ bdevperf_usage();
+ return 1;
+ }
+
+ if (g_show_performance_ema_period > 0 &&
+ g_show_performance_real_time == 0) {
+ fprintf(stderr, "-P option must be specified with -S option\n");
+ return 1;
+ }
+
+ if (g_io_size > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
+ printf("I/O size of %d is greater than zero copy threshold (%d).\n",
+ g_io_size, SPDK_BDEV_LARGE_BUF_MAX_SIZE);
+ printf("Zero copy mechanism will not be used.\n");
+ g_zcopy = false;
+ }
+
+ if (g_bdevperf_conf_file) {
+ /* workload_type verification happens during config file parsing */
+ return 0;
+ }
+
+ if (!strcmp(g_workload_type, "verify") ||
+ !strcmp(g_workload_type, "reset")) {
+ g_rw_percentage = 50;
+ if (g_io_size > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
+ fprintf(stderr, "Unable to exceed max I/O size of %d for verify. (%d provided).\n",
+ SPDK_BDEV_LARGE_BUF_MAX_SIZE, g_io_size);
+ return 1;
+ }
+ g_verify = true;
+ if (!strcmp(g_workload_type, "reset")) {
+ g_reset = true;
+ }
+ }
+
+ if (!strcmp(g_workload_type, "read") ||
+ !strcmp(g_workload_type, "randread") ||
+ !strcmp(g_workload_type, "write") ||
+ !strcmp(g_workload_type, "randwrite") ||
+ !strcmp(g_workload_type, "verify") ||
+ !strcmp(g_workload_type, "reset") ||
+ !strcmp(g_workload_type, "unmap") ||
+ !strcmp(g_workload_type, "write_zeroes") ||
+ !strcmp(g_workload_type, "flush")) {
+ if (g_mix_specified) {
+ fprintf(stderr, "Ignoring -M option... Please use -M option"
+ " only when using rw or randrw.\n");
+ }
+ }
+
+ if (!strcmp(g_workload_type, "rw") ||
+ !strcmp(g_workload_type, "randrw")) {
+ if (g_rw_percentage < 0 || g_rw_percentage > 100) {
+ fprintf(stderr,
+ "-M must be specified to value from 0 to 100 "
+ "for rw or randrw.\n");
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_app_opts opts = {};
+ int rc;
+
+ spdk_app_opts_init(&opts);
+ opts.name = "bdevperf";
+ opts.rpc_addr = NULL;
+ opts.reactor_mask = NULL;
+ opts.shutdown_cb = spdk_bdevperf_shutdown_cb;
+
+ if ((rc = spdk_app_parse_args(argc, argv, &opts, "xzfq:o:t:w:k:ACM:P:S:T:j:", NULL,
+ bdevperf_parse_arg, bdevperf_usage)) !=
+ SPDK_APP_PARSE_ARGS_SUCCESS) {
+ return rc;
+ }
+
+ if (read_job_config()) {
+ free_job_config();
+ return 1;
+ }
+
+ if (verify_test_params(&opts) != 0) {
+ free_job_config();
+ exit(1);
+ }
+
+ rc = spdk_app_start(&opts, bdevperf_run, NULL);
+
+ spdk_app_fini();
+ free_job_config();
+ return rc;
+}
diff --git a/src/spdk/test/bdev/bdevperf/bdevperf.py b/src/spdk/test/bdev/bdevperf/bdevperf.py
new file mode 100755
index 000000000..178d90c34
--- /dev/null
+++ b/src/spdk/test/bdev/bdevperf/bdevperf.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+
+import logging
+import argparse
+import sys
+import shlex
+
+try:
+ from rpc.client import print_dict, JSONRPCException
+ import rpc
+except ImportError:
+ print("SPDK RPC library missing. Please add spdk/scripts/ directory to PYTHONPATH:")
+ print("'export PYTHONPATH=$PYTHONPATH:./spdk/scripts/'")
+ exit(1)
+
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
+
+def print_array(a):
+ print(" ".join((quote(v) for v in a)))
+
+
+def perform_tests_func(client):
+ """Perform bdevperf tests according to command line arguments when application was started.
+
+ Args:
+ none
+
+ Returns:
+ On success, 0 is returned. On error, -1 is returned.
+ """
+ params = {}
+ return client.call('perform_tests', params)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description='SPDK RPC command line interface. NOTE: spdk/scripts/ is expected in PYTHONPATH')
+ parser.add_argument('-s', dest='server_addr',
+ help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock')
+ parser.add_argument('-p', dest='port',
+ help='RPC port number (if server_addr is IP address)',
+ default=5260, type=int)
+ parser.add_argument('-t', dest='timeout',
+ help='Timeout as a floating point number expressed in seconds waiting for response. Default: 60.0',
+ default=60.0, type=float)
+ parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
+ help='Set verbose mode to INFO', default="ERROR")
+ parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
+ help="""Set verbose level. """)
+ subparsers = parser.add_subparsers(help='RPC methods')
+
+ def perform_tests(args):
+ print_dict(perform_tests_func(args.client))
+
+ p = subparsers.add_parser('perform_tests', help='Perform bdevperf tests')
+ p.set_defaults(func=perform_tests)
+
+ def call_rpc_func(args):
+ try:
+ args.func(args)
+ except JSONRPCException as ex:
+ print(ex.message)
+ exit(1)
+
+ def execute_script(parser, client, fd):
+ for rpc_call in map(str.rstrip, fd):
+ if not rpc_call.strip():
+ continue
+ args = parser.parse_args(shlex.split(rpc_call))
+ args.client = client
+ call_rpc_func(args)
+
+ args = parser.parse_args()
+ args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper()))
+ if hasattr(args, 'func'):
+ call_rpc_func(args)
+ elif sys.stdin.isatty():
+ # No arguments and no data piped through stdin
+ parser.print_help()
+ exit(1)
+ else:
+ execute_script(parser, args.client, sys.stdin)
diff --git a/src/spdk/test/bdev/bdevperf/common.sh b/src/spdk/test/bdev/bdevperf/common.sh
new file mode 100644
index 000000000..eade380a3
--- /dev/null
+++ b/src/spdk/test/bdev/bdevperf/common.sh
@@ -0,0 +1,33 @@
+bdevperf=$rootdir/test/bdev/bdevperf/bdevperf
+
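+# Append a job section with the default test parameters to test.conf. When the
+# section name is "global", a leading [global] header carrying the filename is
+# emitted first so later jobs can inherit it.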
+function create_job() {
+ local job_section=$1
+ local rw=$2
+ local filename=$3
+
+ if [[ $job_section == "global" ]]; then
+ cat <<- EOF >> "$testdir"/test.conf
+ [global]
+ filename=${filename}
+ EOF
+ fi
+ job="[${job_section}]"
+ echo $global
+ cat <<- EOF >> "$testdir"/test.conf
+ ${job}
+ filename=${filename}
+ bs=1024
+ rwmixread=70
+ rw=${rw}
+ iodepth=256
+ cpumask=0xff
+ EOF
+}
+
+function get_num_jobs() {
+ echo "$1" | grep -oE "Using job config with [0-9]+ jobs" | grep -oE "[0-9]+"
+}
+
+function cleanup() {
+ rm -f $testdir/test.conf
+}
diff --git a/src/spdk/test/bdev/bdevperf/conf.json b/src/spdk/test/bdev/bdevperf/conf.json
new file mode 100644
index 000000000..c58407f38
--- /dev/null
+++ b/src/spdk/test/bdev/bdevperf/conf.json
@@ -0,0 +1,25 @@
+{
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "method": "bdev_malloc_create",
+ "params": {
+ "name": "Malloc0",
+ "num_blocks": 102400,
+ "block_size": 512
+ }
+ },
+ {
+ "method": "bdev_malloc_create",
+ "params": {
+ "name": "Malloc1",
+ "num_blocks": 102400,
+ "block_size": 512
+ }
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/spdk/test/bdev/bdevperf/test_config.sh b/src/spdk/test/bdev/bdevperf/test_config.sh
new file mode 100755
index 000000000..911d4e27d
--- /dev/null
+++ b/src/spdk/test/bdev/bdevperf/test_config.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+jsonconf=$testdir/conf.json
+testconf=$testdir/test.conf
+
+trap 'cleanup; exit 1' SIGINT SIGTERM EXIT
+#Test inheriting filename and rw_mode parameters from global section.
+create_job "global" "read" "Malloc0"
+create_job "job0"
+create_job "job1"
+create_job "job2"
+create_job "job3"
+bdevperf_output=$($bdevperf -t 2 --json $jsonconf -j $testconf 2>&1)
+[[ $(get_num_jobs "$bdevperf_output") == "4" ]]
+
+bdevperf_output=$($bdevperf -C -t 2 --json $jsonconf -j $testconf)
+
+cleanup
+#Test missing global section.
+create_job "job0" "write" "Malloc0"
+create_job "job1" "write" "Malloc0"
+create_job "job2" "write" "Malloc0"
+bdevperf_output=$($bdevperf -t 2 --json $jsonconf -j $testconf 2>&1)
+[[ $(get_num_jobs "$bdevperf_output") == "3" ]]
+
+cleanup
+#Test inheriting multiple filenames and rw_mode parameters from global section.
+create_job "global" "rw" "Malloc0:Malloc1"
+create_job "job0"
+create_job "job1"
+create_job "job2"
+create_job "job3"
+bdevperf_output=$($bdevperf -t 2 --json $jsonconf -j $testconf 2>&1)
+[[ $(get_num_jobs "$bdevperf_output") == "4" ]]
+cleanup
+trap - SIGINT SIGTERM EXIT
diff --git a/src/spdk/test/bdev/blockdev.sh b/src/spdk/test/bdev/blockdev.sh
new file mode 100755
index 000000000..12d9c6f52
--- /dev/null
+++ b/src/spdk/test/bdev/blockdev.sh
@@ -0,0 +1,408 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/nbd_common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+conf_file="$testdir/bdev.json"
+# Make sure the configuration is clean
+: > "$conf_file"
+
+function cleanup() {
+ rm -f "$SPDK_TEST_STORAGE/aiofile"
+ rm -f "$SPDK_TEST_STORAGE/spdk-pmem-pool"
+ rm -f "$conf_file"
+
+ if [[ $test_type == rbd ]]; then
+ rbd_cleanup
+ fi
+}
+
+function start_spdk_tgt() {
+ "$SPDK_BIN_DIR/spdk_tgt" &
+ spdk_tgt_pid=$!
+ trap 'killprocess "$spdk_tgt_pid"; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten "$spdk_tgt_pid"
+}
+
+function setup_bdev_conf() {
+ "$rpc_py" <<- RPC
+ bdev_split_create Malloc1 2
+ bdev_split_create -s 4 Malloc2 8
+ bdev_malloc_create -b Malloc0 32 512
+ bdev_malloc_create -b Malloc1 32 512
+ bdev_malloc_create -b Malloc2 32 512
+ bdev_malloc_create -b Malloc3 32 512
+ bdev_malloc_create -b Malloc4 32 512
+ bdev_malloc_create -b Malloc5 32 512
+ bdev_passthru_create -p TestPT -b Malloc3
+ bdev_raid_create -n raid0 -z 64 -r 0 -b "Malloc4 Malloc5"
+ RPC
+ # FIXME: QoS doesn't work properly with json_config, see issue 1146
+ #$rpc_py bdev_set_qos_limit --rw_mbytes_per_sec 100 Malloc3
+ #$rpc_py bdev_set_qos_limit --rw_ios_per_sec 20000 Malloc0
+ if [[ $(uname -s) != "FreeBSD" ]]; then
+ dd if=/dev/zero of="$SPDK_TEST_STORAGE/aiofile" bs=2048 count=5000
+ "$rpc_py" bdev_aio_create "$SPDK_TEST_STORAGE/aiofile" AIO0 2048
+ fi
+}
+
+function setup_nvme_conf() {
+ "$rootdir/scripts/gen_nvme.sh" --json | "$rpc_py" load_subsystem_config
+}
+
+function setup_gpt_conf() {
+ if [[ $(uname -s) = Linux ]] && hash sgdisk; then
+ $rootdir/scripts/setup.sh reset
+ # FIXME: Note that we are racing with the kernel here. There's no guarantee that
+ # the proper object is already in place under sysfs, nor that any udev-like
+ # helper has created the proper block devices for us. Replace the sleep below
+ # with a proper udev settle routine.
+ sleep 1s
+ # Get nvme devices by following drivers' links towards nvme class
+ local nvme_devs=(/sys/bus/pci/drivers/nvme/*/nvme/nvme*/nvme*n*) nvme_dev
+ gpt_nvme=""
+ # Pick first device which doesn't have any valid partition table
+ for nvme_dev in "${nvme_devs[@]}"; do
+ dev=/dev/${nvme_dev##*/}
+ if ! pt=$(parted "$dev" -ms print 2>&1); then
+ [[ $pt == *"$dev: unrecognised disk label"* ]] || continue
+ gpt_nvme=$dev
+ break
+ fi
+ done
+ if [[ -n $gpt_nvme ]]; then
+ # Create gpt partition table
+ parted -s "$gpt_nvme" mklabel gpt mkpart first '0%' '50%' mkpart second '50%' '100%'
+ # change the GUID to SPDK GUID value
+ # FIXME: Hardcode this in some common place, this value should not be changed much
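+ # Extract the GUID from the C macro initializer in gpt.h and convert it
+ # into the canonical dash-separated string form expected by sgdisk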
+ IFS="()" read -r _ SPDK_GPT_GUID _ < <(grep SPDK_GPT_PART_TYPE_GUID module/bdev/gpt/gpt.h)
+ SPDK_GPT_GUID=${SPDK_GPT_GUID//, /-} SPDK_GPT_GUID=${SPDK_GPT_GUID//0x/}
+ sgdisk -t "1:$SPDK_GPT_GUID" "$gpt_nvme"
+ sgdisk -t "2:$SPDK_GPT_GUID" "$gpt_nvme"
+ "$rootdir/scripts/setup.sh"
+ "$rpc_py" bdev_get_bdevs
+ setup_nvme_conf
+ else
+ printf 'Did not find any nvme block devices to work with, aborting the test\n' >&2
+ "$rootdir/scripts/setup.sh"
+ return 1
+ fi
+ else
+ # Not supported platform or missing tooling, nothing to be done, simply exit the test
+ # in a graceful manner.
+ trap - SIGINT SIGTERM EXIT
+ killprocess "$spdk_tgt_pid"
+ cleanup
+ exit 0
+ fi
+}
+
+function setup_crypto_aesni_conf() {
+ # Malloc0 and Malloc1 use AESNI
+ "$rpc_py" <<- RPC
+ bdev_malloc_create -b Malloc0 16 512
+ bdev_malloc_create -b Malloc1 16 512
+ bdev_crypto_create Malloc0 crypto_ram crypto_aesni_mb 0123456789123456
+ bdev_crypto_create Malloc1 crypto_ram2 crypto_aesni_mb 9012345678912345
+ RPC
+}
+
+function setup_crypto_qat_conf() {
+ # Malloc0 will use QAT AES_CBC
+ # Malloc1 will use QAT AES_XTS
+ "$rpc_py" <<- RPC
+ bdev_malloc_create -b Malloc0 16 512
+ bdev_malloc_create -b Malloc1 16 512
+ bdev_crypto_create Malloc0 crypto_ram crypto_qat 0123456789123456
+ bdev_crypto_create -c AES_XTS -k2 0123456789123456 Malloc1 crypto_ram3 crypto_qat 0123456789123456
+ RPC
+ "$rpc_py" bdev_get_bdevs -b Malloc1
+}
+
+function setup_pmem_conf() {
+ if hash pmempool; then
+ rm -f "$SPDK_TEST_STORAGE/spdk-pmem-pool"
+ pmempool create blk --size=32M 512 "$SPDK_TEST_STORAGE/spdk-pmem-pool"
+ "$rpc_py" bdev_pmem_create -n Pmem0 "$SPDK_TEST_STORAGE/spdk-pmem-pool"
+ else
+ return 1
+ fi
+}
+
+function setup_rbd_conf() {
+ timing_enter rbd_setup
+ rbd_setup 127.0.0.1
+ timing_exit rbd_setup
+
+ "$rpc_py" bdev_rbd_create -b Ceph0 rbd foo 512
+}
+
+function bdev_bounds() {
+ $testdir/bdevio/bdevio -w -s $PRE_RESERVED_MEM --json "$conf_file" &
+ bdevio_pid=$!
+ trap 'killprocess $bdevio_pid; exit 1' SIGINT SIGTERM EXIT
+ echo "Process bdevio pid: $bdevio_pid"
+ waitforlisten $bdevio_pid
+ $testdir/bdevio/tests.py perform_tests
+ killprocess $bdevio_pid
+ trap - SIGINT SIGTERM EXIT
+}
+
+function nbd_function_test() {
+ if [ $(uname -s) = Linux ] && modprobe -n nbd; then
+ local rpc_server=/var/tmp/spdk-nbd.sock
+ local conf=$1
+ local nbd_all=($(ls /dev/nbd* | grep -v p))
+ local bdev_all=($bdevs_name)
+ local nbd_num=${#bdev_all[@]}
+ if [ ${#nbd_all[@]} -le $nbd_num ]; then
+ nbd_num=${#nbd_all[@]}
+ fi
+ local nbd_list=(${nbd_all[@]:0:$nbd_num})
+ local bdev_list=(${bdev_all[@]:0:$nbd_num})
+
+ if [ ! -e $conf ]; then
+ return 1
+ fi
+
+ modprobe nbd
+ $rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 --json "$conf" &
+ nbd_pid=$!
+ trap 'killprocess $nbd_pid; exit 1' SIGINT SIGTERM EXIT
+ echo "Process nbd pid: $nbd_pid"
+ waitforlisten $nbd_pid $rpc_server
+
+ nbd_rpc_start_stop_verify $rpc_server "${bdev_list[*]}"
+ nbd_rpc_data_verify $rpc_server "${bdev_list[*]}" "${nbd_list[*]}"
+
+ killprocess $nbd_pid
+ trap - SIGINT SIGTERM EXIT
+ fi
+
+ return 0
+}
+
+function fio_test_suite() {
+ # Generate the fio config file given the list of all unclaimed bdevs
+ fio_config_gen $testdir/bdev.fio verify AIO
+ for b in $(echo $bdevs | jq -r '.name'); do
+ echo "[job_$b]" >> $testdir/bdev.fio
+ echo "filename=$b" >> $testdir/bdev.fio
+ done
+
+ local fio_params="--ioengine=spdk_bdev --iodepth=8 --bs=4k --runtime=10 $testdir/bdev.fio --spdk_json_conf=$conf_file"
+
+ run_test "bdev_fio_rw_verify" fio_bdev $fio_params --spdk_mem=$PRE_RESERVED_MEM \
+ --output=$output_dir/blockdev_fio_verify.txt
+ rm -f ./*.state
+ rm -f $testdir/bdev.fio
+
+ # Generate the fio config file given the list of all unclaimed bdevs that support unmap
+ fio_config_gen $testdir/bdev.fio trim
+ if [ "$(echo $bdevs | jq -r 'select(.supported_io_types.unmap == true) | .name')" != "" ]; then
+ for b in $(echo $bdevs | jq -r 'select(.supported_io_types.unmap == true) | .name'); do
+ echo "[job_$b]" >> $testdir/bdev.fio
+ echo "filename=$b" >> $testdir/bdev.fio
+ done
+ else
+ rm -f $testdir/bdev.fio
+ return 0
+ fi
+
+ run_test "bdev_fio_trim" fio_bdev $fio_params --output=$output_dir/blockdev_trim.txt
+ rm -f ./*.state
+ rm -f $testdir/bdev.fio
+}
+
+function get_io_result() {
+ local limit_type=$1
+ local qos_dev=$2
+ local iostat_result
+ iostat_result=$($rootdir/scripts/iostat.py -d -i 1 -t $QOS_RUN_TIME | grep $qos_dev | tail -1)
+ if [ $limit_type = IOPS ]; then
+ iostat_result=$(awk '{print $2}' <<< $iostat_result)
+ elif [ $limit_type = BANDWIDTH ]; then
+ iostat_result=$(awk '{print $6}' <<< $iostat_result)
+ fi
+
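+ # Strip the fractional part so callers can use the result in integer arithmetic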
+ echo ${iostat_result/.*/}
+}
+
+function run_qos_test() {
+ local qos_limit=$1
+ local qos_result=0
+
+ qos_result=$(get_io_result $2 $3)
+ if [ $2 = BANDWIDTH ]; then
+ qos_limit=$((qos_limit * 1024))
+ fi
+ lower_limit=$((qos_limit * 9 / 10))
+ upper_limit=$((qos_limit * 11 / 10))
+
+ # QoS enforcement is based on bytes transferred and currently shows some variation.
+ if [ $qos_result -lt $lower_limit ] || [ $qos_result -gt $upper_limit ]; then
+ echo "Failed to limit the io read rate of NULL bdev by qos"
+ $rpc_py bdev_malloc_delete $QOS_DEV_1
+ $rpc_py bdev_null_delete $QOS_DEV_2
+ killprocess $QOS_PID
+ exit 1
+ fi
+}
+
+function qos_function_test() {
+ local qos_lower_iops_limit=1000
+ local qos_lower_bw_limit=2
+ local io_result=0
+ local iops_limit=0
+ local bw_limit=0
+
+ io_result=$(get_io_result IOPS $QOS_DEV_1)
+ # Set the IOPS limit as one quarter of the measured performance without QoS
+ iops_limit=$(((io_result / 4) / qos_lower_iops_limit * qos_lower_iops_limit))
+ if [ $iops_limit -gt $qos_lower_iops_limit ]; then
+
+ # Run bdevperf with IOPS rate limit on bdev 1
+ $rpc_py bdev_set_qos_limit --rw_ios_per_sec $iops_limit $QOS_DEV_1
+ run_test "bdev_qos_iops" run_qos_test $iops_limit IOPS $QOS_DEV_1
+
+ # Run bdevperf with bandwidth rate limit on bdev 2
+ # Set the bandwidth limit as 1/10 of the measured performance without QoS
+ bw_limit=$(get_io_result BANDWIDTH $QOS_DEV_2)
+ bw_limit=$((bw_limit / 1024 / 10))
+ if [ $bw_limit -lt $qos_lower_bw_limit ]; then
+ bw_limit=$qos_lower_bw_limit
+ fi
+ $rpc_py bdev_set_qos_limit --rw_mbytes_per_sec $bw_limit $QOS_DEV_2
+ run_test "bdev_qos_bw" run_qos_test $bw_limit BANDWIDTH $QOS_DEV_2
+
+ # Run bdevperf with additional read only bandwidth rate limit on bdev 1
+ $rpc_py bdev_set_qos_limit --r_mbytes_per_sec $qos_lower_bw_limit $QOS_DEV_1
+ run_test "bdev_qos_ro_bw" run_qos_test $qos_lower_bw_limit BANDWIDTH $QOS_DEV_1
+ else
+ echo "Actual IOPS without limiting is too low - exit testing"
+ fi
+}
+
+function qos_test_suite() {
+ # Run bdevperf with QoS disabled first
+ "$testdir/bdevperf/bdevperf" -z -m 0x2 -q 256 -o 4096 -w randread -t 60 &
+ QOS_PID=$!
+ echo "Process qos testing pid: $QOS_PID"
+ trap 'killprocess $QOS_PID; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $QOS_PID
+
+ $rpc_py bdev_malloc_create -b $QOS_DEV_1 128 512
+ waitforbdev $QOS_DEV_1
+ $rpc_py bdev_null_create $QOS_DEV_2 128 512
+ waitforbdev $QOS_DEV_2
+
+ $rootdir/test/bdev/bdevperf/bdevperf.py perform_tests &
+ qos_function_test
+
+ $rpc_py bdev_malloc_delete $QOS_DEV_1
+ $rpc_py bdev_null_delete $QOS_DEV_2
+ killprocess $QOS_PID
+ trap - SIGINT SIGTERM EXIT
+}
+
+# Initial bdev creation and configuration
+#-----------------------------------------------------
+QOS_DEV_1="Malloc_0"
+QOS_DEV_2="Null_1"
+QOS_RUN_TIME=5
+
+if [ $(uname -s) = Linux ]; then
+ # Test dynamic memory management. All hugepages will be reserved at runtime
+ PRE_RESERVED_MEM=0
+else
+ # Dynamic memory management is not supported on BSD
+ PRE_RESERVED_MEM=2048
+fi
+
+test_type=${1:-bdev}
+start_spdk_tgt
+case "$test_type" in
+ bdev)
+ setup_bdev_conf
+ ;;
+ nvme)
+ setup_nvme_conf
+ ;;
+ gpt)
+ setup_gpt_conf
+ ;;
+ crypto_aesni)
+ setup_crypto_aesni_conf
+ ;;
+ crypto_qat)
+ setup_crypto_qat_conf
+ ;;
+ pmem)
+ setup_pmem_conf
+ ;;
+ rbd)
+ setup_rbd_conf
+ ;;
+ *)
+ echo "invalid test name"
+ exit 1
+ ;;
+esac
+
+# Generate json config and use it throughout all the tests
+cat <<- CONF > "$conf_file"
+ {"subsystems":[
+ $("$rpc_py" save_subsystem_config -n bdev)
+ ]}
+CONF
+
+bdevs=$("$rpc_py" bdev_get_bdevs | jq -r '.[] | select(.claimed == false)')
+bdevs_name=$(echo $bdevs | jq -r '.name')
+bdev_list=($bdevs_name)
+hello_world_bdev=${bdev_list[0]}
+trap - SIGINT SIGTERM EXIT
+killprocess "$spdk_tgt_pid"
+# End bdev configuration
+#-----------------------------------------------------
+
+run_test "bdev_hello_world" $SPDK_EXAMPLE_DIR/hello_bdev --json "$conf_file" -b "$hello_world_bdev"
+run_test "bdev_bounds" bdev_bounds
+run_test "bdev_nbd" nbd_function_test $conf_file "$bdevs_name"
+if [[ $CONFIG_FIO_PLUGIN == y ]]; then
+ if [ "$test_type" = "nvme" ] || [ "$test_type" = "gpt" ]; then
+ # TODO: once we get real multi-ns drives, re-enable this test for NVMe.
+ echo "skipping fio tests on NVMe due to multi-ns failures."
+ else
+ run_test "bdev_fio" fio_test_suite
+ fi
+else
+ echo "FIO not available"
+ exit 1
+fi
+
+run_test "bdev_verify" $testdir/bdevperf/bdevperf --json "$conf_file" -q 128 -o 4096 -w verify -t 5 -C -m 0x3
+run_test "bdev_write_zeroes" $testdir/bdevperf/bdevperf --json "$conf_file" -q 128 -o 4096 -w write_zeroes -t 1
+
+if [[ $test_type == bdev ]]; then
+ run_test "bdev_qos" qos_test_suite
+fi
+
+# Temporarily disabled - infinite loop
+# if [ $RUN_NIGHTLY -eq 1 ]; then
+# run_test "bdev_reset" $testdir/bdevperf/bdevperf --json "$conf_file" -q 16 -w reset -o 4096 -t 60
+# fi
+
+# Bdev and configuration cleanup below this line
+#-----------------------------------------------------
+if [ "$test_type" = "gpt" ]; then
+ "$rootdir/scripts/setup.sh" reset
+ sleep 1s
+ if [[ -b $gpt_nvme ]]; then
+ dd if=/dev/zero of="$gpt_nvme" bs=4096 count=8 oflag=direct
+ fi
+fi
+
+cleanup
diff --git a/src/spdk/test/bdev/nbd_common.sh b/src/spdk/test/bdev/nbd_common.sh
new file mode 100644
index 000000000..2ea765649
--- /dev/null
+++ b/src/spdk/test/bdev/nbd_common.sh
@@ -0,0 +1,123 @@
+set -e
+
+function nbd_start_disks() {
+ local rpc_server=$1
+ local bdev_list=($2)
+ local nbd_list=($3)
+ local i
+
+ for ((i = 0; i < ${#nbd_list[@]}; i++)); do
+ $rootdir/scripts/rpc.py -s $rpc_server nbd_start_disk ${bdev_list[$i]} ${nbd_list[$i]}
+ # Wait for nbd device ready
+ waitfornbd $(basename ${nbd_list[$i]})
+ done
+}
+
+function nbd_start_disks_without_nbd_idx() {
+ local rpc_server=$1
+ local bdev_list=($2)
+ local i
+ local nbd_device
+
+ for ((i = 0; i < ${#bdev_list[@]}; i++)); do
+ nbd_device=$($rootdir/scripts/rpc.py -s $rpc_server nbd_start_disk ${bdev_list[$i]})
+ # Wait for nbd device ready
+ waitfornbd $(basename ${nbd_device})
+ done
+}
+
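+# Poll /proc/partitions (up to ~2 seconds) until the given nbd device disappears.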
+function waitfornbd_exit() {
+ local nbd_name=$1
+
+ for ((i = 1; i <= 20; i++)); do
+ if grep -q -w $nbd_name /proc/partitions; then
+ sleep 0.1
+ else
+ break
+ fi
+ done
+
+ return 0
+}
+
+function nbd_stop_disks() {
+ local rpc_server=$1
+ local nbd_list=($2)
+ local i
+
+ for i in "${nbd_list[@]}"; do
+ $rootdir/scripts/rpc.py -s $rpc_server nbd_stop_disk $i
+ waitfornbd_exit $(basename $i)
+ done
+}
+
+function nbd_get_count() {
+ # return = count of spdk nbd devices
+ local rpc_server=$1
+
+ nbd_disks_json=$($rootdir/scripts/rpc.py -s $rpc_server nbd_get_disks)
+ nbd_disks_name=$(echo "${nbd_disks_json}" | jq -r '.[] | .nbd_device')
+ count=$(echo "${nbd_disks_name}" | grep -c /dev/nbd || true)
+ echo $count
+}
+
+function nbd_dd_data_verify() {
+ local nbd_list=($1)
+ local operation=$2
+ local tmp_file=$SPDK_TEST_STORAGE/nbdrandtest
+
+ if [ "$operation" = "write" ]; then
+ # data write
+ dd if=/dev/urandom of=$tmp_file bs=4096 count=256
+ for i in "${nbd_list[@]}"; do
+ dd if=$tmp_file of=$i bs=4096 count=256 oflag=direct
+ done
+ elif [ "$operation" = "verify" ]; then
+ # data read and verify
+ for i in "${nbd_list[@]}"; do
+ cmp -b -n 1M $tmp_file $i
+ done
+ rm $tmp_file
+ fi
+}
+
+function nbd_rpc_data_verify() {
+ local rpc_server=$1
+ local bdev_list=($2)
+ local nbd_list=($3)
+
+ nbd_start_disks $rpc_server "${bdev_list[*]}" "${nbd_list[*]}"
+ count=$(nbd_get_count $rpc_server)
+ if [ $count -ne ${#nbd_list[@]} ]; then
+ return 1
+ fi
+
+ nbd_dd_data_verify "${nbd_list[*]}" "write"
+ nbd_dd_data_verify "${nbd_list[*]}" "verify"
+
+ nbd_stop_disks $rpc_server "${nbd_list[*]}"
+ count=$(nbd_get_count $rpc_server)
+ if [ $count -ne 0 ]; then
+ return 1
+ fi
+
+ return 0
+}
+
+function nbd_rpc_start_stop_verify() {
+ local rpc_server=$1
+ local bdev_list=($2)
+
+ nbd_start_disks_without_nbd_idx $rpc_server "${bdev_list[*]}"
+
+ nbd_disks_json=$($rootdir/scripts/rpc.py -s $rpc_server nbd_get_disks)
+ nbd_disks_name=($(echo "${nbd_disks_json}" | jq -r '.[] | .nbd_device'))
+ nbd_stop_disks $rpc_server "${nbd_disks_name[*]}"
+
+ count=$(nbd_get_count $rpc_server)
+ if [ $count -ne 0 ]; then
+ return 1
+ fi
+
+ return 0
+}
diff --git a/src/spdk/test/blobfs/Makefile b/src/spdk/test/blobfs/Makefile
new file mode 100644
index 000000000..d4275544b
--- /dev/null
+++ b/src/spdk/test/blobfs/Makefile
@@ -0,0 +1,45 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = mkfs
+DIRS-$(CONFIG_FUSE) += fuse
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/blobfs/blobfs.sh b/src/spdk/test/blobfs/blobfs.sh
new file mode 100755
index 000000000..29c4cc433
--- /dev/null
+++ b/src/spdk/test/blobfs/blobfs.sh
@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+SYSTEM=$(uname -s)
+if [ $SYSTEM = "FreeBSD" ]; then
+ echo "blobfs.sh cannot run on FreeBSD currently."
+ exit 0
+fi
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+rpc_server=/var/tmp/spdk-blobfs.sock
+rpc_py="$rootdir/scripts/rpc.py -s $rpc_server"
+tmp_file=$SPDK_TEST_STORAGE/blobfs_file
+conf_file=/tmp/blobfs.conf
+bdevname=BlobfsBdev
+mount_dir=/tmp/spdk_tmp_mount
+test_cache_size=512
+
+function cleanup() {
+ if [[ -n $blobfs_pid && -e /proc/$blobfs_pid ]]; then
+ killprocess $blobfs_pid
+ fi
+
+ rm -rf $mount_dir
+ rm -f $tmp_file
+ rm -f $conf_file
+}
+
+function blobfs_start_app() {
+ $rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -c ${conf_file} &
+ blobfs_pid=$!
+
+ echo "Process blobfs pid: $blobfs_pid"
+ waitforlisten $blobfs_pid $rpc_server
+
+ result=$($rpc_py blobfs_set_cache_size ${test_cache_size})
+ if [ "${result}" != "True" ]; then
+ false
+ fi
+}
+
+function blobfs_detect_test() {
+	# Detect that there is no blobfs on the test bdev yet
+ blobfs_start_app
+ result=$($rpc_py blobfs_detect ${bdevname})
+ if [ "${result}" != "False" ]; then
+ false
+ fi
+
+ killprocess $blobfs_pid
+
+ # Create blobfs on test bdev
+ $rootdir/test/blobfs/mkfs/mkfs ${conf_file} ${bdevname}
+
+	# Detect that there is now a blobfs on the test bdev
+ blobfs_start_app
+ result=$($rpc_py blobfs_detect ${bdevname})
+ if [ "${result}" != "True" ]; then
+ false
+ fi
+
+ killprocess $blobfs_pid
+}
+
+function blobfs_create_test() {
+ blobfs_start_app
+
+ # Create blobfs on test bdev
+ $rpc_py blobfs_create ${bdevname}
+
+	# Detect that there is now a blobfs on the test bdev
+ result=$($rpc_py blobfs_detect ${bdevname})
+ if [ "${result}" != "True" ]; then
+ false
+ fi
+
+ killprocess $blobfs_pid
+}
+
+function blobfs_fuse_test() {
+ if [ ! -d /usr/include/fuse3 ] && [ ! -d /usr/local/include/fuse3 ]; then
+		echo "libfuse3, which is required for this test, is not installed."
+ return 0
+ fi
+
+ # mount blobfs on test dir
+ $rootdir/test/blobfs/fuse/fuse ${conf_file} ${bdevname} $mount_dir &
+ blobfs_pid=$!
+ echo "Process blobfs pid: $blobfs_pid"
+
+	# The blobfs FUSE app currently doesn't support specifying the RPC socket path,
+	# so use the default socket path directly.
+ waitforlisten $blobfs_pid /var/tmp/spdk.sock
+
+ # check mount status
+ mount | grep "$mount_dir"
+
+	# Create a random data file in the mount dir
+ dd if=/dev/urandom of=${mount_dir}/rand_file bs=4k count=32
+
+ umount ${mount_dir}
+ sleep 1
+ killprocess $blobfs_pid
+
+ # Verify there is no file in mount dir now
+ if [ -f ${mount_dir}/rand_file ]; then
+ false
+ fi
+
+ # use blobfs mount RPC
+ blobfs_start_app
+ $rpc_py blobfs_mount ${bdevname} $mount_dir
+
+ # read and delete the rand file
+ md5sum ${mount_dir}/rand_file
+ rm ${mount_dir}/rand_file
+
+ umount ${mount_dir}
+ sleep 1
+ killprocess $blobfs_pid
+}
+
+trap 'cleanup' EXIT
+
+# Create a temp file to use as the test bdev
+dd if=/dev/zero of=${tmp_file} bs=4k count=1M
+echo "[AIO]" > ${conf_file}
+echo "AIO ${tmp_file} ${bdevname} 512" >> ${conf_file}
+
+blobfs_detect_test
+
+# Clear blobfs on temp file
+dd if=/dev/zero of=${tmp_file} bs=4k count=1M
+
+blobfs_create_test
+
+# Create dir for FUSE mount
+mkdir -p $mount_dir
+blobfs_fuse_test
diff --git a/src/spdk/test/blobfs/fuse/.gitignore b/src/spdk/test/blobfs/fuse/.gitignore
new file mode 100644
index 000000000..a517c488f
--- /dev/null
+++ b/src/spdk/test/blobfs/fuse/.gitignore
@@ -0,0 +1 @@
+fuse
diff --git a/src/spdk/test/blobfs/fuse/Makefile b/src/spdk/test/blobfs/fuse/Makefile
new file mode 100644
index 000000000..09d956e4f
--- /dev/null
+++ b/src/spdk/test/blobfs/fuse/Makefile
@@ -0,0 +1,50 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = fuse
+
+C_SRCS := fuse.c
+
+SPDK_LIB_LIST = $(ALL_MODULES_LIST)
+SPDK_LIB_LIST += $(EVENT_BDEV_SUBSYSTEM)
+SPDK_LIB_LIST += bdev accel event thread util conf trace \
+ log jsonrpc json rpc sock notify blobfs_bdev
+
+# libfuse3 is required internally by blobfs_bdev
+LIBS+= -L/usr/local/lib -lfuse3
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/blobfs/fuse/fuse.c b/src/spdk/test/blobfs/fuse/fuse.c
new file mode 100644
index 000000000..e434fb505
--- /dev/null
+++ b/src/spdk/test/blobfs/fuse/fuse.c
@@ -0,0 +1,114 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#define FUSE_USE_VERSION 30
+#include "fuse3/fuse.h"
+#include "fuse3/fuse_lowlevel.h"
+
+#include "spdk/blobfs.h"
+#include "spdk/bdev.h"
+#include "spdk/event.h"
+#include "spdk/thread.h"
+#include "spdk/blob_bdev.h"
+#include "spdk/blobfs_bdev.h"
+#include "spdk/log.h"
+#include "spdk/string.h"
+
+char *g_bdev_name;
+char *g_mountpoint;
+
+int g_fuse_argc = 0;
+char **g_fuse_argv = NULL;
+
+static void
+fuse_run_cb(void *cb_arg, int fserrno)
+{
+ if (fserrno) {
+		printf("Failed to mount filesystem on bdev %s to path %s: %s\n",
+ g_bdev_name, g_mountpoint, spdk_strerror(fserrno));
+
+ spdk_app_stop(0);
+ return;
+ }
+
+ printf("done.\n");
+}
+
+static void
+spdk_fuse_run(void *arg1)
+{
+ printf("Mounting filesystem on bdev %s to path %s...",
+ g_bdev_name, g_mountpoint);
+ fflush(stdout);
+
+ spdk_blobfs_bdev_mount(g_bdev_name, g_mountpoint, fuse_run_cb, NULL);
+}
+
+static void
+spdk_fuse_shutdown(void)
+{
+ spdk_app_stop(0);
+}
+
+int main(int argc, char **argv)
+{
+ struct spdk_app_opts opts = {};
+ int rc = 0;
+
+ if (argc < 4) {
+ fprintf(stderr, "usage: %s <conffile> <bdev name> <mountpoint>\n", argv[0]);
+ exit(1);
+ }
+
+ spdk_app_opts_init(&opts);
+ opts.name = "spdk_fuse";
+ opts.config_file = argv[1];
+ opts.reactor_mask = "0x3";
+ opts.shutdown_cb = spdk_fuse_shutdown;
+
+ g_bdev_name = argv[2];
+ g_mountpoint = argv[3];
+
+ /* TODO: mount blobfs with extra FUSE options. */
+ g_fuse_argc = argc - 2;
+ g_fuse_argv = &argv[2];
+
+ spdk_fs_set_cache_size(512);
+
+ rc = spdk_app_start(&opts, spdk_fuse_run, NULL);
+ spdk_app_fini();
+
+ return rc;
+}
diff --git a/src/spdk/test/blobfs/mkfs/.gitignore b/src/spdk/test/blobfs/mkfs/.gitignore
new file mode 100644
index 000000000..54e292c61
--- /dev/null
+++ b/src/spdk/test/blobfs/mkfs/.gitignore
@@ -0,0 +1 @@
+mkfs
diff --git a/src/spdk/test/blobfs/mkfs/Makefile b/src/spdk/test/blobfs/mkfs/Makefile
new file mode 100644
index 000000000..42eebd9f1
--- /dev/null
+++ b/src/spdk/test/blobfs/mkfs/Makefile
@@ -0,0 +1,52 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = mkfs
+
+C_SRCS := mkfs.c
+
+SPDK_LIB_LIST = $(ALL_MODULES_LIST)
+SPDK_LIB_LIST += $(EVENT_BDEV_SUBSYSTEM)
+SPDK_LIB_LIST += bdev accel event thread util conf trace \
+ log jsonrpc json rpc sock notify blobfs_bdev
+
+# libfuse3 is required internally by blobfs_bdev
+ifeq ($(CONFIG_FUSE),y)
+LIBS+= -L/usr/local/lib -lfuse3
+endif
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/blobfs/mkfs/mkfs.c b/src/spdk/test/blobfs/mkfs/mkfs.c
new file mode 100644
index 000000000..4d70a5e16
--- /dev/null
+++ b/src/spdk/test/blobfs/mkfs/mkfs.c
@@ -0,0 +1,115 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/event.h"
+#include "spdk/blobfs.h"
+#include "spdk/blobfs_bdev.h"
+#include "spdk/log.h"
+#include "spdk/string.h"
+
+const char *g_bdev_name;
+static uint64_t g_cluster_size;
+
+static void
+shutdown_cb(void *cb_arg, int fserrno)
+{
+ if (fserrno) {
+ printf("\nFailed to initialize filesystem on bdev %s...", g_bdev_name);
+ }
+
+ printf("done.\n");
+
+ spdk_app_stop(0);
+}
+
+static void
+spdk_mkfs_run(void *arg1)
+{
+ printf("Initializing filesystem on bdev %s...", g_bdev_name);
+ fflush(stdout);
+
+ spdk_blobfs_bdev_create(g_bdev_name, g_cluster_size, shutdown_cb, NULL);
+}
+
+static void
+mkfs_usage(void)
+{
+ printf(" -C <size> cluster size\n");
+}
+
+static int
+mkfs_parse_arg(int ch, char *arg)
+{
+ bool has_prefix;
+
+ switch (ch) {
+ case 'C':
+ spdk_parse_capacity(arg, &g_cluster_size, &has_prefix);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ struct spdk_app_opts opts = {};
+ int rc = 0;
+
+ if (argc < 3) {
+ SPDK_ERRLOG("usage: %s <conffile> <bdevname>\n", argv[0]);
+ exit(1);
+ }
+
+ spdk_app_opts_init(&opts);
+ opts.name = "spdk_mkfs";
+ opts.config_file = argv[1];
+ opts.reactor_mask = "0x3";
+ opts.shutdown_cb = NULL;
+
+ spdk_fs_set_cache_size(512);
+ g_bdev_name = argv[2];
+ if ((rc = spdk_app_parse_args(argc, argv, &opts, "C:", NULL,
+ mkfs_parse_arg, mkfs_usage)) !=
+ SPDK_APP_PARSE_ARGS_SUCCESS) {
+ exit(rc);
+ }
+
+ rc = spdk_app_start(&opts, spdk_mkfs_run, NULL);
+ spdk_app_fini();
+
+ return rc;
+}
diff --git a/src/spdk/test/blobfs/rocksdb/.gitignore b/src/spdk/test/blobfs/rocksdb/.gitignore
new file mode 100644
index 000000000..1a06816d8
--- /dev/null
+++ b/src/spdk/test/blobfs/rocksdb/.gitignore
@@ -0,0 +1 @@
+results
diff --git a/src/spdk/test/blobfs/rocksdb/common_flags.txt b/src/spdk/test/blobfs/rocksdb/common_flags.txt
new file mode 100644
index 000000000..6390c7a40
--- /dev/null
+++ b/src/spdk/test/blobfs/rocksdb/common_flags.txt
@@ -0,0 +1,27 @@
+--disable_seek_compaction=1
+--mmap_read=0
+--statistics=1
+--histogram=1
+--key_size=16
+--value_size=1000
+--block_size=4096
+--cache_size=0
+--bloom_bits=10
+--cache_numshardbits=4
+--open_files=500000
+--verify_checksum=1
+--db=/mnt/rocksdb
+--sync=0
+--compression_type=none
+--stats_interval=1000000
+--compression_ratio=1
+--disable_data_sync=0
+--target_file_size_base=67108864
+--max_write_buffer_number=3
+--max_bytes_for_level_multiplier=10
+--max_background_compactions=10
+--num_levels=10
+--delete_obsolete_files_period_micros=3000000
+--max_grandparent_overlap_factor=10
+--stats_per_interval=1
+--max_bytes_for_level_base=10485760
diff --git a/src/spdk/test/blobfs/rocksdb/postprocess.py b/src/spdk/test/blobfs/rocksdb/postprocess.py
new file mode 100755
index 000000000..1ba8a7302
--- /dev/null
+++ b/src/spdk/test/blobfs/rocksdb/postprocess.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+from collections import namedtuple
+from itertools import islice
+import operator
+import sys
+
+total_samples = 0
+thread_module_samples = {}
+function_module_samples = {}
+module_samples = {}
+threads = set()
+
+ThreadModule = namedtuple('ThreadModule', ['thread', 'module'])
+FunctionModule = namedtuple('FunctionModule', ['function', 'module'])
+
+with open(sys.argv[1] + "/" + sys.argv[2] + ".perf.txt") as f:
+ for line in f:
+ fields = line.split()
+ total_samples += int(fields[1])
+ key = ThreadModule(fields[2], fields[3])
+ thread_module_samples.setdefault(key, 0)
+ thread_module_samples[key] += int(fields[1])
+ key = FunctionModule(fields[5], fields[3])
+ function_module_samples.setdefault(key, 0)
+ function_module_samples[key] += int(fields[1])
+ threads.add(fields[2])
+
+ key = fields[3]
+ module_samples.setdefault(key, 0)
+ module_samples[key] += int(fields[1])
+
+for thread in sorted(threads):
+ thread_pct = 0
+ print("")
+ print("Thread: {:s}".format(thread))
+ print(" Percent Module")
+ print("============================")
+ for key, value in sorted(list(thread_module_samples.items()), key=operator.itemgetter(1), reverse=True):
+ if key.thread == thread:
+ print("{:8.4f} {:20s}".format(float(value) * 100 / total_samples, key.module))
+ thread_pct += float(value) * 100 / total_samples
+ print("============================")
+ print("{:8.4f} Total".format(thread_pct))
+
+print("")
+print(" Percent Module Function")
+print("=================================================================")
+for key, value in islice(sorted(list(function_module_samples.items()), key=operator.itemgetter(1), reverse=True), 100):
+ print(("{:8.4f} {:20s} {:s}".format(float(value) * 100 / total_samples, key.module, key.function)))
+
+print("")
+print("")
+print(" Percent Module")
+print("=================================")
+for key, value in sorted(list(module_samples.items()), key=operator.itemgetter(1), reverse=True):
+ print("{:8.4f} {:s}".format(float(value) * 100 / total_samples, key))
+
+print("")
+with open(sys.argv[1] + "/" + sys.argv[2] + "_db_bench.txt") as f:
+ for line in f:
+ if "maxresident" in line:
+ fields = line.split()
+ print("Wall time elapsed: {:s}".format(fields[2].split("e")[0]))
+ print("CPU utilization: {:s}".format(fields[3].split('C')[0]))
+ user = float(fields[0].split('u')[0])
+ system = float(fields[1].split('s')[0])
+ print("User: {:8.2f} ({:5.2f}%)".format(user, user * 100 / (user + system)))
+ print("System: {:8.2f} ({:5.2f}%)".format(system, system * 100 / (user + system)))
+
+print("")
diff --git a/src/spdk/test/blobfs/rocksdb/rocksdb.sh b/src/spdk/test/blobfs/rocksdb/rocksdb.sh
new file mode 100755
index 000000000..406156905
--- /dev/null
+++ b/src/spdk/test/blobfs/rocksdb/rocksdb.sh
@@ -0,0 +1,155 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+
+dump_db_bench_on_err() {
+	# Fetch the stdout dump of the last run_step that might have failed
+ [[ -e $db_bench ]] || return 0
+
+ # Dump entire *.txt to stderr to clearly see what might have failed
+ xtrace_disable
+ mapfile -t step_map < "$db_bench"
+ printf '%s\n' "${step_map[@]/#/* $step (FAILED)}" >&2
+ xtrace_restore
+}
+
+run_step() {
+ if [ -z "$1" ]; then
+ echo run_step called with no parameter
+ exit 1
+ fi
+
+ cat <<- EOL >> "$1"_flags.txt
+ --spdk=$ROCKSDB_CONF
+ --spdk_bdev=Nvme0n1
+ --spdk_cache_size=$CACHE_SIZE
+ EOL
+
+ db_bench=$1_db_bench.txt
+ echo -n Start $1 test phase...
+ time taskset 0xFF $DB_BENCH --flagfile="$1"_flags.txt &> "$db_bench"
+ DB_BENCH_FILE=$(grep -o '/dev/shm/\(\w\|\.\|\d\|/\)*' "$db_bench")
+ gzip $DB_BENCH_FILE
+ mv $DB_BENCH_FILE.gz "$1"_trace.gz
+ chmod 644 "$1"_trace.gz
+ echo done.
+}
+
+run_bsdump() {
+ $SPDK_EXAMPLE_DIR/blobcli -c $ROCKSDB_CONF -b Nvme0n1 -D &> bsdump.txt
+}
+
+# In the autotest job, we copy the rocksdb source to just outside the spdk directory.
+DB_BENCH_DIR="$rootdir/../rocksdb"
+DB_BENCH=$DB_BENCH_DIR/db_bench
+ROCKSDB_CONF=$testdir/rocksdb.conf
+
+if [ ! -e $DB_BENCH_DIR ]; then
+ echo $DB_BENCH_DIR does not exist
+ false
+fi
+
+timing_enter db_bench_build
+
+pushd $DB_BENCH_DIR
+if [ -z "$SKIP_GIT_CLEAN" ]; then
+ git clean -x -f -d
+fi
+
+EXTRA_CXXFLAGS=""
+GCC_VERSION=$(cc -dumpversion | cut -d. -f1)
+if ((GCC_VERSION >= 9)); then
+ EXTRA_CXXFLAGS+="-Wno-deprecated-copy -Wno-pessimizing-move -Wno-error=stringop-truncation"
+fi
+
+$MAKE db_bench $MAKEFLAGS $MAKECONFIG DEBUG_LEVEL=0 SPDK_DIR=$rootdir EXTRA_CXXFLAGS="$EXTRA_CXXFLAGS"
+popd
+
+timing_exit db_bench_build
+
+$rootdir/scripts/gen_nvme.sh > $ROCKSDB_CONF
+# 0x80 is the bit mask for BlobFS tracepoints
+echo "[Global]" >> $ROCKSDB_CONF
+echo "TpointGroupMask 0x80" >> $ROCKSDB_CONF
+
+trap 'dump_db_bench_on_err; run_bsdump || :; rm -f $ROCKSDB_CONF; exit 1' SIGINT SIGTERM EXIT
+
+if [ -z "$SKIP_MKFS" ]; then
+ run_test "blobfs_mkfs" $rootdir/test/blobfs/mkfs/mkfs $ROCKSDB_CONF Nvme0n1
+fi
+
+mkdir -p $output_dir/rocksdb
+RESULTS_DIR=$output_dir/rocksdb
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ CACHE_SIZE=4096
+ DURATION=60
+ NUM_KEYS=100000000
+else
+ CACHE_SIZE=2048
+ DURATION=20
+ NUM_KEYS=20000000
+fi
+
+cd $RESULTS_DIR
+cp $testdir/common_flags.txt insert_flags.txt
+cat << EOL >> insert_flags.txt
+--benchmarks=fillseq
+--threads=1
+--disable_wal=1
+--use_existing_db=0
+--num=$NUM_KEYS
+EOL
+
+cp $testdir/common_flags.txt randread_flags.txt
+cat << EOL >> randread_flags.txt
+--benchmarks=readrandom
+--threads=16
+--duration=$DURATION
+--disable_wal=1
+--use_existing_db=1
+--num=$NUM_KEYS
+EOL
+
+cp $testdir/common_flags.txt overwrite_flags.txt
+cat << EOL >> overwrite_flags.txt
+--benchmarks=overwrite
+--threads=1
+--duration=$DURATION
+--disable_wal=1
+--use_existing_db=1
+--num=$NUM_KEYS
+EOL
+
+cp $testdir/common_flags.txt readwrite_flags.txt
+cat << EOL >> readwrite_flags.txt
+--benchmarks=readwhilewriting
+--threads=4
+--duration=$DURATION
+--disable_wal=1
+--use_existing_db=1
+--num=$NUM_KEYS
+EOL
+
+cp $testdir/common_flags.txt writesync_flags.txt
+cat << EOL >> writesync_flags.txt
+--benchmarks=overwrite
+--threads=1
+--duration=$DURATION
+--disable_wal=0
+--use_existing_db=1
+--sync=1
+--num=$NUM_KEYS
+EOL
+
+run_test "rocksdb_insert" run_step insert
+run_test "rocksdb_overwrite" run_step overwrite
+run_test "rocksdb_readwrite" run_step readwrite
+run_test "rocksdb_writesync" run_step writesync
+run_test "rocksdb_randread" run_step randread
+
+trap - SIGINT SIGTERM EXIT
+
+run_bsdump
+rm -f $ROCKSDB_CONF
diff --git a/src/spdk/test/blobfs/rocksdb/rocksdb_commit_id b/src/spdk/test/blobfs/rocksdb/rocksdb_commit_id
new file mode 100644
index 000000000..efac5a55d
--- /dev/null
+++ b/src/spdk/test/blobfs/rocksdb/rocksdb_commit_id
@@ -0,0 +1 @@
+526c73bd94150cc8fbd651f736e1ca95f50d8e13
diff --git a/src/spdk/test/blobstore/blob_io_wait/blob_io_wait.sh b/src/spdk/test/blobstore/blob_io_wait/blob_io_wait.sh
new file mode 100755
index 000000000..4cdca98f0
--- /dev/null
+++ b/src/spdk/test/blobstore/blob_io_wait/blob_io_wait.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+
+SYSTEM=$(uname -s)
+if [ $SYSTEM = "FreeBSD" ]; then
+ echo "blob_io_wait.sh cannot run on FreeBSD currently."
+ exit 0
+fi
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+rpc_py="$rootdir/scripts/rpc.py"
+
+truncate -s 64M $testdir/aio.bdev
+
+$rootdir/test/app/bdev_svc/bdev_svc &
+bdev_svc_pid=$!
+
+trap 'killprocess $bdev_svc_pid; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $bdev_svc_pid
+$rpc_py bdev_aio_create $testdir/aio.bdev aio0 4096
+$rpc_py bdev_lvol_create_lvstore aio0 lvs0
+$rpc_py bdev_lvol_create -l lvs0 lvol0 32
+
+killprocess $bdev_svc_pid
+
+# Minimal bdev I/O pool size (128) and cache size (1)
+cat << EOL > $testdir/bdevperf.conf
+[Bdev]
+BdevIoPoolSize 128
+BdevIoCacheSize 1
+[AIO]
+AIO $testdir/aio.bdev aio0 4096
+EOL
+
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdevperf.conf -q 128 -o 4096 -w write -t 5 -r /var/tmp/spdk.sock &
+bdev_perf_pid=$!
+waitforlisten $bdev_perf_pid
+$rpc_py bdev_enable_histogram aio0 -e
+sleep 2
+$rpc_py bdev_get_histogram aio0 | $rootdir/scripts/histogram.py
+$rpc_py bdev_enable_histogram aio0 -d
+wait $bdev_perf_pid
+
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdevperf.conf -q 128 -o 4096 -w read -t 5 -r /var/tmp/spdk.sock &
+bdev_perf_pid=$!
+waitforlisten $bdev_perf_pid
+$rpc_py bdev_enable_histogram aio0 -e
+sleep 2
+$rpc_py bdev_get_histogram aio0 | $rootdir/scripts/histogram.py
+$rpc_py bdev_enable_histogram aio0 -d
+wait $bdev_perf_pid
+
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdevperf.conf -q 128 -o 4096 -w unmap -t 1
+
+sync
+rm -rf $testdir/bdevperf.conf
+rm -rf $testdir/aio.bdev
+trap - SIGINT SIGTERM EXIT
diff --git a/src/spdk/test/blobstore/blobstore.sh b/src/spdk/test/blobstore/blobstore.sh
new file mode 100755
index 000000000..247150f5c
--- /dev/null
+++ b/src/spdk/test/blobstore/blobstore.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+SYSTEM=$(uname -s)
+if [ $SYSTEM = "FreeBSD" ]; then
+ echo "blobstore.sh cannot run on FreeBSD currently."
+ exit 0
+fi
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+# Nvme0 target configuration
+$rootdir/scripts/gen_nvme.sh > $testdir/blobcli.conf
+
+# generate random data file for import/export diff
+dd if=/dev/urandom of=$testdir/test.pattern bs=1M count=1
+
+(cd $testdir \
+ && $SPDK_EXAMPLE_DIR/blobcli -c $testdir/blobcli.conf -b Nvme0n1 -T $testdir/test.bs > $testdir/btest.out)
+
+# the test script will import the test pattern generated by dd and then export
+# it to a file so we can compare and confirm basic read and write
+$rootdir/test/app/match/match -v $testdir/btest.out.match
+diff $testdir/test.pattern $testdir/test.pattern.blob
+
+rm -rf $testdir/btest.out
+rm -rf $testdir/blobcli.conf
+rm -rf $testdir/*.blob
+rm -rf $testdir/test.pattern
diff --git a/src/spdk/test/blobstore/btest.out.ignore b/src/spdk/test/blobstore/btest.out.ignore
new file mode 100644
index 000000000..8a1df3d18
--- /dev/null
+++ b/src/spdk/test/blobstore/btest.out.ignore
@@ -0,0 +1,5 @@
+DPDK
+EAL
+Working
+
+cryptodev_aesni_mb_create
diff --git a/src/spdk/test/blobstore/btest.out.match b/src/spdk/test/blobstore/btest.out.match
new file mode 100644
index 000000000..b197e22d4
--- /dev/null
+++ b/src/spdk/test/blobstore/btest.out.match
@@ -0,0 +1,90 @@
+Starting SPDK v19.10.1 / DPDK 19.08.0 initialization...
+[ DPDK EAL parameters: blobcli --no-shconf -c 0x1 --log-level=lib.eal:6 --log-level=lib.cryptodev:5 --log-level=user1:6 --iova-mode=pa --base-virtaddr=0x200000000000 --match-allocations --file-prefix=spdk_pid1641656 ]
+
+SCRIPT NOW PROCESSING: -i
+Init blobstore using bdev Product Name: NVMe disk
+blobstore init'd: ($(XX))
+
+SCRIPT NOW PROCESSING: -l bdevs
+
+List bdevs:
+ bdev Name: Nvme0n1
+ bdev Product Name: NVMe disk
+
+
+SCRIPT NOW PROCESSING: -n 1
+New blob id $(N)
+blob now has USED clusters of 1
+
+SCRIPT NOW PROCESSING: -p $B0
+Super Blob ID has been set.
+
+SCRIPT NOW PROCESSING: -n 1
+New blob id $(N)
+blob now has USED clusters of 1
+
+SCRIPT NOW PROCESSING: -m $B1 test.pattern
+Working...............................................................................................................................................................................................................................................................
+Blob import complete (from test.pattern).
+
+SCRIPT NOW PROCESSING: -d $B1 test.pattern.blob
+Working................................................................................................................................................................................................................................................................
+File write complete (to test.pattern.blob).
+
+SCRIPT NOW PROCESSING: -x $B1 key val
+Xattr has been set.
+
+SCRIPT NOW PROCESSING: -s bs
+Blobstore Public Info:
+ Using bdev Product Name: NVMe disk
+ API Version: $(N)
+ super blob ID: $(N)
+ page size: $(N)
+ io unit size: $(N)
+ cluster size: 1048576
+ # free clusters: $(N)
+ blobstore type:
+00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+
+Blobstore Private Info:
+ Metadata start (pages): $(N)
+ Metadata length (pages): $(N)
+
+SCRIPT NOW PROCESSING: -s $B1
+Blob Public Info:
+blob ID: $(N)
+# of clusters: 1
+# of bytes: 1048576
+# of pages: 256
+# of xattrs: 1
+xattrs:
+
+(0) Name:key
+(0) Value:
+
+00000000 76 61 6c val
+
+Blob Private Info:
+state: CLEAN
+open ref count: 1
+
+SCRIPT NOW PROCESSING: -r $B1 key
+Xattr has been removed.
+
+SCRIPT NOW PROCESSING: -s bs
+Blobstore Public Info:
+ Using bdev Product Name: NVMe disk
+ API Version: 3
+ super blob ID: $(N)
+ page size: $(N)
+ io unit size: $(N)
+ cluster size: 1048576
+ # free clusters: $(N)
+ blobstore type:
+00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+
+Blobstore Private Info:
+ Metadata start (pages): $(N)
+ Metadata length (pages): $(N)
+
+SCRIPT NOW PROCESSING: -X
diff --git a/src/spdk/test/blobstore/test.bs b/src/spdk/test/blobstore/test.bs
new file mode 100644
index 000000000..dcc64861d
--- /dev/null
+++ b/src/spdk/test/blobstore/test.bs
@@ -0,0 +1,12 @@
+-i
+-l bdevs
+-n 1
+-p $B0
+-n 1
+-m $B1 test.pattern
+-d $B1 test.pattern.blob
+-x $B1 key val
+-s bs
+-s $B1
+-r $B1 key
+-s bs
diff --git a/src/spdk/test/common/applications.sh b/src/spdk/test/common/applications.sh
new file mode 100644
index 000000000..041af2932
--- /dev/null
+++ b/src/spdk/test/common/applications.sh
@@ -0,0 +1,24 @@
+# Default set of apps used in functional testing
+
+_root=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")
+_root=${_root%/test/common}
+_app_dir=$_root/build/bin
+_test_app_dir=$_root/test/app
+
+VHOST_FUZZ_APP=("$_test_app_dir/fuzz/vhost_fuzz/vhost_fuzz")
+ISCSI_APP=("$_app_dir/iscsi_tgt")
+NVMF_APP=("$_app_dir/nvmf_tgt")
+VHOST_APP=("$_app_dir/vhost")
+DD_APP=("$_app_dir/spdk_dd")
+
+# Check if apps should execute under debug flags
+if [[ -e $_root/include/spdk/config.h ]]; then
+ if [[ $(< "$_root/include/spdk/config.h") == *"#define SPDK_CONFIG_DEBUG"* ]] \
+ && ((SPDK_AUTOTEST_DEBUG_APPS)); then
+ VHOST_FUZZ_APP+=("--logflag=all")
+ ISCSI_APP+=("--logflag=all")
+ NVMF_APP+=("--logflag=all")
+ VHOST_APP+=("--logflag=all")
+ DD_APP+=("--logflag=all")
+ fi
+fi
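+
+# Illustrative use (comments only; the core mask below is just an example): test
+# scripts expand these arrays so any debug flags appended above are passed along:
+#
+#   "${NVMF_APP[@]}" -m 0x2 &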
diff --git a/src/spdk/test/common/autotest_common.sh b/src/spdk/test/common/autotest_common.sh
new file mode 100755
index 000000000..32cd4e32a
--- /dev/null
+++ b/src/spdk/test/common/autotest_common.sh
@@ -0,0 +1,1350 @@
+#!/usr/bin/env bash
+
+function xtrace_disable() {
+ if [ "$XTRACE_DISABLED" != "yes" ]; then
+ PREV_BASH_OPTS="$-"
+ if [[ "$PREV_BASH_OPTS" == *"x"* ]]; then
+ XTRACE_DISABLED="yes"
+ fi
+ set +x
+ elif [ -z $XTRACE_NESTING_LEVEL ]; then
+ XTRACE_NESTING_LEVEL=1
+ else
+ XTRACE_NESTING_LEVEL=$((++XTRACE_NESTING_LEVEL))
+ fi
+}
+
+xtrace_disable
+set -e
+shopt -s expand_aliases
+
+source "$rootdir/test/common/applications.sh"
+if [[ -e $rootdir/test/common/build_config.sh ]]; then
+ source "$rootdir/test/common/build_config.sh"
+elif [[ -e $rootdir/mk/config.mk ]]; then
+ build_config=$(< "$rootdir/mk/config.mk")
+ source <(echo "${build_config//\?=/=}")
+else
+ source "$rootdir/CONFIG"
+fi
+
+# Dummy function to be called after restoring xtrace just so that it appears in the
+# xtrace log. This way we can consistently track when xtrace is enabled/disabled.
+function xtrace_enable() {
+ # We have to do something inside a function in bash, and calling any command
+ # (even `:`) will produce an xtrace entry, so we just define another function.
+ function xtrace_dummy() { :; }
+}
+
+# Keep it as alias to avoid xtrace_enable backtrace always pointing to xtrace_restore.
+# xtrace_enable will appear as called directly from the user script, from the same line
+# that "called" xtrace_restore.
+alias xtrace_restore='if [ -z $XTRACE_NESTING_LEVEL ]; then
+ if [[ "$PREV_BASH_OPTS" == *"x"* ]]; then
+ XTRACE_DISABLED="no"; PREV_BASH_OPTS=""; set -x; xtrace_enable;
+ fi
+else
+ XTRACE_NESTING_LEVEL=$((--XTRACE_NESTING_LEVEL));
+ if [ $XTRACE_NESTING_LEVEL -eq "0" ]; then
+ unset XTRACE_NESTING_LEVEL
+ fi
+fi'
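+
+# Illustrative pairing (comments only; a minimal sketch of how callers in this
+# file use the two helpers):
+#
+#   xtrace_disable          # silence command tracing around a noisy block
+#   some_noisy_command      # hypothetical command
+#   xtrace_restore          # re-enables tracing only if it was on before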
+
+: ${RUN_NIGHTLY:=0}
+export RUN_NIGHTLY
+
+# Set defaults for missing test config options
+: ${SPDK_AUTOTEST_DEBUG_APPS:=0}
+export SPDK_AUTOTEST_DEBUG_APPS
+: ${SPDK_RUN_VALGRIND=0}
+export SPDK_RUN_VALGRIND
+: ${SPDK_RUN_FUNCTIONAL_TEST=0}
+export SPDK_RUN_FUNCTIONAL_TEST
+: ${SPDK_TEST_UNITTEST=0}
+export SPDK_TEST_UNITTEST
+: ${SPDK_TEST_AUTOBUILD=0}
+export SPDK_TEST_AUTOBUILD
+: ${SPDK_TEST_ISAL=0}
+export SPDK_TEST_ISAL
+: ${SPDK_TEST_ISCSI=0}
+export SPDK_TEST_ISCSI
+: ${SPDK_TEST_ISCSI_INITIATOR=0}
+export SPDK_TEST_ISCSI_INITIATOR
+: ${SPDK_TEST_NVME=0}
+export SPDK_TEST_NVME
+: ${SPDK_TEST_NVME_CLI=0}
+export SPDK_TEST_NVME_CLI
+: ${SPDK_TEST_NVME_CUSE=0}
+export SPDK_TEST_NVME_CUSE
+: ${SPDK_TEST_NVMF=0}
+export SPDK_TEST_NVMF
+: ${SPDK_TEST_NVMF_TRANSPORT="rdma"}
+export SPDK_TEST_NVMF_TRANSPORT
+: ${SPDK_TEST_RBD=0}
+export SPDK_TEST_RBD
+: ${SPDK_TEST_VHOST=0}
+export SPDK_TEST_VHOST
+: ${SPDK_TEST_BLOCKDEV=0}
+export SPDK_TEST_BLOCKDEV
+: ${SPDK_TEST_IOAT=0}
+export SPDK_TEST_IOAT
+: ${SPDK_TEST_BLOBFS=0}
+export SPDK_TEST_BLOBFS
+: ${SPDK_TEST_VHOST_INIT=0}
+export SPDK_TEST_VHOST_INIT
+: ${SPDK_TEST_PMDK=0}
+export SPDK_TEST_PMDK
+: ${SPDK_TEST_LVOL=0}
+export SPDK_TEST_LVOL
+: ${SPDK_TEST_JSON=0}
+export SPDK_TEST_JSON
+: ${SPDK_TEST_REDUCE=0}
+export SPDK_TEST_REDUCE
+: ${SPDK_TEST_VPP=0}
+export SPDK_TEST_VPP
+: ${SPDK_RUN_ASAN=0}
+export SPDK_RUN_ASAN
+: ${SPDK_RUN_UBSAN=0}
+export SPDK_RUN_UBSAN
+: ${SPDK_RUN_INSTALLED_DPDK=0}
+export SPDK_RUN_INSTALLED_DPDK
+: ${SPDK_RUN_NON_ROOT=0}
+export SPDK_RUN_NON_ROOT
+: ${SPDK_TEST_CRYPTO=0}
+export SPDK_TEST_CRYPTO
+: ${SPDK_TEST_FTL=0}
+export SPDK_TEST_FTL
+: ${SPDK_TEST_OCF=0}
+export SPDK_TEST_OCF
+: ${SPDK_TEST_FTL_EXTENDED=0}
+export SPDK_TEST_FTL_EXTENDED
+: ${SPDK_TEST_VMD=0}
+export SPDK_TEST_VMD
+: ${SPDK_TEST_OPAL=0}
+export SPDK_TEST_OPAL
+: ${SPDK_AUTOTEST_X=true}
+export SPDK_AUTOTEST_X
+: ${SPDK_TEST_RAID5=0}
+export SPDK_TEST_RAID5
+: ${SPDK_TEST_URING=0}
+export SPDK_TEST_URING
+
+# Export PYTHONPATH with the RPC framework added. New scripts can be created
+# for specific test use cases.
+export PYTHONPATH=$PYTHONPATH:$rootdir/scripts
+
+# Don't create Python .pyc files. When running with sudo these will be
+# created with root ownership and can cause problems when cleaning the repository.
+export PYTHONDONTWRITEBYTECODE=1
+
+# Export flag to work around a known bug in librados,
+# reported on the Ceph bug tracker as issue 24078.
+export ASAN_OPTIONS=new_delete_type_mismatch=0
+export UBSAN_OPTIONS='halt_on_error=1:print_stacktrace=1:abort_on_error=1'
+
+# Export LeakSanitizer option to use suppression file in order to prevent false positives
+# and known leaks in external executables or libraries from showing up.
+asan_suppression_file="/var/tmp/asan_suppression_file"
+sudo rm -rf "$asan_suppression_file"
+cat << EOL >> "$asan_suppression_file"
+# ASAN has some bugs around thread_local variables. We have a destructor in place
+# to free the thread contexts, but ASAN complains about the leak before those
+# destructors have a chance to run. So suppress this one specific leak using
+# LSAN_OPTIONS.
+leak:spdk_fs_alloc_thread_ctx
+
+# Suppress known leaks in fio project
+leak:$CONFIG_FIO_SOURCE_DIR/parse.c
+leak:$CONFIG_FIO_SOURCE_DIR/iolog.c
+leak:$CONFIG_FIO_SOURCE_DIR/init.c
+leak:$CONFIG_FIO_SOURCE_DIR/filesetup.c
+leak:fio_memalign
+leak:spdk_fio_io_u_init
+
+# Suppress leaks in libiscsi
+leak:libiscsi.so
+EOL
+
+# Suppress leaks in libfuse3
+echo "leak:libfuse3.so" >> "$asan_suppression_file"
+
+export LSAN_OPTIONS=suppressions="$asan_suppression_file"
+
+export DEFAULT_RPC_ADDR="/var/tmp/spdk.sock"
+
+if [ -z "$DEPENDENCY_DIR" ]; then
+ export DEPENDENCY_DIR=/home/sys_sgsw
+else
+ export DEPENDENCY_DIR
+fi
+
+# Export location of where all the SPDK binaries are
+export SPDK_BIN_DIR="$rootdir/build/bin"
+export SPDK_EXAMPLE_DIR="$rootdir/build/examples"
+
+# pass our valgrind preference on to unittest.sh
+if [ $SPDK_RUN_VALGRIND -eq 0 ]; then
+ export valgrind=''
+fi
+
+if [ "$(uname -s)" = "Linux" ]; then
+ MAKE="make"
+ MAKEFLAGS=${MAKEFLAGS:--j$(nproc)}
+ DPDK_LINUX_DIR=/usr/share/dpdk/x86_64-default-linuxapp-gcc
+ if [ -d $DPDK_LINUX_DIR ] && [ $SPDK_RUN_INSTALLED_DPDK -eq 1 ]; then
+ WITH_DPDK_DIR=$DPDK_LINUX_DIR
+ fi
+ # Override the default HUGEMEM in scripts/setup.sh to allocate 8GB in hugepages.
+ export HUGEMEM=8192
+elif [ "$(uname -s)" = "FreeBSD" ]; then
+ MAKE="gmake"
+ MAKEFLAGS=${MAKEFLAGS:--j$(sysctl -a | grep -E -i 'hw.ncpu' | awk '{print $2}')}
+ DPDK_FREEBSD_DIR=/usr/local/share/dpdk/x86_64-native-bsdapp-clang
+ if [ -d $DPDK_FREEBSD_DIR ] && [ $SPDK_RUN_INSTALLED_DPDK -eq 1 ]; then
+ WITH_DPDK_DIR=$DPDK_FREEBSD_DIR
+ fi
+ # FreeBSD runs a much more limited set of tests, so keep the default 2GB.
+ export HUGEMEM=2048
+else
+ echo "Unknown OS \"$(uname -s)\""
+ exit 1
+fi
+
+if [ -z "$output_dir" ]; then
+ if [ -z "$rootdir" ] || [ ! -d "$rootdir/../output" ]; then
+ output_dir=.
+ else
+ output_dir=$rootdir/../output
+ fi
+ export output_dir
+fi
+
+TEST_MODE=
+for i in "$@"; do
+ case "$i" in
+ --iso)
+ TEST_MODE=iso
+ ;;
+ --transport=*)
+ TEST_TRANSPORT="${i#*=}"
+ ;;
+ --sock=*)
+ TEST_SOCK="${i#*=}"
+ ;;
+ esac
+done
+
+# start rpc.py coprocess if it's not started yet
+if [[ -z $RPC_PIPE_PID ]] || ! kill -0 "$RPC_PIPE_PID" &> /dev/null; then
+ coproc RPC_PIPE { "$rootdir/scripts/rpc.py" --server; }
+ exec {RPC_PIPE_OUTPUT}<&${RPC_PIPE[0]} {RPC_PIPE_INPUT}>&${RPC_PIPE[1]}
+	# All descriptors will close automatically together with this bash
+	# process; this will make rpc.py stop reading and exit gracefully.
+fi
+
+if [ $SPDK_TEST_VPP -eq 1 ]; then
+ VPP_PATH="/usr/local/src/vpp-19.04/build-root/install-vpp_debug-native/vpp/"
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${VPP_PATH}/lib/
+ export PATH=${PATH}:${VPP_PATH}/bin/
+fi
+
+function set_test_storage() {
+ [[ -v testdir ]] || return 0
+
+ local requested_size=$1 # bytes
+ local mount target_dir
+
+ local -A mounts fss sizes avails uses
+ local source fs size avail mount use
+
+ local storage_fallback storage_candidates
+ local storage_fallback_purge
+
+ storage_fallback_purge=("${TMPDIR:-/tmp}/spdk."??????)
+
+ if ((${#storage_fallback_purge[@]} > 0)); then
+ printf '* Purging old temporary test storage (%s)\n' \
+ "${storage_fallback_purge[*]}" >&2
+ rm -rf "${storage_fallback_purge[@]}"
+ fi
+
+ storage_fallback=$(mktemp -udt spdk.XXXXXX)
+ storage_candidates=(
+ "$testdir"
+ "$storage_fallback/tests/${testdir##*/}"
+ "$storage_fallback"
+ )
+
+ if [[ -n $ADD_TEST_STORAGE ]]; then
+		# List of dirs|mounts separated by whitespace
+ storage_candidates+=($ADD_TEST_STORAGE)
+ fi
+
+ if [[ -n $DEDICATED_TEST_STORAGE ]]; then
+ # Single, dedicated dir|mount
+ storage_candidates=("$DEDICATED_TEST_STORAGE")
+ fi
+
+ mkdir -p "${storage_candidates[@]}"
+
+ # add some headroom - 64M
+ requested_size=$((requested_size + (64 << 20)))
+
+ while read -r source fs size use avail _ mount; do
+ mounts["$mount"]=$source fss["$mount"]=$fs
+ avails["$mount"]=$((avail * 1024)) sizes["$mount"]=$((size * 1024))
+ uses["$mount"]=$((use * 1024))
+ done < <(df -T | grep -v Filesystem)
+
+ printf '* Looking for test storage...\n' >&2
+
+ local target_space new_size
+ for target_dir in "${storage_candidates[@]}"; do
+ # FreeBSD's df is lacking the --output arg
+ # mount=$(df --output=target "$target_dir" | grep -v "Mounted on")
+ mount=$(df "$target_dir" | awk '$1 !~ /Filesystem/{print $6}')
+
+ target_space=${avails["$mount"]}
+ if ((target_space == 0 || target_space < requested_size)); then
+ continue
+ fi
+ if ((target_space >= requested_size)); then
+			# For in-memory filesystems and for /, make sure our requested size won't fill most of the space.
+ if [[ ${fss["$mount"]} == tmpfs ]] || [[ ${fss["$mount"]} == ramfs ]] || [[ $mount == / ]]; then
+ new_size=$((uses["$mount"] + requested_size))
+ if ((new_size * 100 / sizes["$mount"] > 95)); then
+ continue
+ fi
+ fi
+ fi
+ export SPDK_TEST_STORAGE=$target_dir
+ printf '* Found test storage at %s\n' "$SPDK_TEST_STORAGE" >&2
+ return 0
+ done
+ printf '* Test storage is not available\n'
+ return 1
+}
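+
+# Illustrative call (comments only; the size and file name are hypothetical):
+#
+#   set_test_storage $((10 * 1024 * 1024 * 1024))   # ask for ~10 GiB (+64M headroom)
+#   dd if=/dev/zero of="$SPDK_TEST_STORAGE/scratch_file" bs=1M count=1024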
+
+function get_config_params() {
+ xtrace_disable
+ config_params='--enable-debug --enable-werror'
+
+ # for options with dependencies but no test flag, set them here
+ if [ -f /usr/include/infiniband/verbs.h ]; then
+ config_params+=' --with-rdma'
+ fi
+
+ if [ $(uname -s) == "FreeBSD" ]; then
+ intel="hw.model: Intel"
+ cpu_vendor=$(sysctl -a | grep hw.model | cut -c 1-15)
+ else
+ intel="GenuineIntel"
+ cpu_vendor=$(grep -i 'vendor' /proc/cpuinfo --max-count=1)
+ fi
+ if [[ "$cpu_vendor" != *"$intel"* ]]; then
+ config_params+=" --without-idxd"
+ else
+ config_params+=" --with-idxd"
+ fi
+
+ if [[ -d $CONFIG_FIO_SOURCE_DIR ]]; then
+ config_params+=" --with-fio=$CONFIG_FIO_SOURCE_DIR"
+ fi
+
+ if [ -d ${DEPENDENCY_DIR}/vtune_codes ]; then
+ config_params+=' --with-vtune='${DEPENDENCY_DIR}'/vtune_codes'
+ fi
+
+ if [ -d /usr/include/iscsi ]; then
+ libiscsi_version=$(grep LIBISCSI_API_VERSION /usr/include/iscsi/iscsi.h | head -1 | awk '{print $3}' | awk -F '(' '{print $2}' | awk -F ')' '{print $1}')
+ if [ $libiscsi_version -ge 20150621 ]; then
+ config_params+=' --with-iscsi-initiator'
+ fi
+ fi
+
+ if [ $SPDK_TEST_UNITTEST -eq 0 ]; then
+ config_params+=' --disable-unit-tests'
+ fi
+
+ if [ $SPDK_TEST_NVME_CUSE -eq 1 ]; then
+ config_params+=' --with-nvme-cuse'
+ fi
+
+ # for options with both dependencies and a test flag, set them here
+ if [ -f /usr/include/libpmemblk.h ] && [ $SPDK_TEST_PMDK -eq 1 ]; then
+ config_params+=' --with-pmdk'
+ fi
+
+ if [ -f /usr/include/libpmem.h ] && [ $SPDK_TEST_REDUCE -eq 1 ]; then
+ if [ $SPDK_TEST_ISAL -eq 1 ]; then
+ config_params+=' --with-reduce'
+ fi
+ fi
+
+ if [ -d /usr/include/rbd ] && [ -d /usr/include/rados ] && [ $SPDK_TEST_RBD -eq 1 ]; then
+ config_params+=' --with-rbd'
+ fi
+
+ if [ $SPDK_TEST_VPP -eq 1 ]; then
+ config_params+=" --with-vpp=${VPP_PATH}"
+ fi
+
+ # for options with no required dependencies, just test flags, set them here
+ if [ $SPDK_TEST_CRYPTO -eq 1 ]; then
+ config_params+=' --with-crypto'
+ fi
+
+ if [ $SPDK_TEST_OCF -eq 1 ]; then
+ config_params+=" --with-ocf"
+ fi
+
+ if [ $SPDK_RUN_UBSAN -eq 1 ]; then
+ config_params+=' --enable-ubsan'
+ fi
+
+ if [ $SPDK_RUN_ASAN -eq 1 ]; then
+ config_params+=' --enable-asan'
+ fi
+
+ if [ "$(uname -s)" = "Linux" ]; then
+ config_params+=' --enable-coverage'
+ fi
+
+ if [ $SPDK_TEST_ISAL -eq 0 ]; then
+ config_params+=' --without-isal'
+ fi
+
+ if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
+ if [[ -d /usr/include/fuse3 ]] || [[ -d /usr/local/include/fuse3 ]]; then
+ config_params+=' --with-fuse'
+ fi
+ fi
+
+ if [ $SPDK_TEST_RAID5 -eq 1 ]; then
+ config_params+=' --with-raid5'
+ fi
+
+ # Check whether liburing library header exists
+ if [ -f /usr/include/liburing/io_uring.h ] && [ $SPDK_TEST_URING -eq 1 ]; then
+ config_params+=' --with-uring'
+ fi
+
+ # By default, --with-dpdk is not set meaning the SPDK build will use the DPDK submodule.
+ # If a DPDK installation is found in a well-known location though, WITH_DPDK_DIR will be
+ # set which will override the default and use that DPDK installation instead.
+ if [ -n "$WITH_DPDK_DIR" ]; then
+ config_params+=" --with-dpdk=$WITH_DPDK_DIR"
+ fi
+
+ echo "$config_params"
+ xtrace_restore
+}
+
+function rpc_cmd() {
+ xtrace_disable
+ local rsp rc
+
+ echo "$@" >&$RPC_PIPE_INPUT
+ while read -t 5 -ru $RPC_PIPE_OUTPUT rsp; do
+ if [[ $rsp == "**STATUS="* ]]; then
+ break
+ fi
+ echo "$rsp"
+ done
+
+ rc=${rsp#*=}
+ xtrace_restore
+ [[ $rc == 0 ]]
+}
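+
+# Illustrative use of the coprocess-backed helper (comments only; the RPC method
+# shown is just an example of one used elsewhere in these tests):
+#
+#   rpc_cmd bdev_get_bdevs | jq -r '.[].name'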
+
+function rpc_cmd_simple_data_json() {
+
+ local elems="$1[@]" elem
+ local -gA jq_out=()
+ local jq val
+
+ local lvs=(
+ "uuid"
+ "name"
+ "base_bdev"
+ "total_data_clusters"
+ "free_clusters"
+ "block_size"
+ "cluster_size"
+ )
+
+ local bdev=(
+ "name"
+ "aliases[0]"
+ "block_size"
+ "num_blocks"
+ "uuid"
+ "product_name"
+ )
+
+ [[ -v $elems ]] || return 1
+
+ for elem in "${!elems}"; do
+ jq="${jq:+$jq,\"\\n\",}\"$elem\",\" \",.[0].$elem"
+ done
+ jq+=',"\n"'
+
+ shift
+ while read -r elem val; do
+ jq_out["$elem"]=$val
+ done < <(rpc_cmd "$@" | jq -jr "$jq")
+ ((${#jq_out[@]} > 0)) || return 1
+}
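+
+# Illustrative call (comments only; the bdev name is hypothetical): using the
+# "bdev" field list above, something like
+#
+#   rpc_cmd_simple_data_json bdev bdev_get_bdevs -b Malloc0
+#
+# would leave jq_out[name], jq_out[block_size], jq_out[num_blocks], etc. populated.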
+
+# invert error code of any command and also trigger ERR on 0 (unlike bash ! prefix)
+function NOT() {
+ if "$@"; then
+ return 1
+ fi
+}
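+
+# Illustrative use (comments only): assert that a command fails while still
+# triggering ERR if it unexpectedly succeeds, e.g.
+#
+#   NOT rpc_cmd bdev_get_bdevs -b nonexistent_bdev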
+
+function timing() {
+ direction="$1"
+ testname="$2"
+
+ now=$(date +%s)
+
+ if [ "$direction" = "enter" ]; then
+ export timing_stack="${timing_stack};${now}"
+ export test_stack="${test_stack};${testname}"
+ else
+ touch "$output_dir/timing.txt"
+ child_time=$(grep "^${test_stack:1};" $output_dir/timing.txt | awk '{s+=$2} END {print s}')
+
+ start_time=$(echo "$timing_stack" | sed -e 's@^.*;@@')
+ timing_stack=$(echo "$timing_stack" | sed -e 's@;[^;]*$@@')
+
+ elapsed=$((now - start_time - child_time))
+ echo "${test_stack:1} $elapsed" >> $output_dir/timing.txt
+
+ test_stack=$(echo "$test_stack" | sed -e 's@;[^;]*$@@')
+ fi
+}
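+
+# Illustrative nesting (comments only; names and numbers are made up): each
+# timing_exit appends "<semicolon-joined test stack> <seconds>" to timing.txt,
+# with child time subtracted from the parent, e.g.:
+#
+#   timing_enter suite; timing_enter case_a; timing_exit case_a; timing_exit suite
+#   # timing.txt -> "suite;case_a 3" and "suite 5"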
+
+function timing_enter() {
+ xtrace_disable
+ timing "enter" "$1"
+ xtrace_restore
+}
+
+function timing_exit() {
+ xtrace_disable
+ timing "exit" "$1"
+ xtrace_restore
+}
+
+function timing_finish() {
+ flamegraph='/usr/local/FlameGraph/flamegraph.pl'
+ if [ -x "$flamegraph" ]; then
+ "$flamegraph" \
+ --title 'Build Timing' \
+ --nametype 'Step:' \
+ --countname seconds \
+ $output_dir/timing.txt \
+ > $output_dir/timing.svg
+ fi
+}
+
+function create_test_list() {
+ xtrace_disable
+ # First search all scripts in main SPDK directory.
+ completion=$(grep -shI -d skip --include="*.sh" -e "run_test " $rootdir/*)
+ # Follow up with search in test directory recursively.
+ completion+=$(grep -rshI --include="*.sh" --exclude="autotest_common.sh" -e "run_test " $rootdir/test)
+ printf "%s" "$completion" | grep -v "#" \
+ | sed 's/^.*run_test/run_test/' | awk '{print $2}' \
+ | sed 's/\"//g' | sort > $output_dir/all_tests.txt || true
+ xtrace_restore
+}
+
+function gdb_attach() {
+ gdb -q --batch \
+ -ex 'handle SIGHUP nostop pass' \
+ -ex 'handle SIGQUIT nostop pass' \
+ -ex 'handle SIGPIPE nostop pass' \
+ -ex 'handle SIGALRM nostop pass' \
+ -ex 'handle SIGTERM nostop pass' \
+ -ex 'handle SIGUSR1 nostop pass' \
+ -ex 'handle SIGUSR2 nostop pass' \
+ -ex 'handle SIGCHLD nostop pass' \
+ -ex 'set print thread-events off' \
+ -ex 'cont' \
+ -ex 'thread apply all bt' \
+ -ex 'quit' \
+ --tty=/dev/stdout \
+ -p $1
+}
+
+function process_core() {
+ ret=0
+ while IFS= read -r -d '' core; do
+ exe=$(eu-readelf -n "$core" | grep psargs | sed "s/.*psargs: \([^ \'\" ]*\).*/\1/")
+ if [[ ! -f "$exe" ]]; then
+ exe=$(eu-readelf -n "$core" | grep -oP -m1 "$exe.+")
+ fi
+ echo "exe for $core is $exe"
+ if [[ -n "$exe" ]]; then
+ if hash gdb &> /dev/null; then
+ gdb -batch -ex "thread apply all bt full" $exe $core
+ fi
+ cp $exe $output_dir
+ fi
+ mv $core $output_dir
+ chmod a+r $output_dir/$core
+ ret=1
+ done < <(find . -type f \( -name 'core\.?[0-9]*' -o -name '*.core' \) -print0)
+ return $ret
+}
+
+function process_shm() {
+ type=$1
+ id=$2
+ if [ "$type" = "--pid" ]; then
+ id="pid${id}"
+ elif [ "$type" = "--id" ]; then
+ id="${id}"
+ else
+		echo "Please specify whether to search by pid or shared memory id."
+ return 1
+ fi
+
+ shm_files=$(find /dev/shm -name "*.${id}" -printf "%f\n")
+
+ if [[ -z $shm_files ]]; then
+		echo "SHM file for the specified PID or shared memory id (${id}) not found!"
+ return 1
+ fi
+ for n in $shm_files; do
+ tar -C /dev/shm/ -cvzf $output_dir/${n}_shm.tar.gz ${n}
+ done
+ return 0
+}
+
+function waitforlisten() {
+ # $1 = process pid
+ if [ -z "$1" ]; then
+ exit 1
+ fi
+
+ local rpc_addr="${2:-$DEFAULT_RPC_ADDR}"
+
+ echo "Waiting for process to start up and listen on UNIX domain socket $rpc_addr..."
+ # turn off trace for this loop
+ xtrace_disable
+ local ret=0
+ local i
+ for ((i = 40; i != 0; i--)); do
+ # if the process is no longer running, then exit the script
+ # since it means the application crashed
+ if ! kill -s 0 $1; then
+ echo "ERROR: process (pid: $1) is no longer running"
+ ret=1
+ break
+ fi
+
+ if $rootdir/scripts/rpc.py -t 1 -s "$rpc_addr" rpc_get_methods &> /dev/null; then
+ break
+ fi
+
+ sleep 0.5
+ done
+
+ xtrace_restore
+ if ((i == 0)); then
+ echo "ERROR: timeout while waiting for process (pid: $1) to start listening on '$rpc_addr'"
+ ret=1
+ fi
+ return $ret
+}
+
+function waitfornbd() {
+ local nbd_name=$1
+ local i
+
+ for ((i = 1; i <= 20; i++)); do
+ if grep -q -w $nbd_name /proc/partitions; then
+ break
+ else
+ sleep 0.1
+ fi
+ done
+
+ # The nbd device is now recognized as a block device, but there can be
+ # a small delay before we can start I/O to that block device. So loop
+ # here trying to read the first block of the nbd block device to a temp
+ # file. Note that dd returns success when reading an empty file, so we
+ # need to check the size of the output file instead.
+ for ((i = 1; i <= 20; i++)); do
+ dd if=/dev/$nbd_name of="$SPDK_TEST_STORAGE/nbdtest" bs=4096 count=1 iflag=direct
+ size=$(stat -c %s "$SPDK_TEST_STORAGE/nbdtest")
+ rm -f "$SPDK_TEST_STORAGE/nbdtest"
+ if [ "$size" != "0" ]; then
+ return 0
+ else
+ sleep 0.1
+ fi
+ done
+
+ return 1
+}
+
+function waitforbdev() {
+ local bdev_name=$1
+ local i
+
+ for ((i = 1; i <= 20; i++)); do
+ if $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qw $bdev_name; then
+ return 0
+ fi
+
+ if $rpc_py bdev_get_bdevs | jq -r '.[] .aliases' | grep -qw $bdev_name; then
+ return 0
+ fi
+
+ sleep 0.1
+ done
+
+ return 1
+}
+
+function make_filesystem() {
+ local fstype=$1
+ local dev_name=$2
+ local i=0
+ local force
+
+ if [ $fstype = ext4 ]; then
+ force=-F
+ else
+ force=-f
+ fi
+
+ while ! mkfs.${fstype} $force ${dev_name}; do
+ if [ $i -ge 15 ]; then
+ return 1
+ fi
+ i=$((i + 1))
+ sleep 1
+ done
+
+ return 0
+}
+
+function killprocess() {
+ # $1 = process pid
+ if [ -z "$1" ]; then
+ exit 1
+ fi
+
+ if kill -0 $1; then
+ if [ $(uname) = Linux ]; then
+ process_name=$(ps --no-headers -o comm= $1)
+ else
+ process_name=$(ps -c -o command $1 | tail -1)
+ fi
+ if [ "$process_name" = "sudo" ]; then
+ # kill the child process, which is the actual app
+ # (assume $1 has just one child)
+ local child
+ child="$(pgrep -P $1)"
+ echo "killing process with pid $child"
+ kill $child
+ else
+ echo "killing process with pid $1"
+ kill $1
+ fi
+
+		# wait for the process regardless of whether it's the dummy sudo one
+		# or the actual app - it should terminate anyway
+ wait $1
+ else
+ # the process is not there anymore
+ echo "Process with pid $1 is not found"
+ exit 1
+ fi
+}
+
+function iscsicleanup() {
+ echo "Cleaning up iSCSI connection"
+ iscsiadm -m node --logout || true
+ iscsiadm -m node -o delete || true
+ rm -rf /var/lib/iscsi/nodes/*
+}
+
+function stop_iscsi_service() {
+ if cat /etc/*-release | grep Ubuntu; then
+ service open-iscsi stop
+ else
+ service iscsid stop
+ fi
+}
+
+function start_iscsi_service() {
+ if cat /etc/*-release | grep Ubuntu; then
+ service open-iscsi start
+ else
+ service iscsid start
+ fi
+}
+
+function rbd_setup() {
+ # $1 = monitor ip address
+ # $2 = name of the namespace
+ if [ -z "$1" ]; then
+ echo "No monitor IP address provided for ceph"
+ exit 1
+ fi
+ if [ -n "$2" ]; then
+ if ip netns list | grep "$2"; then
+ NS_CMD="ip netns exec $2"
+ else
+ echo "No namespace $2 exists"
+ exit 1
+ fi
+ fi
+
+ if hash ceph; then
+ export PG_NUM=128
+ export RBD_POOL=rbd
+ export RBD_NAME=foo
+ $NS_CMD $rootdir/scripts/ceph/stop.sh || true
+ $NS_CMD $rootdir/scripts/ceph/start.sh $1
+
+ $NS_CMD ceph osd pool create $RBD_POOL $PG_NUM || true
+ $NS_CMD rbd create $RBD_NAME --size 1000
+ fi
+}
+
+function rbd_cleanup() {
+ if hash ceph; then
+ $rootdir/scripts/ceph/stop.sh || true
+ rm -f /var/tmp/ceph_raw.img
+ fi
+}
+
+function nvme_cli_build() {
+ if [[ -z "${DEPENDENCY_DIR}" ]]; then
+ echo DEPENDENCY_DIR not defined!
+ exit 1
+ fi
+
+ spdk_nvme_cli="${DEPENDENCY_DIR}/nvme-cli"
+
+ if [[ ! -d $spdk_nvme_cli ]]; then
+ echo "nvme-cli repository not found at $spdk_nvme_cli; skipping tests."
+ exit 1
+ fi
+
+ if ! grep -q "DEF_VER=v1.6" $spdk_nvme_cli/NVME-VERSION-GEN; then
+ echo "SPDK supports only \"spdk/nvme-cli\" project on \"spdk-1.6\" branch."
+ exit 1
+ fi
+
+ # Build against the version of SPDK under test
+ pushd $spdk_nvme_cli
+
+ # Remove and recreate git index in case it became corrupted
+ if ! git clean -dfx; then
+ rm -f .git/index
+ git clean -dfx
+ git reset --hard
+ fi
+
+ rm -f "$spdk_nvme_cli/spdk"
+ ln -sf "$rootdir" "$spdk_nvme_cli/spdk"
+
+ make -j$(nproc) LDFLAGS="$(make -s -C $spdk_nvme_cli/spdk ldflags)"
+ popd
+}
+
+function _start_stub() {
+ # Disable ASLR for multi-process testing. SPDK does support using DPDK multi-process,
+ # but ASLR can still be unreliable in some cases.
+ # We will reenable it again after multi-process testing is complete in kill_stub().
+ # Save current setting so it can be restored upon calling kill_stub().
+ _randomize_va_space=$(< /proc/sys/kernel/randomize_va_space)
+ echo 0 > /proc/sys/kernel/randomize_va_space
+ $rootdir/test/app/stub/stub $1 &
+ stubpid=$!
+	echo Waiting for stub to be ready for secondary processes...
+ while ! [ -e /var/run/spdk_stub0 ]; do
+ # If stub dies while we wait, bail
+ [[ -e /proc/$stubpid ]] || return 1
+ sleep 1s
+ done
+ echo done.
+}
+
+function start_stub() {
+ if ! _start_stub "$@"; then
+ echo "stub failed" >&2
+ return 1
+ fi
+}
+
+function kill_stub() {
+ if [[ -e /proc/$stubpid ]]; then
+ kill $1 $stubpid
+ wait $stubpid
+ fi 2> /dev/null || :
+ rm -f /var/run/spdk_stub0
+ # Re-enable ASLR now that we are done with multi-process testing
+ # Note: "1" enables ASLR w/o randomizing data segments, "2" adds data segment
+ # randomizing and is the default on all recent Linux kernels
+ echo "${_randomize_va_space:-2}" > /proc/sys/kernel/randomize_va_space
+}
+
+function run_test() {
+ if [ $# -le 1 ]; then
+ echo "Not enough parameters"
+ echo "usage: run_test test_name test_script [script_params]"
+ exit 1
+ fi
+
+ xtrace_disable
+ local test_name="$1"
+ shift
+
+ if [ -n "$test_domain" ]; then
+ export test_domain="${test_domain}.${test_name}"
+ else
+ export test_domain="$test_name"
+ fi
+
+ timing_enter $test_name
+ echo "************************************"
+ echo "START TEST $test_name"
+ echo "************************************"
+ xtrace_restore
+ time "$@"
+ xtrace_disable
+ echo "************************************"
+ echo "END TEST $test_name"
+ echo "************************************"
+ timing_exit $test_name
+
+ export test_domain=${test_domain%"$test_name"}
+ if [ -n "$test_domain" ]; then
+ export test_domain=${test_domain%?}
+ fi
+
+ if [ -z "$test_domain" ]; then
+ echo "top_level $test_name" >> $output_dir/test_completions.txt
+ else
+ echo "$test_domain $test_name" >> $output_dir/test_completions.txt
+ fi
+ xtrace_restore
+}
+
+function skip_run_test_with_warning() {
+ echo "WARNING: $1"
+ echo "Test run may fail if run with autorun.sh"
+ echo "Please check your $rootdir/test/common/skipped_tests.txt"
+}
+
+function print_backtrace() {
+ # if errexit is not enabled, don't print a backtrace
+ [[ "$-" =~ e ]] || return 0
+
+ local args=("${BASH_ARGV[@]}")
+
+ xtrace_disable
+ echo "========== Backtrace start: =========="
+ echo ""
+ for i in $(seq 1 $((${#FUNCNAME[@]} - 1))); do
+ local func="${FUNCNAME[$i]}"
+ local line_nr="${BASH_LINENO[$((i - 1))]}"
+ local src="${BASH_SOURCE[$i]}"
+ local bt="" cmdline=()
+
+ if [[ -f $src ]]; then
+ bt=$(nl -w 4 -ba -nln $src | grep -B 5 -A 5 "^${line_nr}[^0-9]" \
+ | sed "s/^/ /g" | sed "s/^ $line_nr /=> $line_nr /g")
+ fi
+
+ # If extdebug set the BASH_ARGC[i], try to fetch all the args
+ if ((BASH_ARGC[i] > 0)); then
+ # Use argc as index to reverse the stack
+ local argc=${BASH_ARGC[i]} arg
+ for arg in "${args[@]::BASH_ARGC[i]}"; do
+ cmdline[argc--]="[\"$arg\"]"
+ done
+ args=("${args[@]:BASH_ARGC[i]}")
+ fi
+
+ echo "in $src:$line_nr -> $func($(
+ IFS=","
+ printf '%s\n' "${cmdline[*]:-[]}"
+ ))"
+ echo " ..."
+ echo "${bt:-backtrace unavailable}"
+ echo " ..."
+ done
+ echo ""
+ echo "========== Backtrace end =========="
+ xtrace_restore
+ return 0
+}
+
+function waitforserial() {
+ local i=0
+ local nvme_device_counter=1
+ if [[ -n "$2" ]]; then
+ nvme_device_counter=$2
+ fi
+
+ while [ $(lsblk -l -o NAME,SERIAL | grep -c $1) -lt $nvme_device_counter ]; do
+ [ $i -lt 15 ] || break
+ i=$((i + 1))
+ echo "Waiting for devices"
+ sleep 1
+ done
+
+ if [[ $(lsblk -l -o NAME,SERIAL | grep -c $1) -lt $nvme_device_counter ]]; then
+ return 1
+ fi
+
+ return 0
+}
+
+function waitforserial_disconnect() {
+ local i=0
+ while lsblk -o NAME,SERIAL | grep -q -w $1; do
+ [ $i -lt 15 ] || break
+ i=$((i + 1))
+ echo "Waiting for disconnect devices"
+ sleep 1
+ done
+
+ if lsblk -l -o NAME | grep -q -w $1; then
+ return 1
+ fi
+
+ return 0
+}
+
+function waitforblk() {
+ local i=0
+ while ! lsblk -l -o NAME | grep -q -w $1; do
+ [ $i -lt 15 ] || break
+ i=$((i + 1))
+ sleep 1
+ done
+
+ if ! lsblk -l -o NAME | grep -q -w $1; then
+ return 1
+ fi
+
+ return 0
+}
+
+function waitforblk_disconnect() {
+ local i=0
+ while lsblk -l -o NAME | grep -q -w $1; do
+ [ $i -lt 15 ] || break
+ i=$((i + 1))
+ sleep 1
+ done
+
+ if lsblk -l -o NAME | grep -q -w $1; then
+ return 1
+ fi
+
+ return 0
+}
+
+function waitforfile() {
+ local i=0
+ while [ ! -e $1 ]; do
+ [ $i -lt 200 ] || break
+ i=$((i + 1))
+ sleep 0.1
+ done
+
+ if [ ! -e $1 ]; then
+ return 1
+ fi
+
+ return 0
+}
+
+function fio_config_gen() {
+ local config_file=$1
+ local workload=$2
+ local bdev_type=$3
+ local fio_dir=$CONFIG_FIO_SOURCE_DIR
+
+ if [ -e "$config_file" ]; then
+ echo "Configuration File Already Exists!: $config_file"
+ return 1
+ fi
+
+ if [ -z "$workload" ]; then
+ workload=randrw
+ fi
+
+ touch $1
+
+ cat > $1 << EOL
+[global]
+thread=1
+group_reporting=1
+direct=1
+norandommap=1
+percentile_list=50:99:99.9:99.99:99.999
+time_based=1
+ramp_time=0
+EOL
+
+ if [ "$workload" == "verify" ]; then
+ cat <<- EOL >> $config_file
+ verify=sha1
+ verify_backlog=1024
+ rw=randwrite
+ EOL
+
+ # To avoid a potential data race due to the AIO device
+ # flush mechanism, add the flag to serialize the writes.
+ # This fixes the intermittent IO failure reported in issue #935
+ if [ "$bdev_type" == "AIO" ]; then
+ if [[ $($fio_dir/fio --version) == *"fio-3"* ]]; then
+ echo "serialize_overlap=1" >> $config_file
+ fi
+ fi
+ elif [ "$workload" == "trim" ]; then
+ echo "rw=trimwrite" >> $config_file
+ else
+ echo "rw=$workload" >> $config_file
+ fi
+}
+
+function fio_bdev() {
+ # Setup fio binary cmd line
+ local fio_dir=$CONFIG_FIO_SOURCE_DIR
+ local bdev_plugin="$rootdir/build/fio/spdk_bdev"
+
+ # Preload AddressSanitizer library to fio if fio_plugin was compiled with it
+ local asan_lib
+ asan_lib=$(ldd $bdev_plugin | grep libasan | awk '{print $3}')
+
+ LD_PRELOAD="$asan_lib $bdev_plugin" "$fio_dir"/fio "$@"
+}
+
+function fio_nvme() {
+ # Setup fio binary cmd line
+ local fio_dir=$CONFIG_FIO_SOURCE_DIR
+ local nvme_plugin="$rootdir/build/fio/spdk_nvme"
+
+ # Preload AddressSanitizer library to fio if fio_plugin was compiled with it
+ asan_lib=$(ldd $nvme_plugin | grep libasan | awk '{print $3}')
+
+ LD_PRELOAD="$asan_lib $nvme_plugin" "$fio_dir"/fio "$@"
+}
+
+function get_lvs_free_mb() {
+ local lvs_uuid=$1
+ local lvs_info
+ local fc
+ local cs
+ lvs_info=$($rpc_py bdev_lvol_get_lvstores)
+ fc=$(jq ".[] | select(.uuid==\"$lvs_uuid\") .free_clusters" <<< "$lvs_info")
+ cs=$(jq ".[] | select(.uuid==\"$lvs_uuid\") .cluster_size" <<< "$lvs_info")
+
+ # Convert to MB
+ free_mb=$((fc * cs / 1024 / 1024))
+ echo "$free_mb"
+}
+
+function get_bdev_size() {
+ local bdev_name=$1
+ local bdev_info
+ local bs
+ local nb
+ bdev_info=$($rpc_py bdev_get_bdevs -b $bdev_name)
+ bs=$(jq ".[] .block_size" <<< "$bdev_info")
+ nb=$(jq ".[] .num_blocks" <<< "$bdev_info")
+
+ # Convert to MB
+ bdev_size=$((bs * nb / 1024 / 1024))
+ echo "$bdev_size"
+}
+
+function autotest_cleanup() {
+ $rootdir/scripts/setup.sh reset
+ $rootdir/scripts/setup.sh cleanup
+ if [ $(uname -s) = "Linux" ]; then
+ if grep -q '#define SPDK_CONFIG_IGB_UIO_DRIVER 1' $rootdir/include/spdk/config.h; then
+ [[ -e /sys/module/igb_uio ]] && rmmod igb_uio
+ else
+ modprobe -r uio_pci_generic
+ fi
+ fi
+ rm -rf "$asan_suppression_file"
+}
+
+function freebsd_update_contigmem_mod() {
+ if [ $(uname) = FreeBSD ]; then
+ kldunload contigmem.ko || true
+ if [ -n "$WITH_DPDK_DIR" ]; then
+ echo "Warning: SPDK only works on FreeBSD with patches that only exist in SPDK's dpdk submodule"
+ cp -f "$WITH_DPDK_DIR/kmod/contigmem.ko" /boot/modules/
+ cp -f "$WITH_DPDK_DIR/kmod/contigmem.ko" /boot/kernel/
+ cp -f "$WITH_DPDK_DIR/kmod/nic_uio.ko" /boot/modules/
+ cp -f "$WITH_DPDK_DIR/kmod/nic_uio.ko" /boot/kernel/
+ else
+ cp -f "$rootdir/dpdk/build/kmod/contigmem.ko" /boot/modules/
+ cp -f "$rootdir/dpdk/build/kmod/contigmem.ko" /boot/kernel/
+ cp -f "$rootdir/dpdk/build/kmod/nic_uio.ko" /boot/modules/
+ cp -f "$rootdir/dpdk/build/kmod/nic_uio.ko" /boot/kernel/
+ fi
+ fi
+}
+
+function get_nvme_name_from_bdf() {
+ blkname=()
+
+ nvme_devs=$(lsblk -d --output NAME | grep "^nvme") || true
+ if [ -z "$nvme_devs" ]; then
+ return
+ fi
+ for dev in $nvme_devs; do
+ link_name=$(readlink /sys/block/$dev/device/device) || true
+ if [ -z "$link_name" ]; then
+ link_name=$(readlink /sys/block/$dev/device)
+ fi
+ bdf=$(basename "$link_name")
+ if [ "$bdf" = "$1" ]; then
+ blkname+=($dev)
+ fi
+ done
+
+ printf '%s\n' "${blkname[@]}"
+}
+
+function get_nvme_ctrlr_from_bdf() {
+ bdf_sysfs_path=$(readlink -f /sys/class/nvme/nvme* | grep "$1/nvme/nvme")
+ if [[ -z "$bdf_sysfs_path" ]]; then
+ return
+ fi
+
+ printf '%s\n' "$(basename $bdf_sysfs_path)"
+}
+
+# Get BDF addresses of all NVMe drives currently attached to
+# uio-pci-generic or vfio-pci
+function get_nvme_bdfs() {
+ xtrace_disable
+ bdfs=$(jq -r .config[].params.traddr <<< $($rootdir/scripts/gen_nvme.sh --json))
+ if [[ -z $bdfs ]]; then
+ echo "No devices to test on!"
+ exit 1
+ fi
+ echo "$bdfs"
+ xtrace_restore
+}
+
+# Same as the function above, but return only the first disk's BDF address
+function get_first_nvme_bdf() {
+ head -1 <<< "$(get_nvme_bdfs)"
+}
+
+function nvme_namespace_revert() {
+ $rootdir/scripts/setup.sh
+ sleep 1
+ bdfs=$(get_nvme_bdfs)
+
+ $rootdir/scripts/setup.sh reset
+ sleep 1
+
+ for bdf in $bdfs; do
+ nvme_ctrlr=/dev/$(get_nvme_ctrlr_from_bdf ${bdf})
+ if [[ -z "$nvme_ctrlr" ]]; then
+ continue
+ fi
+
+ # Check Optional Admin Command Support for Namespace Management
+ oacs=$(nvme id-ctrl ${nvme_ctrlr} | grep oacs | cut -d: -f2)
+ oacs_ns_manage=$((oacs & 0x8))
+
+ if [[ "$oacs_ns_manage" -ne 0 ]]; then
+ # This assumes every NVMe controller contains a single namespace,
+ # encompassing the Total NVM Capacity and formatted with a 512-byte block size.
+ # A 512-byte block size is needed for test/vhost/vhost_boot.sh to
+ # run successfully.
+
+ unvmcap=$(nvme id-ctrl ${nvme_ctrlr} | grep unvmcap | cut -d: -f2)
+ if [[ "$unvmcap" -eq 0 ]]; then
+ # All available space already used
+ continue
+ fi
+ tnvmcap=$(nvme id-ctrl ${nvme_ctrlr} | grep tnvmcap | cut -d: -f2)
+ blksize=512
+
+ size=$((tnvmcap / blksize))
+
+ nvme detach-ns ${nvme_ctrlr} -n 0xffffffff -c 0 || true
+ nvme delete-ns ${nvme_ctrlr} -n 0xffffffff || true
+ nvme create-ns ${nvme_ctrlr} -s ${size} -c ${size} -b ${blksize}
+ nvme attach-ns ${nvme_ctrlr} -n 1 -c 0
+ nvme reset ${nvme_ctrlr}
+ waitforblk "${nvme_ctrlr}n1"
+ fi
+ done
+}
+
+# Get BDFs based on device ID, such as 0x0a54
+function get_nvme_bdfs_by_id() {
+ local bdfs=()
+
+ for bdf in $(get_nvme_bdfs); do
+ device=$(cat /sys/bus/pci/devices/$bdf/device) || true
+ if [[ "$device" == "$1" ]]; then
+ bdfs+=($bdf)
+ fi
+ done
+
+ printf '%s\n' "${bdfs[@]}"
+}
+
+function opal_revert_cleanup() {
+ # The OPAL CI tests are only run on P4510 devices.
+ mapfile -t bdfs < <(get_nvme_bdfs_by_id 0x0a54)
+ if [[ -z ${bdfs[0]} ]]; then
+ return 0
+ fi
+
+ $SPDK_BIN_DIR/spdk_tgt &
+ spdk_tgt_pid=$!
+ waitforlisten $spdk_tgt_pid
+
+ for bdf in "${bdfs[@]}"; do
+ $rootdir/scripts/rpc.py bdev_nvme_attach_controller -b "nvme0" -t "pcie" -a ${bdf}
+ # Ignore if this fails.
+ $rootdir/scripts/rpc.py bdev_nvme_opal_revert -b nvme0 -p test || true
+ done
+
+ killprocess $spdk_tgt_pid
+}
+
+# Define temp storage for all the tests. Look for 2GB at minimum
+set_test_storage "${TEST_MIN_STORAGE_SIZE:-$((1 << 31))}"
+
+set -o errtrace
+shopt -s extdebug
+trap "trap - ERR; print_backtrace >&2" ERR
+
+PS4=' \t \$ '
+if $SPDK_AUTOTEST_X; then
+ # explicitly enable xtraces, overriding any tracking information.
+ unset XTRACE_DISABLED
+ unset XTRACE_NESTING_LEVEL
+ set -x
+ xtrace_enable
+else
+ xtrace_restore
+fi
diff --git a/src/spdk/test/common/config/README.md b/src/spdk/test/common/config/README.md
new file mode 100644
index 000000000..26a587709
--- /dev/null
+++ b/src/spdk/test/common/config/README.md
@@ -0,0 +1,104 @@
+# Virtual Test Configuration
+
+This readme and the associated bash script, vm_setup.sh, are intended to assist developers in quickly
+preparing a virtual test environment on which to run the SPDK validation tests rooted at autorun.sh.
+This file contains basic information about SPDK environment requirements, an introduction to the
+autorun-spdk.conf files used to moderate which tests are run by autorun.sh, and step-by-step instructions
+for spinning up a VM capable of running the SPDK test suite.
+There is no need for external hardware to run these tests. The Linux kernel comes with the drivers necessary
+to emulate an RDMA-enabled NIC, and NVMe controllers can also be virtualized in emulators such as QEMU.
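+
+As an illustration only (the interface name, image paths, and sizes below are placeholders), a Soft-RoCE
+(rxe) device can be layered on top of an existing netdev and an emulated NVMe controller can be attached
+to a QEMU guest roughly as follows:
+~~~{.sh}
+# Emulate an RDMA-capable NIC on top of eth0 using the kernel rxe driver
+sudo modprobe rdma_rxe
+sudo rdma link add rxe0 type rxe netdev eth0
+
+# Expose an emulated NVMe controller to a QEMU guest
+qemu-img create -f raw /var/tmp/nvme.img 8G
+qemu-system-x86_64 -enable-kvm -m 8G -nographic \
+  -drive file=fedora.qcow2,if=virtio \
+  -drive file=/var/tmp/nvme.img,if=none,format=raw,id=nvme0 \
+  -device nvme,drive=nvme0,serial=spdk0001
+~~~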
+
+## VM Environment Requirements (Host)
+
+- 8 GiB of RAM (for DPDK)
+- Enable intel_kvm (Intel VT-x) on the host machine from the BIOS.
+- Enable nesting for VMs in the kernel command line (for vhost tests).
+  - In `/etc/default/grub`, append the following to the GRUB_CMDLINE_LINUX line: intel_iommu=on kvm-intel.nested=1 (see the example below).
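+
+For example, on a Fedora host the resulting line in `/etc/default/grub` might look like the one below
+(values other than the two appended options depend on your installation; adjust the grub.cfg path on UEFI systems),
+after which the grub configuration is regenerated and the host rebooted:
+~~~{.sh}
+GRUB_CMDLINE_LINUX="rhgb quiet intel_iommu=on kvm-intel.nested=1"
+
+sudo grub2-mkconfig -o /boot/grub2/grub.cfg
+sudo reboot
+~~~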
+
+## VM Specs
+
+When creating the user during the fedora installation, it is best to use the name sys_sgsw. Efforts are being made
+to remove all references to this user, or files specific to this user from the codebase, but there are still some
+trailing references to it.
+
+## Autorun-spdk.conf
+
+Every machine that runs the autotest scripts should include a file titled autorun-spdk.conf in the home directory
+of the user that will run them. This file consists of several lines of the form 'variable_name=0/1'. autorun.sh sources
+this file each time it is run, and determines which tests to attempt based on which variables are defined in the
+configuration file. For a full list of the variable declarations available for autorun-spdk.conf, please see
+`test/common/autotest_common.sh` starting at line 13.
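+
+A minimal sketch of such a file is shown below; the variable names are taken from the default configuration
+generated by vm_setup.sh, and any test group can be switched on or off per machine:
+~~~{.sh}
+# ~/autorun-spdk.conf - 1 runs a test group, 0 skips it
+SPDK_RUN_FUNCTIONAL_TEST=1
+SPDK_TEST_UNITTEST=1
+SPDK_TEST_NVME=1
+SPDK_TEST_NVMF=1
+SPDK_RUN_ASAN=1
+# requires a nested VM, see the vhost section below
+SPDK_TEST_VHOST=0
+~~~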
+
+## Steps for Configuring the VM
+
+1. Download a fresh Fedora 26 image.
+2. Perform the installation of Fedora 26 server.
+3. Create an admin user sys_sgsw (enabling passwordless sudo for this account will make life easier during the tests).
+4. Run the vm_setup.sh script, which will install all required dependencies (see the example invocation after this list).
+5. Modify the autorun-spdk.conf file in the home directory.
+6. Reboot the VM.
+7. Run autorun.sh for SPDK. Any output files will be placed in `~/spdk_repo/output/`.
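+
+For step 4, one possible invocation is shown below (paths are relative to where the SPDK repository was
+cloned; see the usage text in vm_setup.sh for the full option list):
+~~~{.sh}
+# Upgrade system packages and install the package-manager based dependencies
+./spdk/test/common/config/vm_setup.sh --upgrade --install-deps
+# Or additionally limit the from-source builds to a subset of test components
+./spdk/test/common/config/vm_setup.sh -i -t fio,spdk
+~~~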
+
+## Additional Steps for Preparing the Vhost Tests
+
+The Vhost tests also require the creation of a second virtual machine nested inside of the test VM.
+Please follow the directions below to complete that installation. Note that host refers to the Fedora VM
+created above and guest or VM refer to the Ubuntu VM created in this section.
+
+1. Follow instructions from spdk/scripts/vagrant/README.md
+ - install all needed packages mentioned in "Mac OSX Setup" or "Windows 10 Setup" sections
+ - follow steps from "Configure Vagrant" section
+
+2. Use Vagrant scripts located in spdk/scripts/vagrant to automatically generate
+ VM image to use in SPDK vhost tests.
+ Example command:
+ ~~~{.sh}
+ spdk/scripts/vagrant/create_vhost_vm.sh --move-to-def-dirs ubuntu16
+ ~~~
+ This command will:
+ - Download a Ubuntu 16.04 image file
+ - upgrade the system and install needed dependencies (fio, sg3-utils, bc)
+  - add an entry to the VM's ~/.ssh/authorized_keys
+ - add appropriate options to GRUB command line and update grub
+ - convert the image to .qcow2 format
+ - move .qcow2 file and ssh keys to default locations used by vhost test scripts
+
+Alternatively, it is possible to create the VM image manually using the following steps:
+
+1. Create an image file for the VM. It does not have to be large, about 3.5G should suffice.
+2. Create an ssh keypair for host-guest communications (performed on the host):
+  - Generate an ssh keypair with the name spdk_vhost_id_rsa and save it in `/root/.ssh` (a sample command is shown after this list).
+ - Make sure that only root has read access to the private key.
+3. Install the OS in the VM image (performed on guest):
+ - Use the latest Ubuntu server (Currently 16.04 LTS).
+  - When partitioning the disk, make one partition that consumes the whole disk, mounted at /. Do not encrypt the disk or enable LVM.
+ - Choose the OpenSSH server packages during install.
+4. Post installation configuration (performed on guest):
+ - Run the following commands to enable all necessary dependencies:
+ ~~~{.sh}
+ sudo apt update
+ sudo apt upgrade
+ sudo apt install fio sg3-utils bc
+ ~~~
+ - Enable the root user: "sudo passwd root -> root".
+ - Enable root login over ssh: vim `/etc/ssh/sshd_config` -> PermitRootLogin=yes.
+ - Disable DNS for ssh: `/etc/ssh/sshd_config` -> UseDNS=no.
+  - Add the spdk_vhost public key to root's authorized keys: append the spdk_vhost_id_rsa.pub key to `/root/.ssh/authorized_keys`.
+ Remember to save the private key in `~/.ssh/spdk_vhost_id_rsa` on the host.
+ - Change the grub boot options for the guest as follows:
+ - Add "console=ttyS0 earlyprintk=ttyS0" to the boot options in `/etc/default/grub` (for serial output redirect).
+ - Add "scsi_mod.use_blk_mq=1" to boot options in `/etc/default/grub`.
+ ~~~{.sh}
+ sudo update-grub
+ ~~~
+ - Reboot the VM.
+ - Remove any unnecessary packages (this is to make booting the VM faster):
+ ~~~{.sh}
+ apt purge snapd
+ apt purge ubuntu-core-launcher
+ apt purge squashfs-tools
+ apt purge unattended-upgrades
+ ~~~
+5. Copy the fio binary from the guest location `/usr/bin/fio` to the host location `/home/sys_sgsw/fio_ubuntu`.
+6. Place the guest VM in the host at the following location: `/home/sys_sgsw/vhost_vm_image.qcow2`.
+7. On the host, edit the `~/autorun-spdk.conf` file to include the following line: SPDK_TEST_VHOST=1.
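+
+For step 2 of the manual procedure above, the keypair can be generated with something like the following
+(a sketch; the key type and size are a matter of preference):
+~~~{.sh}
+sudo mkdir -p /root/.ssh
+sudo ssh-keygen -t rsa -b 4096 -N '' -f /root/.ssh/spdk_vhost_id_rsa
+sudo chmod 600 /root/.ssh/spdk_vhost_id_rsa
+~~~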
diff --git a/src/spdk/test/common/config/pkgdep/apt-get b/src/spdk/test/common/config/pkgdep/apt-get
new file mode 100644
index 000000000..a1630620d
--- /dev/null
+++ b/src/spdk/test/common/config/pkgdep/apt-get
@@ -0,0 +1,100 @@
+package_manager=apt-get
+
+update() {
+ sudo "$package_manager" update
+}
+
+install() {
+ (( $# )) || return 0
+
+ sudo "$package_manager" install -y "$@"
+}
+
+upgrade() {
+ sudo "$package_manager" update
+ sudo "$package_manager" upgrade -y
+}
+
+
+pre_install() {
+ echo "Package perl-open is not available at Ubuntu repositories" >&2
+
+ update
+
+ if [[ $INSTALL_TSOCKS == true ]]; then
+ install tsocks
+ fi
+
+ # asan and ubsan have to be installed together to avoid mixing up gcc versions
+ if install libasan5; then
+ install libubsan1
+ else
+ echo "Latest libasan5 is not available" >&2
+ echo " installing libasan2 and corresponding libubsan0" >&2
+ install libasan2
+ install libubsan0
+ fi
+ if ! install rdma-core; then
+ echo "Package rdma-core is avaliable at Ubuntu 18 [universe] repositorium" >&2
+ install rdmacm-utils
+ install ibverbs-utils
+ else
+ LIBRXE_INSTALL=false
+ fi
+ if ! install libpmempool1; then
+ echo "Package libpmempool1 is available at Ubuntu 18 [universe] repositorium" >&2
+ fi
+ if ! install clang-tools; then
+ echo "Package clang-tools is available at Ubuntu 18 [universe] repositorium" >&2
+ fi
+ if ! install --no-install-suggests --no-install-recommends open-isns-utils; then
+ echo "Package open-isns-utils is available at Ubuntu 18 [universe] repositorium" >&2
+ fi
+
+ # Package name for Ubuntu 18 is targetcli-fb but for Ubuntu 16 it's targetcli
+ if ! install targetcli-fb; then
+ install targetcli
+ fi
+
+ # On Ubuntu 20.04 (focal) btrfs-tools are available under a different name - btrfs-progs
+ if ! install btrfs-tools; then
+ install btrfs-progs
+ fi
+}
+
+packages=(
+ valgrind
+ jq
+ nvme-cli
+ ceph
+ gdb
+ fio
+ librbd-dev
+ linux-headers-generic
+ libgflags-dev
+ autoconf
+ automake
+ libtool
+ libmount-dev
+ open-iscsi
+ libglib2.0-dev
+ libpixman-1-dev
+ astyle
+ elfutils
+ libelf-dev
+ flex
+ bison
+ libswitch-perl
+ gdisk
+ socat
+ sshfs
+ sshpass
+ python3-pandas
+ bc
+ smartmontools
+ wget
+)
+
+if [[ $OSID != ubuntu ]]; then
+ echo "Located apt-get package manager, but it was tested for Ubuntu only"
+fi
diff --git a/src/spdk/test/common/config/pkgdep/dnf b/src/spdk/test/common/config/pkgdep/dnf
new file mode 100644
index 000000000..b009f106e
--- /dev/null
+++ b/src/spdk/test/common/config/pkgdep/dnf
@@ -0,0 +1,72 @@
+package_manager=dnf
+
+upgrade() {
+ sudo "$package_manager" upgrade -y
+}
+
+install() {
+ (($#)) || return 0
+
+ sudo "$package_manager" install -y "$@"
+}
+
+packages=(
+ valgrind
+ jq
+ nvme-cli
+ ceph
+ gdb
+ fio
+ librbd-devel
+ kernel-devel
+ gflags-devel
+ libasan
+ libubsan
+ autoconf
+ automake
+ libtool
+ libmount-devel
+ iscsi-initiator-utils
+ isns-utils-devel
+ pmempool
+ perl-open
+ glib2-devel
+ pixman-devel
+ astyle-devel
+ elfutils
+ libabigail
+ elfutils-libelf-devel
+ flex
+ bison
+ targetcli
+ perl-Switch
+ librdmacm-utils
+ libibverbs-utils
+ gdisk
+ socat
+ sshfs
+ sshpass
+ python3-pandas
+ btrfs-progs
+ rpm-build
+ iptables
+ clang-analyzer
+ bc
+ kernel-modules-extra
+ systemd-devel
+ smartmontools
+ wget
+)
+
+pre_install() {
+ if [[ $INSTALL_TSOCKS == true ]]; then
+ # Currently, the tsocks package is retired in Fedora 31, so don't exit in case
+ # the installation fails.
+ # FIXME: Review when fedora starts to successfully build this package again.
+ install tsocks || echo "Installation of the tsocks package failed, proxy may not be available"
+ fi
+}
+
+if [[ $OSID != fedora ]]; then
+ echo "Located dnf package manager, but it was tested for Fedora only"
+fi
diff --git a/src/spdk/test/common/config/pkgdep/git b/src/spdk/test/common/config/pkgdep/git
new file mode 100644
index 000000000..f46183ac8
--- /dev/null
+++ b/src/spdk/test/common/config/pkgdep/git
@@ -0,0 +1,325 @@
+function install_spdk() {
+ mkdir -p "$GIT_REPOS/spdk_repo/output" || echo "Can not create spdk_repo/output directory."
+
+ if [[ -d $GIT_REPOS/spdk_repo/spdk ]]; then
+ echo "spdk source already present, not cloning"
+ else
+ git -C "$GIT_REPOS/spdk_repo" clone "${GIT_REPO_SPDK}"
+ fi
+ git -C "$GIT_REPOS/spdk_repo/spdk" config submodule.dpdk.url "${GIT_REPO_DPDK}"
+ git -C "$GIT_REPOS/spdk_repo/spdk" config submodule.intel-ipsec-mb.url "${GIT_REPO_INTEL_IPSEC_MB}"
+ git -C "$GIT_REPOS/spdk_repo/spdk" submodule update --init --recursive
+}
+
+function install_refspdk() {
+ local last_release
+ local output_dir
+ local config_params
+ local rootdir
+
+ # Create a reference SPDK build for ABI tests
+ git -C "$GIT_REPOS/spdk_repo/spdk" fetch --tags
+ last_release=$(git -C "$GIT_REPOS/spdk_repo/spdk" tag | sort --version-sort | grep -v rc | tail -n1)
+ output_dir="$GIT_REPOS/spdk_$(tr . _ < <(tr -d '[:alpha:]' <<< $last_release))"
+
+ if [[ ! -d $output_dir ]]; then
+ cp -r "$GIT_REPOS/spdk_repo/spdk" "$output_dir"
+ fi
+
+ git -C "$output_dir" checkout "$last_release"
+ git -C "$output_dir" submodule update --init
+
+ cat > $HOME/autorun-spdk.conf <<- EOF
+ SPDK_BUILD_SHARED_OBJECT=1
+ SPDK_TEST_AUTOBUILD=1
+ SPDK_TEST_UNITTEST=1
+ SPDK_TEST_BLOCKDEV=1
+ SPDK_TEST_PMDK=1
+ SPDK_TEST_ISAL=1
+ SPDK_TEST_REDUCE=1
+ SPDK_TEST_CRYPTO=1
+ SPDK_TEST_FTL=1
+ SPDK_TEST_OCF=1
+ SPDK_TEST_RAID5=1
+ SPDK_TEST_RBD=1
+ SPDK_RUN_ASAN=1
+ SPDK_RUN_UBSAN=1
+ EOF
+
+ mkdir -p $HOME/output
+
+ (
+ rootdir="$output_dir"
+ source $HOME/autorun-spdk.conf
+ source $output_dir/test/common/autotest_common.sh
+
+ # Prepare a separate, fixed cmdline for FreeBSD, Issue #1397.
+ if [[ $OSID == freebsd ]]; then
+ config_params="--enable-debug --enable-werror"
+ config_params+=" --with-idxd --with-fio=/usr/src/fio"
+ config_params+=" --disable-unit-tests --without-isal"
+ MAKE=gmake
+ else
+ config_params="$(get_config_params)"
+ fi
+ $output_dir/configure $(echo $config_params | sed 's/--enable-coverage//g')
+ if [[ $OSID != freebsd ]]; then
+ $MAKE -C $output_dir $MAKEFLAGS include/spdk/config.h
+ CONFIG_OCF_PATH="$output_dir/ocf" $MAKE -C $output_dir/lib/env_ocf $MAKEFLAGS exportlib O=$output_dir/build/ocf.a
+ $output_dir/configure $config_params --with-ocf=$output_dir/build/ocf.a --with-shared
+ fi
+ $MAKE -C $output_dir $MAKEFLAGS
+ )
+}
+
+function install_qat() {
+
+ kernel_maj=$(uname -r | cut -d'.' -f1)
+ kernel_min=$(uname -r | cut -d'.' -f2)
+
+ if [[ -e /sys/module/qat_c62x ]]; then
+ sudo modprobe -r qat_c62x || :
+ fi
+ if [[ -d $GIT_REPOS/QAT ]]; then
+ sudo rm -rf "$GIT_REPOS/QAT"
+ fi
+
+ mkdir "$GIT_REPOS/QAT"
+
+ tar -C "$GIT_REPOS/QAT" -xzof - < <(wget -O- "$DRIVER_LOCATION_QAT")
+
+ # The driver version 1.7.l.4.3.0-00033 contains a reference to a deprecated function. Remove it so the build won't fail.
+ if [ $kernel_maj -le 4 ]; then
+ if [ $kernel_min -le 17 ]; then
+ sudo sed -i 's/rdtscll(timestamp);/timestamp = rdtsc_ordered();/g' \
+ "$GIT_REPOS/QAT/quickassist/utilities/osal/src/linux/kernel_space/OsalServices.c" || true
+ fi
+ fi
+
+ (cd "$GIT_REPOS/QAT" && sudo ./configure --enable-icp-sriov=host && sudo make install)
+
+ if ! sudo service qat_service start; then
+ echo "Failed to start the qat service. Something may be wrong with your device or package."
+ fi
+}
+
+function install_rocksdb() {
+ # Rocksdb is installed for use with the blobfs tests.
+ if [ ! -d /usr/src/rocksdb ]; then
+ git clone "${GIT_REPO_ROCKSDB}" "$GIT_REPOS/rocksdb"
+ git -C "$GIT_REPOS/rocksdb" checkout spdk-v5.6.1
+ sudo mv "$GIT_REPOS/rocksdb" /usr/src/
+ else
+ sudo git -C /usr/src/rocksdb checkout spdk-v5.6.1
+ echo "rocksdb already in /usr/src. Not checking out again"
+ fi
+}
+
+function install_fio() {
+ # This version of fio is installed in /usr/src/fio to enable
+ # building the spdk fio plugin.
+ local fio_version="fio-3.19"
+
+ if [ ! -d /usr/src/fio ]; then
+ if [ ! -d fio ]; then
+ git clone "${GIT_REPO_FIO}" "$GIT_REPOS/fio"
+ sudo mv "$GIT_REPOS/fio" /usr/src/
+ else
+ sudo mv "$GIT_REPOS/fio" /usr/src/
+ fi
+ (
+ git -C /usr/src/fio checkout master \
+ && git -C /usr/src/fio pull \
+ && git -C /usr/src/fio checkout $fio_version \
+ && if [ $OSID == 'freebsd' ]; then
+ gmake -C /usr/src/fio -j${jobs} \
+ && sudo gmake -C /usr/src/fio install
+ else
+ make -C /usr/src/fio -j${jobs} \
+ && sudo make -C /usr/src/fio install
+ fi
+ )
+ else
+ echo "fio already in /usr/src/fio. Not installing"
+ fi
+}
+
+function install_flamegraph() {
+ # Flamegraph is used when printing out timing graphs for the tests.
+ if [ ! -d /usr/local/FlameGraph ]; then
+ git clone "${GIT_REPO_FLAMEGRAPH}" "$GIT_REPOS/FlameGraph"
+ mkdir -p /usr/local
+ sudo mv "$GIT_REPOS/FlameGraph" /usr/local/FlameGraph
+ else
+ echo "flamegraph already installed. Skipping"
+ fi
+}
+
+function install_qemu() {
+ # Two versions of QEMU are used in the tests.
+ # Stock QEMU is used for vhost. A special fork
+ # is used to test OCSSDs. Install both.
+
+ # Forked QEMU
+ SPDK_QEMU_BRANCH=spdk-5.0.0
+ mkdir -p "$GIT_REPOS/qemu"
+ if [[ ! -d $GIT_REPOS/qemu/$SPDK_QEMU_BRANCH ]]; then
+ git clone "${GIT_REPO_QEMU}" -b "$SPDK_QEMU_BRANCH" "$GIT_REPOS/qemu/$SPDK_QEMU_BRANCH"
+ else
+ echo "qemu already checked out. Skipping"
+ fi
+
+ declare -a opt_params=("--prefix=/usr/local/qemu/$SPDK_QEMU_BRANCH")
+ if ((gcc_version >= 9)); then
+ # GCC 9 fails to compile Qemu due to some old warnings which were not detected by older versions.
+ opt_params+=("--extra-cflags=-Wno-error=stringop-truncation -Wno-error=deprecated-declarations -Wno-error=incompatible-pointer-types -Wno-error=format-truncation")
+ opt_params+=("--disable-glusterfs")
+ fi
+
+ # Most tsocks proxies rely on a configuration file in /etc/tsocks.conf.
+ # If using tsocks, please make sure to complete this config before trying to build qemu.
+ if [[ $INSTALL_TSOCKS == true && $NO_TSOCKS != true ]]; then
+ if hash tsocks 2> /dev/null; then
+ opt_params+=("--with-git='tsocks git'")
+ fi
+ fi
+
+ sed -i s@git://git.qemu.org/@https://github.com/qemu/@g "$GIT_REPOS/qemu/$SPDK_QEMU_BRANCH/.gitmodules"
+ sed -i s@git://git.qemu.org/@https://github.com/qemu/@g "$GIT_REPOS/qemu/$SPDK_QEMU_BRANCH/.git/config"
+ sed -i s@git://git.qemu-project.org/@https://github.com/qemu/@g "$GIT_REPOS/qemu/$SPDK_QEMU_BRANCH/.gitmodules"
+ sed -i s@git://git.qemu-project.org/@https://github.com/qemu/@g "$GIT_REPOS/qemu/$SPDK_QEMU_BRANCH/.git/config"
+ # The qemu configure script places several output files in the CWD.
+ (cd "$GIT_REPOS/qemu/$SPDK_QEMU_BRANCH" && ./configure "${opt_params[@]}" --target-list="x86_64-softmmu" --enable-kvm --enable-linux-aio --enable-numa)
+
+ make -C "$GIT_REPOS/qemu/$SPDK_QEMU_BRANCH" -j${jobs}
+ sudo make -C "$GIT_REPOS/qemu/$SPDK_QEMU_BRANCH" install
+}
+
+function install_nvmecli() {
+ SPDK_NVME_CLI_BRANCH=spdk-1.6
+ if [[ ! -d $GIT_REPOS/nvme-cli ]]; then
+ git clone "${GIT_REPO_SPDK_NVME_CLI}" -b "$SPDK_NVME_CLI_BRANCH" "$GIT_REPOS/nvme-cli"
+ else
+ echo "nvme-cli already checked out. Skipping"
+ fi
+ if [ ! -d "/usr/local/src/nvme-cli" ]; then
+ # Changes required for SPDK are already merged on top of
+ # nvme-cli, but are not released yet.
+ # Support for SPDK should be released in nvme-cli > 1.11.1.
+ if [[ ! -d $GIT_REPOS/nvme-cli-cuse ]]; then
+ git clone "https://github.com/linux-nvme/nvme-cli.git" "$GIT_REPOS/nvme-cli-cuse"
+ fi
+ git -C "$GIT_REPOS/nvme-cli-cuse" checkout "e770466615096a6d41f038a28819b00bc3078e1d"
+ make -C "$GIT_REPOS/nvme-cli-cuse"
+ sudo mv "$GIT_REPOS/nvme-cli-cuse" /usr/local/src/nvme-cli
+ fi
+}
+
+function install_libiscsi() {
+ # We currently don't make any changes to the libiscsi repository for our tests, but it is possible that we will need
+ # to later. Cloning from git is just future-proofing the machines.
+ if [[ ! -d $GIT_REPOS/libiscsi ]]; then
+ git clone "${GIT_REPO_LIBISCSI}" "$GIT_REPOS/libiscsi"
+ else
+ echo "libiscsi already checked out. Skipping"
+ fi
+ (cd "$GIT_REPOS/libiscsi" && ./autogen.sh && ./configure --prefix=/usr/local/libiscsi)
+ make -C "$GIT_REPOS/libiscsi" -j${jobs}
+ sudo make -C "$GIT_REPOS/libiscsi" install
+}
+
+function install_git() {
+ install zlib-devel curl-devel
+ tar -C "$GIT_REPOS" -xzof <(wget -qO- "$GIT_REPO_GIT")
+ (cd "$GIT_REPOS/git-$GIT_VERSION" \
+ && make configure \
+ && ./configure --prefix=/usr/local/git \
+ && sudo make -j${jobs} install)
+ sudo sh -c "echo 'export PATH=/usr/local/git/bin:$PATH' >> /etc/bashrc"
+ export "PATH=/usr/local/git/bin:$PATH"
+}
+
+function install_extra_pkgs() {
+ if [[ $INSTALL_QAT == true ]]; then
+ install libudev-devel || install libudev-dev
+ fi
+
+ if [[ $INSTALL_QEMU == true ]]; then
+ install qemu-system-x86 qemu-img \
+ || install qemu-system-x86 qemu-utils \
+ || install qemu
+ fi
+}
+
+GIT_VERSION=2.25.1
+: ${GIT_REPO_SPDK=https://github.com/spdk/spdk.git}
+export GIT_REPO_SPDK
+: ${GIT_REPO_DPDK=https://github.com/spdk/dpdk.git}
+export GIT_REPO_DPDK
+: ${GIT_REPO_ROCKSDB=https://review.spdk.io/spdk/rocksdb}
+export GIT_REPO_ROCKSDB
+: ${GIT_REPO_FIO=http://git.kernel.dk/fio.git}
+export GIT_REPO_FIO
+: ${GIT_REPO_FLAMEGRAPH=https://github.com/brendangregg/FlameGraph.git}
+export GIT_REPO_FLAMEGRAPH
+: ${GIT_REPO_QEMU=https://github.com/spdk/qemu}
+export GIT_REPO_QEMU
+: ${GIT_REPO_LIBISCSI=https://github.com/sahlberg/libiscsi}
+export GIT_REPO_LIBISCSI
+: ${GIT_REPO_SPDK_NVME_CLI=https://github.com/spdk/nvme-cli}
+export GIT_REPO_SPDK_NVME_CLI
+: ${GIT_REPO_INTEL_IPSEC_MB=https://github.com/spdk/intel-ipsec-mb.git}
+export GIT_REPO_INTEL_IPSEC_MB
+: ${DRIVER_LOCATION_QAT=https://01.org/sites/default/files/downloads//qat1.7.l.4.9.0-00008.tar.gz}
+export DRIVER_LOCATION_QAT
+: ${GIT_REPO_GIT=https://github.com/git/git/archive/v${GIT_VERSION}.tar.gz}
+export GIT_REPO_GIT
+GIT_REPOS=${GIT_REPOS:-$HOME}
+
+gcc_version=$(gcc -dumpversion) gcc_version=${gcc_version%%.*}
+if [[ $ID == centos ]] && (( VERSION_ID == 7 )); then
+ # install proper version of the git first
+ install_git
+fi
+
+IFS="," read -ra conf_env <<< "$CONF"
+for conf in "${conf_env[@]}"; do
+ export "INSTALL_${conf^^}=true"
+done
+sources=(install_refspdk)
+
+if [[ $OS == FreeBSD ]]; then
+ jobs=$(($(sysctl -n hw.ncpu) * 2))
+else
+ jobs=$(($(nproc) * 2))
+ sources+=(
+ install_libiscsi
+ install_nvmecli
+ install_qat
+ install_rocksdb
+ install_flamegraph
+ install_qemu
+ )
+fi
+sources+=(install_fio)
+
+sudo mkdir -p /usr/{,local}/src
+sudo mkdir -p "$GIT_REPOS"
+
+install_extra_pkgs
+
+if [[ $INSTALL_REFSPDK == true ]]; then
+ # Serialize builds as refspdk depends on spdk
+ install_spdk
+ install_refspdk
+else
+ sources+=(install_spdk)
+fi
+
+for source in "${sources[@]}"; do
+ source_conf=${source^^}
+ if [[ ${!source_conf} == true ]]; then
+ "$source" &
+ fi
+done
+wait
diff --git a/src/spdk/test/common/config/pkgdep/pacman b/src/spdk/test/common/config/pkgdep/pacman
new file mode 100644
index 000000000..43d3db2f5
--- /dev/null
+++ b/src/spdk/test/common/config/pkgdep/pacman
@@ -0,0 +1,62 @@
+package_manager=pacman
+
+upgrade() {
+ sudo "$package_manager" -Syu --noconfirm --needed
+}
+
+install() {
+ (($#)) || return 0
+
+ sudo "$package_manager" -Sy --noconfirm --needed "$@"
+}
+
+pre_install() {
+ if [[ $INSTALL_TSOCKS == true ]]; then
+ install tsocks
+ fi
+}
+
+packages=(
+ valgrind
+ jq
+ nvme-cli
+ ceph
+ gdb
+ fio
+ linux-headers
+ gflags
+ autoconf
+ automake
+ libtool
+ libutil-linux
+ libiscsi
+ open-isns
+ glib2
+ pixman
+ flex
+ bison
+ elfutils
+ libelf
+ astyle
+ gptfdisk
+ socat
+ sshfs
+ sshpass
+ python-pandas
+ btrfs-progs
+ iptables
+ clang
+ bc
+ perl-switch
+ open-iscsi
+ smartmontools
+ parted
+ wget
+)
+
+# TODO:
+# These are either missing or require some other installation method
+# than pacman:
+# librbd-devel
+# perl-open
+# targetcli
diff --git a/src/spdk/test/common/config/pkgdep/pkg b/src/spdk/test/common/config/pkgdep/pkg
new file mode 100644
index 000000000..3f3f41725
--- /dev/null
+++ b/src/spdk/test/common/config/pkgdep/pkg
@@ -0,0 +1,27 @@
+package_manager=pkg
+
+upgrade() {
+ sudo "$package_manager" upgrade -y
+}
+
+install() {
+ (($#)) || return 0
+
+ sudo "$package_manager" install -y "$@"
+}
+
+packages=(
+ pciutils
+ jq
+ gdb
+ fio
+ p5-extutils-pkgconfig
+ libtool
+ flex
+ bison
+ gdisk
+ socat
+ sshpass
+ py37-pandas
+ wget
+)
diff --git a/src/spdk/test/common/config/pkgdep/swupd b/src/spdk/test/common/config/pkgdep/swupd
new file mode 100644
index 000000000..c1d2a8a6b
--- /dev/null
+++ b/src/spdk/test/common/config/pkgdep/swupd
@@ -0,0 +1,21 @@
+package_manager=swupd
+
+upgrade() {
+ sudo "$package_manager" update -y
+}
+
+install() {
+ (($#)) || return 0
+
+ sudo "$package_manager" bundle-add -y "$@"
+}
+
+packages=(
+ jq
+)
+
+pre_install() {
+ if [[ $INSTALL_TSOCKS == true ]]; then
+ install tsocks || echo "Installation of the tsocks package failed, proxy may not be available"
+ fi
+}
diff --git a/src/spdk/test/common/config/pkgdep/yum b/src/spdk/test/common/config/pkgdep/yum
new file mode 100644
index 000000000..32e89bc15
--- /dev/null
+++ b/src/spdk/test/common/config/pkgdep/yum
@@ -0,0 +1,67 @@
+package_manager=yum
+
+upgrade() {
+ sudo "$package_manager" upgrade -y
+}
+
+install() {
+ (($#)) || return 0
+
+ sudo "$package_manager" install -y "$@"
+}
+
+packages=(
+ pciutils
+ valgrind
+ jq
+ nvme-cli
+ gdb
+ fio
+ librbd-devel
+ kernel-devel
+ gflags-devel
+ libasan
+ libubsan
+ autoconf
+ automake
+ libtool
+ libmount-devel
+ iscsi-initiator-utils
+ isns-utils-devel
+ pmempool
+ perl-open
+ glib2-devel
+ pixman-devel
+ astyle-devel
+ elfutils
+ elfutils-libelf-devel
+ flex
+ bison
+ targetcli
+ perl-Switch
+ librdmacm-utils
+ libibverbs-utils
+ gdisk
+ socat
+ sshfs
+ sshpass
+ python3-pandas
+ rpm-build
+ iptables
+ clang-analyzer
+ bc
+ kernel-modules-extra
+ systemd-devel
+ python3
+ wget
+)
+
+pre_install() {
+ if [[ $ID == centos ]] && (( VERSION_ID == 8 )); then
+ "$package_manager" update -y --refresh
+ fi
+
+ install nbd || {
+ wget -O nbd.rpm https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/n/nbd-3.14-2.el7.x86_64.rpm
+ install nbd.rpm
+ }
+}
diff --git a/src/spdk/test/common/config/vm_setup.conf b/src/spdk/test/common/config/vm_setup.conf
new file mode 100644
index 000000000..a8e58d82a
--- /dev/null
+++ b/src/spdk/test/common/config/vm_setup.conf
@@ -0,0 +1,12 @@
+# This configuration file is provided for reference purposes.
+GIT_REPO_SPDK=https://review.spdk.io/gerrit/spdk/spdk
+GIT_REPO_DPDK=https://github.com/spdk/dpdk.git
+GIT_REPO_OPEN_ISCSI=https://github.com/open-iscsi/open-iscsi
+GIT_REPO_ROCKSDB=https://review.gerrithub.io/spdk/rocksdb
+GIT_REPO_FIO=http://git.kernel.dk/fio.git
+GIT_REPO_FLAMEGRAPH=https://github.com/brendangregg/FlameGraph.git
+GIT_REPO_QEMU=https://github.com/spdk/qemu
+GIT_REPO_VPP=https://gerrit.fd.io/r/vpp
+GIT_REPO_LIBISCSI=https://github.com/sahlberg/libiscsi
+GIT_REPO_SPDK_NVME_CLI=https://github.com/spdk/nvme-cli
+DRIVER_LOCATION_QAT=https://01.org/sites/default/files/downloads/intelr-quickassist-technology/qat1.7.l.4.3.0-00033.tar.gz
diff --git a/src/spdk/test/common/config/vm_setup.sh b/src/spdk/test/common/config/vm_setup.sh
new file mode 100755
index 000000000..b2b3a8fc1
--- /dev/null
+++ b/src/spdk/test/common/config/vm_setup.sh
@@ -0,0 +1,176 @@
+#!/usr/bin/env bash
+
+# Virtual Machine environment requirements:
+# 8 GiB of RAM (for DPDK)
+# enable intel_kvm on your host machine
+
+# The purpose of this script is to provide a simple procedure for spinning up a new
+# virtual test environment capable of running our whole test suite. This script, when
+# applied to a fresh install of Fedora 26 or Ubuntu 16/18 server, will install all of the
+# necessary dependencies to run almost the complete test suite. The main exception is vhost.
+# Vhost requires the configuration of a second virtual machine. Instructions for how to configure
+# that VM are included in the file TEST_ENV_SETUP_README inside this repository.
+
+# It is important to enable nesting for VMs in the kernel command line of your machine for the vhost tests.
+# In /etc/default/grub,
+# append the following to the GRUB_CMDLINE_LINUX line:
+# intel_iommu=on kvm-intel.nested=1
+
+# We have made a lot of progress with removing hardcoded paths from the tests.
+
+sudo() {
+ "$(type -P sudo)" -E "$@"
+}
+
+set -e
+
+VM_SETUP_PATH=$(readlink -f ${BASH_SOURCE%/*})
+
+UPGRADE=false
+INSTALL=false
+CONF="rocksdb,fio,flamegraph,tsocks,qemu,libiscsi,nvmecli,qat,spdk,refspdk"
+
+if [[ -e /etc/os-release ]]; then
+ source /etc/os-release
+fi
+
+if [ $(uname -s) == "FreeBSD" ]; then
+ OSID="freebsd"
+ OSVERSION=$(freebsd-version | cut -d. -f1)
+else
+ OSID=$(source /etc/os-release && echo $ID)
+ OSVERSION=$(source /etc/os-release && echo $VERSION_ID)
+fi
+
+function usage() {
+ echo "This script is intended to automate the environment setup for a linux virtual machine."
+ echo "Please run this script as your regular user. The script will make calls to sudo as needed."
+ echo ""
+ echo "./vm_setup.sh"
+ echo " -h --help"
+ echo " -u --upgrade Run $package_manager upgrade"
+ echo " -i --install-deps Install $package_manager based dependencies"
+ echo " -t --test-conf List of test configurations to enable (${CONF})"
+ echo " -c --conf-path Path to configuration file"
+ echo " -d --dir-git Path to where git sources should be saved"
+ echo " -s --disable-tsocks Disable use of tsocks"
+ exit 0
+}
+
+vmsetupdir=$(readlink -f "$(dirname "$0")")
+rootdir=$(readlink -f "$vmsetupdir/../../../")
+
+managers=("$vmsetupdir/pkgdep/"*)
+# Get package manager #
+if hash dnf &> /dev/null; then
+ source "$vmsetupdir/pkgdep/dnf"
+elif hash yum &> /dev/null; then
+ source "$vmsetupdir/pkgdep/yum"
+elif hash apt-get &> /dev/null; then
+ source "$vmsetupdir/pkgdep/apt-get"
+elif hash pacman &> /dev/null; then
+ source "$vmsetupdir/pkgdep/pacman"
+elif hash pkg &> /dev/null; then
+ source "$vmsetupdir/pkgdep/pkg"
+elif hash swupd &> /dev/null; then
+ source "$vmsetupdir/pkgdep/swupd"
+else
+ package_manager="undefined"
+fi
+
+# Parse input arguments #
+while getopts 'd:siuht:c:-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage ;;
+ upgrade) UPGRADE=true ;;
+ install-deps) INSTALL=true ;;
+ test-conf=*) CONF="${OPTARG#*=}" ;;
+ conf-path=*) CONF_PATH="${OPTARG#*=}" ;;
+ dir-git=*) GIT_REPOS="${OPTARG#*=}" ;;
+ disable-tsocks) NO_TSOCKS=true ;;
+ *)
+ echo "Invalid argument '$OPTARG'"
+ usage
+ ;;
+ esac
+ ;;
+ h) usage ;;
+ u) UPGRADE=true ;;
+ i) INSTALL=true ;;
+ t) CONF="$OPTARG" ;;
+ c) CONF_PATH="$OPTARG" ;;
+ d) GIT_REPOS="$OPTARG" ;;
+ s) NO_TSOCKS=true ;;
+ *)
+ echo "Invalid argument '$OPTARG'"
+ usage
+ ;;
+ esac
+done
+
+if [[ "$package_manager" == "undefined" ]]; then
+ echo "Supported package manager not found. Script supports:"
+ printf ' * %s\n' "${managers[@]##*/}"
+ exit 1
+fi
+
+if [ -n "$CONF_PATH" ]; then
+ if [ ! -f "$CONF_PATH" ]; then
+ echo Configuration file does not exist: "$CONF_PATH"
+ exit 1
+ else
+ source "$CONF_PATH"
+ fi
+fi
+
+if $UPGRADE; then
+ upgrade
+fi
+
+if $INSTALL; then
+ sudo "$rootdir/scripts/pkgdep.sh" --all
+ pre_install
+ install "${packages[@]}"
+fi
+
+source "$vmsetupdir/pkgdep/git"
+
+# Create autorun-spdk.conf in the home folder. This is sourced by the autotest_common.sh file.
+# By setting any one of the values below to 0, you can skip that specific test. If you are
+# using your autotest platform to do sanity checks before uploading to the build pool, it is
+# probably best to only run the tests that you believe your changes have modified along with
+# Scanbuild and check format. This is because running the whole suite of tests in series can
+# take ~40 minutes to complete.
+if [ ! -e ~/autorun-spdk.conf ]; then
+ cat > ~/autorun-spdk.conf << EOF
+# assign a value of 1 to all of the pertinent tests
+SPDK_RUN_VALGRIND=1
+SPDK_TEST_CRYPTO=1
+SPDK_RUN_FUNCTIONAL_TEST=1
+SPDK_TEST_AUTOBUILD=1
+SPDK_TEST_UNITTEST=1
+SPDK_TEST_ISCSI=1
+SPDK_TEST_ISCSI_INITIATOR=1
+SPDK_TEST_NVME=1
+SPDK_TEST_NVME_CLI=1
+SPDK_TEST_NVMF=1
+SPDK_TEST_RBD=1
+SPDK_TEST_BLOCKDEV=1
+SPDK_TEST_BLOBFS=1
+SPDK_TEST_PMDK=1
+SPDK_TEST_LVOL=1
+SPDK_TEST_JSON=1
+SPDK_RUN_ASAN=1
+SPDK_RUN_UBSAN=1
+# doesn't work on vm
+SPDK_TEST_IOAT=0
+# requires some extra configuration. see TEST_ENV_SETUP_README
+SPDK_TEST_VHOST=0
+SPDK_TEST_VHOST_INIT=0
+# Not configured here
+SPDK_RUN_INSTALLED_DPDK=0
+
+EOF
+fi
diff --git a/src/spdk/test/common/lib/nvme/common_stubs.h b/src/spdk/test/common/lib/nvme/common_stubs.h
new file mode 100644
index 000000000..1dc22a162
--- /dev/null
+++ b/src/spdk/test/common/lib/nvme/common_stubs.h
@@ -0,0 +1,117 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "common/lib/test_env.c"
+
+const char *
+spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
+{
+ switch (trtype) {
+ case SPDK_NVME_TRANSPORT_PCIE:
+ return "PCIe";
+ case SPDK_NVME_TRANSPORT_RDMA:
+ return "RDMA";
+ case SPDK_NVME_TRANSPORT_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+int
+spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
+{
+ int len, i;
+
+ if (trstring == NULL) {
+ return -EINVAL;
+ }
+
+ len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
+ if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
+ return -EINVAL;
+ }
+
+ /* cast official trstring to uppercase version of input. */
+ for (i = 0; i < len; i++) {
+ trid->trstring[i] = toupper(trstring[i]);
+ }
+ return 0;
+}
+
+DEFINE_STUB(nvme_request_check_timeout, int, (struct nvme_request *req, uint16_t cid,
+ struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick), 0);
+DEFINE_STUB_V(nvme_ctrlr_destruct_finish, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB(nvme_ctrlr_construct, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
+DEFINE_STUB_V(nvme_ctrlr_destruct, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB_V(nvme_ctrlr_init_cap, (struct spdk_nvme_ctrlr *ctrlr,
+ const union spdk_nvme_cap_register *cap,
+ const union spdk_nvme_vs_register *vs));
+DEFINE_STUB(nvme_ctrlr_get_vs, int, (struct spdk_nvme_ctrlr *ctrlr,
+ union spdk_nvme_vs_register *vs), 0);
+DEFINE_STUB(nvme_ctrlr_get_cap, int, (struct spdk_nvme_ctrlr *ctrlr,
+ union spdk_nvme_cap_register *cap), 0);
+DEFINE_STUB(nvme_qpair_init, int, (struct spdk_nvme_qpair *qpair, uint16_t id,
+ struct spdk_nvme_ctrlr *ctrlr,
+ enum spdk_nvme_qprio qprio,
+ uint32_t num_requests), 0);
+DEFINE_STUB_V(nvme_qpair_deinit, (struct spdk_nvme_qpair *qpair));
+DEFINE_STUB_V(spdk_nvme_transport_register, (const struct spdk_nvme_transport_ops *ops));
+DEFINE_STUB(nvme_transport_ctrlr_connect_qpair, int, (struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair), 0);
+DEFINE_STUB(nvme_ctrlr_get_current_process, struct spdk_nvme_ctrlr_process *,
+ (struct spdk_nvme_ctrlr *ctrlr), (struct spdk_nvme_ctrlr_process *)(uintptr_t)0x1);
+DEFINE_STUB(nvme_ctrlr_add_process, int, (struct spdk_nvme_ctrlr *ctrlr, void *devhandle), 0);
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+DEFINE_STUB(nvme_get_transport, const struct spdk_nvme_transport *, (const char *transport_name),
+ NULL);
+DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t, (struct spdk_nvme_qpair *qpair,
+ uint32_t max_completions), 0);
+
+/* Fabric transports only */
+DEFINE_STUB_V(nvme_ctrlr_disconnect_qpair, (struct spdk_nvme_qpair *qpair));
+DEFINE_STUB(nvme_fabric_ctrlr_set_reg_4, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+ uint32_t value), 0);
+DEFINE_STUB(nvme_fabric_ctrlr_set_reg_8, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+ uint64_t value), 0);
+DEFINE_STUB(nvme_fabric_ctrlr_get_reg_4, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+ uint32_t *value), 0);
+DEFINE_STUB(nvme_fabric_ctrlr_get_reg_8, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+ uint64_t *value), 0);
+DEFINE_STUB(nvme_fabric_ctrlr_scan, int, (struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect), 0);
+DEFINE_STUB(nvme_fabric_qpair_connect, int, (struct spdk_nvme_qpair *qpair, uint32_t num_entries),
+ 0);
+DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair));
+DEFINE_STUB(nvme_poll_group_disconnect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
diff --git a/src/spdk/test/common/lib/test_env.c b/src/spdk/test/common/lib/test_env.c
new file mode 100644
index 000000000..5e2912b5c
--- /dev/null
+++ b/src/spdk/test/common/lib/test_env.c
@@ -0,0 +1,637 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_internal/mock.h"
+
+#include "spdk/env.h"
+#include "spdk/queue.h"
+#include "spdk/util.h"
+
+static uint32_t g_ut_num_cores;
+static bool *g_ut_cores;
+
+void allocate_cores(uint32_t num_cores);
+void free_cores(void);
+
+DEFINE_STUB(spdk_process_is_primary, bool, (void), true)
+DEFINE_STUB(spdk_memzone_lookup, void *, (const char *name), NULL)
+DEFINE_STUB_V(spdk_pci_driver_register, (const char *name, struct spdk_pci_id *id_table,
+ uint32_t flags));
+DEFINE_STUB(spdk_pci_nvme_get_driver, struct spdk_pci_driver *, (void), NULL)
+DEFINE_STUB(spdk_pci_ioat_get_driver, struct spdk_pci_driver *, (void), NULL)
+DEFINE_STUB(spdk_pci_virtio_get_driver, struct spdk_pci_driver *, (void), NULL)
+
+void
+allocate_cores(uint32_t num_cores)
+{
+ uint32_t i;
+
+ g_ut_num_cores = num_cores;
+
+ g_ut_cores = calloc(num_cores, sizeof(bool));
+ assert(g_ut_cores != NULL);
+
+ for (i = 0; i < num_cores; i++) {
+ g_ut_cores[i] = true;
+ }
+}
+
+void
+free_cores(void)
+{
+ free(g_ut_cores);
+ g_ut_cores = NULL;
+ g_ut_num_cores = 0;
+}
+
+static uint32_t
+ut_get_next_core(uint32_t i)
+{
+ i++;
+
+ while (i < g_ut_num_cores) {
+ if (!g_ut_cores[i]) {
+ i++;
+ continue;
+ }
+ break;
+ }
+
+ if (i < g_ut_num_cores) {
+ return i;
+ } else {
+ return UINT32_MAX;
+ }
+}
+
+uint32_t
+spdk_env_get_first_core(void)
+{
+ return ut_get_next_core(-1);
+}
+
+uint32_t
+spdk_env_get_next_core(uint32_t prev_core)
+{
+ return ut_get_next_core(prev_core);
+}
+
+uint32_t
+spdk_env_get_core_count(void)
+{
+ return g_ut_num_cores;
+}
+
+uint32_t
+spdk_env_get_last_core(void)
+{
+ uint32_t i;
+ uint32_t last_core = UINT32_MAX;
+
+ SPDK_ENV_FOREACH_CORE(i) {
+ last_core = i;
+ }
+
+ return last_core;
+}
+
+DEFINE_RETURN_MOCK(spdk_env_get_current_core, uint32_t);
+uint32_t
+spdk_env_get_current_core(void)
+{
+ HANDLE_RETURN_MOCK(spdk_env_get_current_core);
+
+ return UINT32_MAX;
+}
+
+DEFINE_RETURN_MOCK(spdk_env_get_socket_id, uint32_t);
+uint32_t
+spdk_env_get_socket_id(uint32_t core)
+{
+ HANDLE_RETURN_MOCK(spdk_env_get_socket_id);
+
+ return SPDK_ENV_SOCKET_ID_ANY;
+}
+
+/*
+ * These mocks don't use the DEFINE_STUB macros because
+ * their default implementation is more complex.
+ */
+
+DEFINE_RETURN_MOCK(spdk_memzone_reserve, void *);
+void *
+spdk_memzone_reserve(const char *name, size_t len, int socket_id, unsigned flags)
+{
+ HANDLE_RETURN_MOCK(spdk_memzone_reserve);
+
+ return malloc(len);
+}
+
+DEFINE_RETURN_MOCK(spdk_memzone_reserve_aligned, void *);
+void *
+spdk_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
+ unsigned flags, unsigned align)
+{
+ HANDLE_RETURN_MOCK(spdk_memzone_reserve_aligned);
+
+ return malloc(len);
+}
+
+DEFINE_RETURN_MOCK(spdk_malloc, void *);
+void *
+spdk_malloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
+{
+ HANDLE_RETURN_MOCK(spdk_malloc);
+
+ void *buf = NULL;
+
+ if (align == 0) {
+ align = 8;
+ }
+
+ if (posix_memalign(&buf, align, size)) {
+ return NULL;
+ }
+ if (phys_addr) {
+ *phys_addr = (uint64_t)buf;
+ }
+
+ return buf;
+}
+
+DEFINE_RETURN_MOCK(spdk_zmalloc, void *);
+void *
+spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
+{
+ HANDLE_RETURN_MOCK(spdk_zmalloc);
+
+ void *buf = spdk_malloc(size, align, phys_addr, -1, 1);
+
+ if (buf != NULL) {
+ memset(buf, 0, size);
+ }
+ return buf;
+}
+
+DEFINE_RETURN_MOCK(spdk_dma_malloc, void *);
+void *
+spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
+{
+ HANDLE_RETURN_MOCK(spdk_dma_malloc);
+
+ return spdk_malloc(size, align, phys_addr, -1, 1);
+}
+
+DEFINE_RETURN_MOCK(spdk_realloc, void *);
+void *
+spdk_realloc(void *buf, size_t size, size_t align)
+{
+ HANDLE_RETURN_MOCK(spdk_realloc);
+
+ return realloc(buf, size);
+}
+
+DEFINE_RETURN_MOCK(spdk_dma_zmalloc, void *);
+void *
+spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
+{
+ HANDLE_RETURN_MOCK(spdk_dma_zmalloc);
+
+ return spdk_zmalloc(size, align, phys_addr, -1, 1);
+}
+
+DEFINE_RETURN_MOCK(spdk_dma_malloc_socket, void *);
+void *
+spdk_dma_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
+{
+ HANDLE_RETURN_MOCK(spdk_dma_malloc_socket);
+
+ return spdk_dma_malloc(size, align, phys_addr);
+}
+
+DEFINE_RETURN_MOCK(spdk_dma_zmalloc_socket, void *);
+void *
+spdk_dma_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
+{
+ HANDLE_RETURN_MOCK(spdk_dma_zmalloc_socket);
+
+ return spdk_dma_zmalloc(size, align, phys_addr);
+}
+
+DEFINE_RETURN_MOCK(spdk_dma_realloc, void *);
+void *
+spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
+{
+ HANDLE_RETURN_MOCK(spdk_dma_realloc);
+
+ return realloc(buf, size);
+}
+
+void
+spdk_free(void *buf)
+{
+ /* fix for false-positives in *certain* static analysis tools. */
+ assert((uintptr_t)buf != UINTPTR_MAX);
+ free(buf);
+}
+
+void
+spdk_dma_free(void *buf)
+{
+ return spdk_free(buf);
+}
+
+#ifndef UNIT_TEST_NO_VTOPHYS
+DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
+uint64_t
+spdk_vtophys(void *buf, uint64_t *size)
+{
+ HANDLE_RETURN_MOCK(spdk_vtophys);
+
+ return (uintptr_t)buf;
+}
+#endif
+
+void
+spdk_memzone_dump(FILE *f)
+{
+ return;
+}
+
+DEFINE_RETURN_MOCK(spdk_memzone_free, int);
+int
+spdk_memzone_free(const char *name)
+{
+ HANDLE_RETURN_MOCK(spdk_memzone_free);
+
+ return 0;
+}
+
+struct test_mempool {
+ size_t count;
+ size_t ele_size;
+};
+
+DEFINE_RETURN_MOCK(spdk_mempool_create, struct spdk_mempool *);
+struct spdk_mempool *
+spdk_mempool_create(const char *name, size_t count,
+ size_t ele_size, size_t cache_size, int socket_id)
+{
+ struct test_mempool *mp;
+
+ HANDLE_RETURN_MOCK(spdk_mempool_create);
+
+ mp = calloc(1, sizeof(*mp));
+ if (mp == NULL) {
+ return NULL;
+ }
+
+ mp->count = count;
+ mp->ele_size = ele_size;
+
+ return (struct spdk_mempool *)mp;
+}
+
+void
+spdk_mempool_free(struct spdk_mempool *_mp)
+{
+ struct test_mempool *mp = (struct test_mempool *)_mp;
+
+ free(mp);
+}
+
+DEFINE_RETURN_MOCK(spdk_mempool_get, void *);
+void *
+spdk_mempool_get(struct spdk_mempool *_mp)
+{
+ struct test_mempool *mp = (struct test_mempool *)_mp;
+ size_t ele_size = 0x10000;
+ void *buf;
+
+ HANDLE_RETURN_MOCK(spdk_mempool_get);
+
+ if (mp && mp->count == 0) {
+ return NULL;
+ }
+
+ if (mp) {
+ ele_size = mp->ele_size;
+ }
+
+ if (posix_memalign(&buf, 64, spdk_align32pow2(ele_size))) {
+ return NULL;
+ } else {
+ if (mp) {
+ mp->count--;
+ }
+ return buf;
+ }
+}
+
+int
+spdk_mempool_get_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
+{
+ for (size_t i = 0; i < count; i++) {
+ ele_arr[i] = spdk_mempool_get(mp);
+ if (ele_arr[i] == NULL) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+void
+spdk_mempool_put(struct spdk_mempool *_mp, void *ele)
+{
+ struct test_mempool *mp = (struct test_mempool *)_mp;
+
+ if (mp) {
+ mp->count++;
+ }
+ free(ele);
+}
+
+void
+spdk_mempool_put_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
+{
+ for (size_t i = 0; i < count; i++) {
+ spdk_mempool_put(mp, ele_arr[i]);
+ }
+}
+
+DEFINE_RETURN_MOCK(spdk_mempool_count, size_t);
+size_t
+spdk_mempool_count(const struct spdk_mempool *_mp)
+{
+ struct test_mempool *mp = (struct test_mempool *)_mp;
+
+ HANDLE_RETURN_MOCK(spdk_mempool_count);
+
+ if (mp) {
+ return mp->count;
+ } else {
+ return 1024;
+ }
+}
+
+struct spdk_ring_ele {
+ void *ele;
+ TAILQ_ENTRY(spdk_ring_ele) link;
+};
+
+struct spdk_ring {
+ TAILQ_HEAD(, spdk_ring_ele) elements;
+ pthread_mutex_t lock;
+ size_t count;
+};
+
+DEFINE_RETURN_MOCK(spdk_ring_create, struct spdk_ring *);
+struct spdk_ring *
+spdk_ring_create(enum spdk_ring_type type, size_t count, int socket_id)
+{
+ struct spdk_ring *ring;
+
+ HANDLE_RETURN_MOCK(spdk_ring_create);
+
+ ring = calloc(1, sizeof(*ring));
+ if (!ring) {
+ return NULL;
+ }
+
+ if (pthread_mutex_init(&ring->lock, NULL)) {
+ free(ring);
+ return NULL;
+ }
+
+ TAILQ_INIT(&ring->elements);
+ return ring;
+}
+
+void
+spdk_ring_free(struct spdk_ring *ring)
+{
+ struct spdk_ring_ele *ele, *tmp;
+
+ if (!ring) {
+ return;
+ }
+
+ TAILQ_FOREACH_SAFE(ele, &ring->elements, link, tmp) {
+ free(ele);
+ }
+
+ pthread_mutex_destroy(&ring->lock);
+ free(ring);
+}
+
+DEFINE_RETURN_MOCK(spdk_ring_enqueue, size_t);
+size_t
+spdk_ring_enqueue(struct spdk_ring *ring, void **objs, size_t count,
+ size_t *free_space)
+{
+ struct spdk_ring_ele *ele;
+ size_t i;
+
+ HANDLE_RETURN_MOCK(spdk_ring_enqueue);
+
+ pthread_mutex_lock(&ring->lock);
+
+ for (i = 0; i < count; i++) {
+ ele = calloc(1, sizeof(*ele));
+ if (!ele) {
+ break;
+ }
+
+ ele->ele = objs[i];
+ TAILQ_INSERT_TAIL(&ring->elements, ele, link);
+ ring->count++;
+ }
+
+ pthread_mutex_unlock(&ring->lock);
+ return i;
+}
+
+DEFINE_RETURN_MOCK(spdk_ring_dequeue, size_t);
+size_t
+spdk_ring_dequeue(struct spdk_ring *ring, void **objs, size_t count)
+{
+ struct spdk_ring_ele *ele, *tmp;
+ size_t i = 0;
+
+ HANDLE_RETURN_MOCK(spdk_ring_dequeue);
+
+ if (count == 0) {
+ return 0;
+ }
+
+ pthread_mutex_lock(&ring->lock);
+
+ TAILQ_FOREACH_SAFE(ele, &ring->elements, link, tmp) {
+ TAILQ_REMOVE(&ring->elements, ele, link);
+ ring->count--;
+ objs[i] = ele->ele;
+ free(ele);
+ i++;
+ if (i >= count) {
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&ring->lock);
+	return i;
+}
+
+DEFINE_RETURN_MOCK(spdk_ring_count, size_t);
+size_t
+spdk_ring_count(struct spdk_ring *ring)
+{
+ HANDLE_RETURN_MOCK(spdk_ring_count);
+ return ring->count;
+}
+
+DEFINE_RETURN_MOCK(spdk_get_ticks, uint64_t);
+uint64_t
+spdk_get_ticks(void)
+{
+ HANDLE_RETURN_MOCK(spdk_get_ticks);
+
+ return ut_spdk_get_ticks;
+}
+
+DEFINE_RETURN_MOCK(spdk_get_ticks_hz, uint64_t);
+uint64_t
+spdk_get_ticks_hz(void)
+{
+ HANDLE_RETURN_MOCK(spdk_get_ticks_hz);
+
+ return 1000000;
+}
+
+void
+spdk_delay_us(unsigned int us)
+{
+ /* spdk_get_ticks_hz is 1000000, meaning 1 tick per us. */
+ ut_spdk_get_ticks += us;
+}
+
+#ifndef UNIT_TEST_NO_PCI_ADDR
+DEFINE_RETURN_MOCK(spdk_pci_addr_parse, int);
+int
+spdk_pci_addr_parse(struct spdk_pci_addr *addr, const char *bdf)
+{
+ unsigned domain, bus, dev, func;
+
+ HANDLE_RETURN_MOCK(spdk_pci_addr_parse);
+
+ if (addr == NULL || bdf == NULL) {
+ return -EINVAL;
+ }
+
+ if ((sscanf(bdf, "%x:%x:%x.%x", &domain, &bus, &dev, &func) == 4) ||
+ (sscanf(bdf, "%x.%x.%x.%x", &domain, &bus, &dev, &func) == 4)) {
+ /* Matched a full address - all variables are initialized */
+ } else if (sscanf(bdf, "%x:%x:%x", &domain, &bus, &dev) == 3) {
+ func = 0;
+ } else if ((sscanf(bdf, "%x:%x.%x", &bus, &dev, &func) == 3) ||
+ (sscanf(bdf, "%x.%x.%x", &bus, &dev, &func) == 3)) {
+ domain = 0;
+ } else if ((sscanf(bdf, "%x:%x", &bus, &dev) == 2) ||
+ (sscanf(bdf, "%x.%x", &bus, &dev) == 2)) {
+ domain = 0;
+ func = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ if (bus > 0xFF || dev > 0x1F || func > 7) {
+ return -EINVAL;
+ }
+
+ addr->domain = domain;
+ addr->bus = bus;
+ addr->dev = dev;
+ addr->func = func;
+
+ return 0;
+}
+
+DEFINE_RETURN_MOCK(spdk_pci_addr_fmt, int);
+int
+spdk_pci_addr_fmt(char *bdf, size_t sz, const struct spdk_pci_addr *addr)
+{
+ int rc;
+
+ HANDLE_RETURN_MOCK(spdk_pci_addr_fmt);
+
+ rc = snprintf(bdf, sz, "%04x:%02x:%02x.%x",
+ addr->domain, addr->bus,
+ addr->dev, addr->func);
+
+ if (rc > 0 && (size_t)rc < sz) {
+ return 0;
+ }
+
+ return -1;
+}
+
+DEFINE_RETURN_MOCK(spdk_pci_addr_compare, int);
+int
+spdk_pci_addr_compare(const struct spdk_pci_addr *a1, const struct spdk_pci_addr *a2)
+{
+ HANDLE_RETURN_MOCK(spdk_pci_addr_compare);
+
+ if (a1->domain > a2->domain) {
+ return 1;
+ } else if (a1->domain < a2->domain) {
+ return -1;
+ } else if (a1->bus > a2->bus) {
+ return 1;
+ } else if (a1->bus < a2->bus) {
+ return -1;
+ } else if (a1->dev > a2->dev) {
+ return 1;
+ } else if (a1->dev < a2->dev) {
+ return -1;
+ } else if (a1->func > a2->func) {
+ return 1;
+ } else if (a1->func < a2->func) {
+ return -1;
+ }
+
+ return 0;
+}
+#endif
diff --git a/src/spdk/test/common/lib/test_rdma.c b/src/spdk/test/common/lib/test_rdma.c
new file mode 100644
index 000000000..109862fe6
--- /dev/null
+++ b/src/spdk/test/common/lib/test_rdma.c
@@ -0,0 +1,49 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_internal/rdma.h"
+#include "spdk_internal/mock.h"
+
+DEFINE_STUB(spdk_rdma_qp_create, struct spdk_rdma_qp *, (struct rdma_cm_id *cm_id,
+ struct spdk_rdma_qp_init_attr *qp_attr), NULL);
+DEFINE_STUB(spdk_rdma_qp_accept, int, (struct spdk_rdma_qp *spdk_rdma_qp,
+ struct rdma_conn_param *conn_param), 0);
+DEFINE_STUB(spdk_rdma_qp_complete_connect, int, (struct spdk_rdma_qp *spdk_rdma_qp), 0);
+DEFINE_STUB_V(spdk_rdma_qp_destroy, (struct spdk_rdma_qp *spdk_rdma_qp));
+DEFINE_STUB(spdk_rdma_qp_disconnect, int, (struct spdk_rdma_qp *spdk_rdma_qp), 0);
+DEFINE_STUB(spdk_rdma_qp_queue_send_wrs, bool, (struct spdk_rdma_qp *spdk_rdma_qp,
+ struct ibv_send_wr *first), true);
+DEFINE_STUB(spdk_rdma_qp_flush_send_wrs, int, (struct spdk_rdma_qp *spdk_rdma_qp,
+ struct ibv_send_wr **bad_wr), 0);
diff --git a/src/spdk/test/common/lib/test_sock.c b/src/spdk/test/common/lib/test_sock.c
new file mode 100644
index 000000000..d2c83b732
--- /dev/null
+++ b/src/spdk/test/common/lib/test_sock.c
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_internal/sock.h"
+#include "spdk_internal/mock.h"
+
+DEFINE_STUB(spdk_sock_getaddr, int, (struct spdk_sock *sock, char *saddr, int slen, uint16_t *sport,
+ char *caddr, int clen, uint16_t *cport), 0);
+DEFINE_STUB(spdk_sock_connect, struct spdk_sock *, (const char *ip, int port, char *impl_name),
+ NULL);
+DEFINE_STUB(spdk_sock_connect_ext, struct spdk_sock *, (const char *ip, int port, char *impl_name,
+ struct spdk_sock_opts *opts), NULL);
+DEFINE_STUB(spdk_sock_listen, struct spdk_sock *, (const char *ip, int port, char *impl_name),
+ NULL);
+DEFINE_STUB(spdk_sock_listen_ext, struct spdk_sock *, (const char *ip, int port, char *impl_name,
+ struct spdk_sock_opts *opts), NULL);
+DEFINE_STUB_V(spdk_sock_get_default_opts, (struct spdk_sock_opts *opts));
+DEFINE_STUB(spdk_sock_accept, struct spdk_sock *, (struct spdk_sock *sock), NULL);
+DEFINE_STUB(spdk_sock_close, int, (struct spdk_sock **sock), 0);
+DEFINE_STUB(spdk_sock_recv, ssize_t, (struct spdk_sock *sock, void *buf, size_t len), 0);
+DEFINE_STUB(spdk_sock_writev, ssize_t, (struct spdk_sock *sock, struct iovec *iov, int iovcnt), 0);
+DEFINE_STUB(spdk_sock_readv, ssize_t, (struct spdk_sock *sock, struct iovec *iov, int iovcnt), 0);
+DEFINE_STUB(spdk_sock_set_recvlowat, int, (struct spdk_sock *sock, int nbytes), 0);
+DEFINE_STUB(spdk_sock_set_recvbuf, int, (struct spdk_sock *sock, int sz), 0);
+DEFINE_STUB(spdk_sock_set_sendbuf, int, (struct spdk_sock *sock, int sz), 0);
+DEFINE_STUB_V(spdk_sock_writev_async, (struct spdk_sock *sock, struct spdk_sock_request *req));
+DEFINE_STUB(spdk_sock_flush, int, (struct spdk_sock *sock), 0);
+DEFINE_STUB(spdk_sock_is_ipv6, bool, (struct spdk_sock *sock), false);
+DEFINE_STUB(spdk_sock_is_ipv4, bool, (struct spdk_sock *sock), true);
+DEFINE_STUB(spdk_sock_is_connected, bool, (struct spdk_sock *sock), true);
+DEFINE_STUB(spdk_sock_group_create, struct spdk_sock_group *, (void *ctx), NULL);
+DEFINE_STUB(spdk_sock_group_add_sock, int, (struct spdk_sock_group *group, struct spdk_sock *sock,
+ spdk_sock_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB(spdk_sock_group_remove_sock, int, (struct spdk_sock_group *group,
+ struct spdk_sock *sock), 0);
+DEFINE_STUB(spdk_sock_group_poll, int, (struct spdk_sock_group *group), 0);
+DEFINE_STUB(spdk_sock_group_poll_count, int, (struct spdk_sock_group *group, int max_events), 0);
+DEFINE_STUB(spdk_sock_group_close, int, (struct spdk_sock_group **group), 0);
diff --git a/src/spdk/test/common/lib/ut_multithread.c b/src/spdk/test/common/lib/ut_multithread.c
new file mode 100644
index 000000000..30b78f74d
--- /dev/null
+++ b/src/spdk/test/common/lib/ut_multithread.c
@@ -0,0 +1,214 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk/thread.h"
+#include "spdk_internal/mock.h"
+#include "spdk_internal/thread.h"
+
+#include "common/lib/test_env.c"
+
+static uint32_t g_ut_num_threads;
+
+int allocate_threads(int num_threads);
+void free_threads(void);
+void poll_threads(void);
+bool poll_thread(uintptr_t thread_id);
+bool poll_thread_times(uintptr_t thread_id, uint32_t max_polls);
+
+struct ut_msg {
+ spdk_msg_fn fn;
+ void *ctx;
+ TAILQ_ENTRY(ut_msg) link;
+};
+
+struct ut_thread {
+ struct spdk_thread *thread;
+ struct spdk_io_channel *ch;
+};
+
+struct ut_thread *g_ut_threads;
+
+#define INVALID_THREAD 0x1000
+
+static uint64_t g_ut_thread_id = INVALID_THREAD;
+
+static void
+set_thread(uintptr_t thread_id)
+{
+ g_ut_thread_id = thread_id;
+ if (thread_id == INVALID_THREAD) {
+ spdk_set_thread(NULL);
+ } else {
+ spdk_set_thread(g_ut_threads[thread_id].thread);
+ }
+}
+
+int
+allocate_threads(int num_threads)
+{
+ struct spdk_thread *thread;
+ uint32_t i;
+
+ spdk_thread_lib_init(NULL, 0);
+
+ g_ut_num_threads = num_threads;
+
+ g_ut_threads = calloc(num_threads, sizeof(*g_ut_threads));
+ assert(g_ut_threads != NULL);
+
+ for (i = 0; i < g_ut_num_threads; i++) {
+ set_thread(i);
+ thread = spdk_thread_create(NULL, NULL);
+ assert(thread != NULL);
+ g_ut_threads[i].thread = thread;
+ }
+
+ set_thread(INVALID_THREAD);
+ return 0;
+}
+
+void
+free_threads(void)
+{
+ uint32_t i, num_threads;
+ struct spdk_thread *thread;
+
+ for (i = 0; i < g_ut_num_threads; i++) {
+ set_thread(i);
+ thread = g_ut_threads[i].thread;
+ spdk_thread_exit(thread);
+ }
+
+ num_threads = g_ut_num_threads;
+
+ while (num_threads != 0) {
+ for (i = 0; i < g_ut_num_threads; i++) {
+ set_thread(i);
+ thread = g_ut_threads[i].thread;
+ if (thread == NULL) {
+ continue;
+ }
+
+ if (spdk_thread_is_exited(thread)) {
+ g_ut_threads[i].thread = NULL;
+ num_threads--;
+ spdk_thread_destroy(thread);
+ } else {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ }
+ }
+
+ g_ut_num_threads = 0;
+ free(g_ut_threads);
+ g_ut_threads = NULL;
+
+ spdk_thread_lib_fini();
+}
+
+bool
+poll_thread_times(uintptr_t thread_id, uint32_t max_polls)
+{
+ bool busy = false;
+ struct ut_thread *thread = &g_ut_threads[thread_id];
+ uintptr_t original_thread_id;
+ uint32_t polls_executed = 0;
+ uint64_t now;
+
+ if (max_polls == 0) {
+ /* If max_polls is set to 0,
+ * poll until no operation is pending. */
+ return poll_thread(thread_id);
+ }
+ assert(thread_id != (uintptr_t)INVALID_THREAD);
+ assert(thread_id < g_ut_num_threads);
+
+ original_thread_id = g_ut_thread_id;
+ set_thread(INVALID_THREAD);
+
+ now = spdk_get_ticks();
+ while (polls_executed < max_polls) {
+ if (spdk_thread_poll(thread->thread, 1, now) > 0) {
+ busy = true;
+ }
+ now = spdk_thread_get_last_tsc(thread->thread);
+ polls_executed++;
+ }
+
+ set_thread(original_thread_id);
+
+ return busy;
+}
+
+bool
+poll_thread(uintptr_t thread_id)
+{
+ bool busy = false;
+ struct ut_thread *thread = &g_ut_threads[thread_id];
+ uintptr_t original_thread_id;
+ uint64_t now;
+
+ assert(thread_id != (uintptr_t)INVALID_THREAD);
+ assert(thread_id < g_ut_num_threads);
+
+ original_thread_id = g_ut_thread_id;
+ set_thread(INVALID_THREAD);
+
+ now = spdk_get_ticks();
+ while (spdk_thread_poll(thread->thread, 0, now) > 0) {
+ now = spdk_thread_get_last_tsc(thread->thread);
+ busy = true;
+ }
+
+ set_thread(original_thread_id);
+
+ return busy;
+}
+
+void
+poll_threads(void)
+{
+ while (true) {
+ bool busy = false;
+
+ for (uint32_t i = 0; i < g_ut_num_threads; i++) {
+ busy = busy || poll_thread(i);
+ }
+
+ if (!busy) {
+ break;
+ }
+ }
+}
diff --git a/src/spdk/test/common/skipped_build_files.txt b/src/spdk/test/common/skipped_build_files.txt
new file mode 100644
index 000000000..dca967681
--- /dev/null
+++ b/src/spdk/test/common/skipped_build_files.txt
@@ -0,0 +1,60 @@
+# Not configured to test vtune.
+lib/bdev/vtune
+
+# Not configured to test VPP
+module/sock/vpp/vpp
+
+# Not configured to test rocksdb env file
+lib/rocksdb/env_spdk.cc
+
+# Not configured to test FC
+lib/nvmf/fc
+lib/nvmf/fc_ls
+test/unit/lib/nvmf/fc.c/fc_ut
+test/unit/lib/nvmf/fc_ls.c/fc_ls_ut
+
+# Not configured for Neon testing
+lib/util/base64_neon
+
+# Not configured for mlx5 dv testing
+lib/rdma/rdma_mlx5_dv
+
+# Files related to testing our internal vhost implementation.
+lib/rte_vhost/fd_man
+lib/rte_vhost/socket
+lib/rte_vhost/vhost
+lib/rte_vhost/vhost_user
+lib/vhost/vhost_nvme
+lib/virtio/vhost_user
+
+# Cuse related files, enable when ready.
+lib/nvme/nvme_cuse
+module/bdev/nvme/bdev_nvme_cuse_rpc
+test/nvme/cuse/cuse
+
+# Currently we don't have this plumbed for testing, enable when ready.
+module/bdev/uring/bdev_uring
+module/bdev/uring/bdev_uring_rpc
+module/sock/uring/uring
+
+# Currently not testing blobfs_fuse, enable when ready.
+module/blobfs/bdev/blobfs_fuse
+test/blobfs/fuse/fuse
+
+# These files all represent C files that are only compiled by direct inclusion in other files.
+test/common/lib/test_env
+test/common/lib/test_sock
+test/common/lib/ut_multithread
+test/common/lib/test_rdma
+test/unit/lib/blob/bs_dev_common
+test/unit/lib/blob/bs_scheduler
+test/unit/lib/ftl/common/utils
+test/unit/lib/iscsi/common
+test/unit/lib/json_mock
+test/unit/lib/sock/uring.c/uring_ut
+
+# These files are in the external_code directory which doesn't get compiled with SPDK.
+test/external_code/hello_world/hello_bdev
+test/external_code/passthru/vbdev_passthru
+test/external_code/passthru/vbdev_passthru_rpc
diff --git a/src/spdk/test/common/skipped_tests.txt b/src/spdk/test/common/skipped_tests.txt
new file mode 100644
index 000000000..d96957f2b
--- /dev/null
+++ b/src/spdk/test/common/skipped_tests.txt
@@ -0,0 +1,73 @@
+# This file represents the tests we are intentionally skipping in CI testing.
+
+# ftl cases
+ftl_dirty_shutdown
+ftl_fio_basic
+ftl_fio_extended
+ftl_restore_nv_cache
+
+# Waiting for test refactor
+iscsi_tgt_fio_remote_nvme
+
+# VPP deprecated with 20.07
+iscsi_tgt_vpp
+
+# Waiting on significant test rewrite
+nvme_opal
+nvme_opal_bdevio
+nvme_opal_bdevperf
+nvme_opal_spdk_tgt
+
+# CI doesn't have FC hardware
+nvmf_fc
+spdkcli_nvmf_fc
+unittest_nvmf_fc
+unittest_nvmf_fc_ls
+
+# Enable after cuse tests switch to physical devices
+nvme_ns_manage_cuse
+
+# These tests are currently only run manually
+vhost_blk_fs_integrity
+vhost_blk_hot_remove
+vhost_scsi_hot_remove
+vhost_hotplug
+
+# Waiting on hardware
+vmd
+vmd_bdev_svc
+vmd_fio
+vmd_hello_world
+vmd_identify
+vmd_perf
+
+# nightly tests
+bdev_fio_rw_verify_ext
+bdev_fio_trim_ext
+bdev_reset
+iscsi_tgt_digest
+iscsi_tgt_data_digest
+iscsi_tgt_pmem
+iscsi_tgt_ext4test
+iscsi_tgt_digests
+iscsi_tgt_multiconnection
+iscsi_tgt_fuzz
+nvmf_fuzz
+nvmf_multiconnection
+nvmf_initiator_timeout
+vhost_blk_2core_2ctrl
+vhost_blk_1core_2ctrl
+vhost_blk_fs_integrity
+vhost_blk_integrity
+vhost_blk_nightly
+vhost_lvol_integrity_1core_1ctrl
+vhost_migration
+vhost_migration_tc1
+vhost_migration_tc2
+vhost_readonly
+vhost_scsi_fs_integrity
+vhost_scsi_integrity
+vhost_scsi_nightly
+vhost_scsi_2core_2ctrl
+vhost_scsi_1core_2ctrl
+vhost_scsi_1core_1ctrl
diff --git a/src/spdk/test/compress/compress.sh b/src/spdk/test/compress/compress.sh
new file mode 100755
index 000000000..0c67f7021
--- /dev/null
+++ b/src/spdk/test/compress/compress.sh
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+
+set -e
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+plugindir=$rootdir/examples/bdev/fio_plugin
+rpc_py="$rootdir/scripts/rpc.py"
+source "$rootdir/scripts/common.sh"
+source "$rootdir/test/common/autotest_common.sh"
+source "$rootdir/test/nvmf/common.sh"
+
+function error_cleanup() {
+ # force delete pmem file and wipe on-disk metadata
+ rm -rf /tmp/pmem
+ $SPDK_EXAMPLE_DIR/perf -q 1 -o 131072 -w write -t 2
+}
+
+function destroy_vols() {
+ # Gracefully destroy the vols via bdev_compress_delete API.
+ # bdev_compress_delete will delete the on-disk metadata as well as
+ # the persistent memory file containing its metadata.
+ $rpc_py bdev_compress_delete COMP_lvs0/lv0
+ $rpc_py bdev_lvol_delete_lvstore -l lvs0
+}
+
+function create_vols() {
+ $rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
+ waitforbdev Nvme0n1
+
+ $rpc_py bdev_lvol_create_lvstore Nvme0n1 lvs0
+ $rpc_py bdev_lvol_create -t -l lvs0 lv0 100
+ waitforbdev lvs0/lv0
+
+ $rpc_py compress_set_pmd -p "$pmd"
+ if [ -z "$1" ]; then
+ $rpc_py bdev_compress_create -b lvs0/lv0 -p /tmp/pmem
+ else
+ $rpc_py bdev_compress_create -b lvs0/lv0 -p /tmp/pmem -l $1
+ fi
+ waitforbdev COMP_lvs0/lv0
+}
+
+function run_bdevio() {
+ $rootdir/test/bdev/bdevio/bdevio -w &
+ bdevio_pid=$!
+ trap 'killprocess $bdevio_pid; error_cleanup; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $bdevio_pid
+ create_vols
+ $rootdir/test/bdev/bdevio/tests.py perform_tests
+ destroy_vols
+ trap - SIGINT SIGTERM EXIT
+ killprocess $bdevio_pid
+}
+
+function run_bdevperf() {
+ $rootdir/test/bdev/bdevperf/bdevperf -z -q $1 -o $2 -w verify -t $3 -C -m 0x6 &
+ bdevperf_pid=$!
+ trap 'killprocess $bdevperf_pid; error_cleanup; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $bdevperf_pid
+ create_vols $4
+ $rootdir/test/bdev/bdevperf/bdevperf.py perform_tests
+ destroy_vols
+ trap - SIGINT SIGTERM EXIT
+ killprocess $bdevperf_pid
+}
+
+test_type=$1
+case "$test_type" in
+ qat)
+ pmd=1
+ ;;
+ isal)
+ pmd=2
+ ;;
+ *)
+ echo "invalid pmd name"
+ exit 1
+ ;;
+esac
+
+mkdir -p /tmp/pmem
+
+# Per-patch bdevperf runs use slightly different params than nightly runs:
+# first a logical block size equal to the underlying device's, then 512, then 4096.
+run_bdevperf 32 4096 3
+run_bdevperf 32 4096 3 512
+run_bdevperf 32 4096 3 4096
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ run_bdevio
+ run_bdevperf 64 16384 30
+
+ # run perf on nvmf target w/compressed vols
+ export TEST_TRANSPORT=tcp && nvmftestinit
+ nvmfappstart -m 0x7
+ trap "nvmftestfini; error_cleanup; exit 1" SIGINT SIGTERM EXIT
+
+ # Create an NVMe-oF subsystem and add compress bdevs as namespaces
+ $rpc_py nvmf_create_transport -t $TEST_TRANSPORT -u 8192
+ create_vols
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode0 -a -s SPDK0
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 COMP_lvs0/lv0
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+	# Start random read/write I/O in the background
+ $SPDK_EXAMPLE_DIR/perf -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" -o 4096 -q 64 -s 512 -w randrw -t 30 -c 0x18 -M 50 &
+ perf_pid=$!
+
+ # Wait for I/O to complete
+	trap 'killprocess $perf_pid; error_cleanup; exit 1' SIGINT SIGTERM EXIT
+ wait $perf_pid
+ destroy_vols
+
+ trap - SIGINT SIGTERM EXIT
+ nvmftestfini
+fi
+
+rm -rf /tmp/pmem
diff --git a/src/spdk/test/config_converter/config.ini b/src/spdk/test/config_converter/config.ini
new file mode 100644
index 000000000..bbfc17373
--- /dev/null
+++ b/src/spdk/test/config_converter/config.ini
@@ -0,0 +1,153 @@
+#comment1
+[Global]
+ Comment "Global section"#comment2
+ ReactorMask 0xF #comment3
+#comment4
+ #comment5
+[Nvmf]
+ MaxQueuesPerSession 4
+ MaxQueueDepth 128
+ InCapsuleDataSize 4096
+ MaxIOSize 131072
+ AcceptorPollRate 10000
+ IOUnitSize 131072
+
+[Nvme]
+ TransportID "trtype:PCIe traddr:0000:00:04.0" Nvme0
+
+[Bdev]
+ BdevIoPoolSize 65536
+ BdevIoCacheSize 256
+
+[Split]
+ Split Nvme0n1 8
+
+[Nvme]
+ RetryCount 4
+ TimeoutUsec 0
+ ActionOnTimeout None
+ AdminPollRate 100000
+ IOPollRate 0
+ HotplugEnable Yes
+
+[iSCSI]
+ NodeBase "iqn.2016-06.io.spdk"
+ AuthFile /usr/local/etc/spdk/auth.conf
+ Timeout 30
+ DiscoveryAuthMethod Auto
+ DiscoveryAuthGroup AuthGroup1
+ MaxSessions 16
+ ImmediateData Yes
+ ErrorRecoveryLevel 0
+ MaxR2T 256
+ NopInInterval 10
+ AllowDuplicateIsid Yes
+ DefaultTime2Wait 2
+ QueueDepth 128
+
+[Malloc]
+ NumberOfLuns 8
+ LunSizeInMB 128
+ BlockSize 4096
+
+[Pmem]
+ Blk /tmp/sample_pmem Pmem0
+
+[AIO]
+ AIO /tmp/sample_aio0 AIO0 2048
+ AIO /tmp/sample_aio1 AIO1 2048
+ AIO /tmp/sample_aio2 AIO2 2048
+ AIO /tmp/sample_aio1 AIO3 2048
+ AIO /tmp/sample_aio2 AIO4 2048
+
+[VhostBlk0]
+ Name vhost.1
+ Dev Malloc6
+ ReadOnly yes
+ Cpumask 0x1
+
+[VhostScsi0]
+ Name naa.vhost.0
+ Target 0 Malloc4
+ Target 1 AIO3
+ Target 2 Nvme0n1p2
+ # Target 3 Nvme1n1p2
+ Cpumask 0x1
+
+[VhostScsi1]
+ Name naa.vhost.1
+ Target 0 AIO4
+ Cpumask 0x1
+
+[VhostBlk1]
+ Name naa.vhost.2
+ Dev Malloc5
+ ReadOnly no
+ Cpumask 0x1
+
+[VhostNvme0]
+ Name naa.vhost.3
+ NumberOfQueues 2
+ Namespace Nvme0n1p0
+ Namespace Nvme0n1p1
+ Cpumask 0x1
+
+[Subsystem1]
+ NQN nqn.2016-06.io.spdk:cnode1
+ Listen RDMA 10.0.2.15:4420
+ AllowAnyHost No
+ Host nqn.2016-06.io.spdk:init
+ SN SPDK00000000000001
+ MN SPDK_Controller1
+ MaxNamespaces 20
+ Namespace Nvme0n1p5 1
+ Namespace Nvme0n1p6 2
+
+[Subsystem2]
+ NQN nqn.2016-06.io.spdk:cnode2
+ Listen RDMA 10.0.2.15:4421
+ AllowAnyHost No
+ Host nqn.2016-06.io.spdk:init
+ SN SPDK00000000000002
+ MN SPDK_Controller2
+ Namespace Malloc1
+ Namespace Malloc2
+ Namespace AIO0
+ Namespace AIO1
+
+[InitiatorGroup1]
+ InitiatorName ANY
+ Netmask 127.0.0.1/32
+
+[PortalGroup1]
+ Portal DA1 127.0.0.1:4000
+ Portal DA2 127.0.0.1:4001@0xF
+
+[TargetNode1]
+ TargetName disk1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1 InitiatorGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ # Enable header and data digest
+ # UseDigest Header Data
+ UseDigest Auto
+ # Use the first malloc target
+ LUN0 Malloc0
+ # Using the first AIO target
+ LUN1 AIO2
+ # Using the second storage target
+ LUN2 AIO3
+ # Using the third storage target
+ LUN3 AIO4
+ QueueDepth 128
+
+[TargetNode2]
+ TargetName disk2
+ TargetAlias "Data Disk2"
+ Mapping PortalGroup1 InitiatorGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ LUN0 Nvme0n1p3
+ QueueDepth 32
diff --git a/src/spdk/test/config_converter/config_virtio.ini b/src/spdk/test/config_converter/config_virtio.ini
new file mode 100644
index 000000000..b2b7f4c71
--- /dev/null
+++ b/src/spdk/test/config_converter/config_virtio.ini
@@ -0,0 +1,21 @@
+[VirtioUser0]
+ Path naa.vhost.0
+ Queues 8
+
+[VirtioUser1]
+ Path naa.vhost.1
+ Queues 8
+
+#[VirtioUser2]
+# Path naa.vhost.3
+# Queues 8
+
+#[VirtioUser3]
+# Path naa.vhost.2
+# Type Blk
+# Queues 8
+
+[VirtioUser4]
+ Path vhost.1
+ Type Blk
+# Queues 8
diff --git a/src/spdk/test/config_converter/spdk_config.json b/src/spdk/test/config_converter/spdk_config.json
new file mode 100644
index 000000000..af8dcfbcc
--- /dev/null
+++ b/src/spdk/test/config_converter/spdk_config.json
@@ -0,0 +1,526 @@
+{
+ "subsystems": [
+ {
+ "subsystem": "accel",
+ "config": null
+ },
+ {
+ "subsystem": "interface",
+ "config": null
+ },
+ {
+ "subsystem": "net_framework",
+ "config": null
+ },
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "params": {
+ "bdev_io_pool_size": 65536,
+ "bdev_io_cache_size": 256
+ },
+ "method": "bdev_set_options"
+ },
+ {
+ "params": {
+ "base_bdev": "Nvme0n1",
+ "split_size_mb": 0,
+ "split_count": 8
+ },
+ "method": "bdev_split_create"
+ },
+ {
+ "params": {
+ "retry_count": 4,
+ "timeout_us": 0,
+ "nvme_adminq_poll_period_us": 100000,
+ "nvme_ioq_poll_period_us": 0,
+ "action_on_timeout": "none"
+ },
+ "method": "bdev_nvme_set_options"
+ },
+ {
+ "params": {
+ "trtype": "PCIe",
+ "name": "Nvme0",
+ "traddr": "0000:00:04.0"
+ },
+ "method": "bdev_nvme_attach_controller"
+ },
+ {
+ "params": {
+ "enable": true,
+ "period_us": 10000000
+ },
+ "method": "bdev_nvme_set_hotplug"
+ },
+ {
+ "params": {
+ "block_size": 4096,
+ "num_blocks": 32768,
+ "name": "Malloc0"
+ },
+ "method": "bdev_malloc_create"
+ },
+ {
+ "params": {
+ "block_size": 4096,
+ "num_blocks": 32768,
+ "name": "Malloc1"
+ },
+ "method": "bdev_malloc_create"
+ },
+ {
+ "params": {
+ "block_size": 4096,
+ "num_blocks": 32768,
+ "name": "Malloc2"
+ },
+ "method": "bdev_malloc_create"
+ },
+ {
+ "params": {
+ "block_size": 4096,
+ "num_blocks": 32768,
+ "name": "Malloc3"
+ },
+ "method": "bdev_malloc_create"
+ },
+ {
+ "params": {
+ "block_size": 4096,
+ "num_blocks": 32768,
+ "name": "Malloc4"
+ },
+ "method": "bdev_malloc_create"
+ },
+ {
+ "params": {
+ "block_size": 4096,
+ "num_blocks": 32768,
+ "name": "Malloc5"
+ },
+ "method": "bdev_malloc_create"
+ },
+ {
+ "params": {
+ "block_size": 4096,
+ "num_blocks": 32768,
+ "name": "Malloc6"
+ },
+ "method": "bdev_malloc_create"
+ },
+ {
+ "params": {
+ "block_size": 4096,
+ "num_blocks": 32768,
+ "name": "Malloc7"
+ },
+ "method": "bdev_malloc_create"
+ },
+ {
+ "params": {
+ "block_size": 2048,
+ "name": "AIO0",
+ "filename": "/tmp/sample_aio0"
+ },
+ "method": "bdev_aio_create"
+ },
+ {
+ "params": {
+ "block_size": 2048,
+ "name": "AIO1",
+ "filename": "/tmp/sample_aio1"
+ },
+ "method": "bdev_aio_create"
+ },
+ {
+ "params": {
+ "block_size": 2048,
+ "name": "AIO2",
+ "filename": "/tmp/sample_aio2"
+ },
+ "method": "bdev_aio_create"
+ },
+ {
+ "params": {
+ "block_size": 2048,
+ "name": "AIO3",
+ "filename": "/tmp/sample_aio1"
+ },
+ "method": "bdev_aio_create"
+ },
+ {
+ "params": {
+ "block_size": 2048,
+ "name": "AIO4",
+ "filename": "/tmp/sample_aio2"
+ },
+ "method": "bdev_aio_create"
+ },
+ {
+ "params": {
+ "name": "Pmem0",
+ "pmem_file": "/tmp/sample_pmem"
+ },
+ "method": "bdev_pmem_create"
+ }
+ ]
+ },
+ {
+ "subsystem": "scsi",
+ "config": null
+ },
+ {
+ "subsystem": "nvmf",
+ "config": [
+ {
+ "params": {
+ "acceptor_poll_rate": 10000
+ },
+ "method": "nvmf_set_config"
+ },
+ {
+ "params": {
+ "max_subsystems": 1024
+ },
+ "method": "nvmf_set_max_subsystems"
+ },
+ {
+ "params": {
+ "max_namespaces": 20,
+ "allow_any_host": false,
+ "serial_number": "SPDK00000000000001",
+ "model_number": "SPDK_Controller1",
+ "nqn": "nqn.2016-06.io.spdk:cnode1"
+ },
+ "method": "nvmf_create_subsystem"
+ },
+ {
+ "params": {
+ "listen_address": {
+ "adrfam": "IPv4",
+ "traddr": "10.0.2.15",
+ "trsvcid": "4420",
+ "trtype": "RDMA"
+ },
+ "nqn": "nqn.2016-06.io.spdk:cnode1"
+ },
+ "method": "nvmf_subsystem_add_listener"
+ },
+ {
+ "params": {
+ "host": "nqn.2016-06.io.spdk:init",
+ "nqn": "nqn.2016-06.io.spdk:cnode1"
+ },
+ "method": "nvmf_subsystem_add_host"
+ },
+ {
+ "params": {
+ "namespace": {
+ "bdev_name": "Nvme0n1p5",
+ "nsid": 1
+ },
+ "nqn": "nqn.2016-06.io.spdk:cnode1"
+ },
+ "method": "nvmf_subsystem_add_ns"
+ },
+ {
+ "params": {
+ "namespace": {
+ "bdev_name": "Nvme0n1p6",
+ "nsid": 2
+ },
+ "nqn": "nqn.2016-06.io.spdk:cnode1"
+ },
+ "method": "nvmf_subsystem_add_ns"
+ },
+ {
+ "params": {
+ "allow_any_host": false,
+ "serial_number": "SPDK00000000000002",
+ "model_number": "SPDK_Controller2",
+ "nqn": "nqn.2016-06.io.spdk:cnode2"
+ },
+ "method": "nvmf_create_subsystem"
+ },
+ {
+ "method": "nvmf_subsystem_add_listener",
+ "params": {
+ "listen_address": {
+ "adrfam": "IPv4",
+ "traddr": "10.0.2.15",
+ "trsvcid": "4421",
+ "trtype": "RDMA"
+ },
+ "nqn": "nqn.2016-06.io.spdk:cnode2"
+ }
+ },
+ {
+ "method": "nvmf_subsystem_add_host",
+ "params": {
+ "host": "nqn.2016-06.io.spdk:init",
+ "nqn": "nqn.2016-06.io.spdk:cnode2"
+ }
+ },
+ {
+ "method": "nvmf_subsystem_add_ns",
+ "params": {
+ "namespace": {
+ "bdev_name": "Malloc1",
+ "nsid": 1
+ },
+ "nqn": "nqn.2016-06.io.spdk:cnode2"
+ }
+ },
+ {
+ "method": "nvmf_subsystem_add_ns",
+ "params": {
+ "namespace": {
+ "bdev_name": "Malloc2",
+ "nsid": 2
+ },
+ "nqn": "nqn.2016-06.io.spdk:cnode2"
+ }
+ },
+ {
+ "method": "nvmf_subsystem_add_ns",
+ "params": {
+ "namespace": {
+ "bdev_name": "AIO0",
+ "nsid": 3
+ },
+ "nqn": "nqn.2016-06.io.spdk:cnode2"
+ }
+ },
+ {
+ "method": "nvmf_subsystem_add_ns",
+ "params": {
+ "namespace": {
+ "bdev_name": "AIO1",
+ "nsid": 4
+ },
+ "nqn": "nqn.2016-06.io.spdk:cnode2"
+ }
+ }
+ ]
+ },
+ {
+ "subsystem": "nbd",
+ "config": []
+ },
+ {
+ "subsystem": "vhost",
+ "config": [
+ {
+ "params": {
+ "cpumask": "1",
+ "ctrlr": "naa.vhost.0"
+ },
+ "method": "vhost_create_scsi_controller"
+ },
+ {
+ "params": {
+ "scsi_target_num": 0,
+ "bdev_name": "Malloc4",
+ "ctrlr": "naa.vhost.0"
+ },
+ "method": "vhost_scsi_controller_add_target"
+ },
+ {
+ "params": {
+ "scsi_target_num": 1,
+ "bdev_name": "AIO3",
+ "ctrlr": "naa.vhost.0"
+ },
+ "method": "vhost_scsi_controller_add_target"
+ },
+ {
+ "params": {
+ "scsi_target_num": 2,
+ "bdev_name": "Nvme0n1p2",
+ "ctrlr": "naa.vhost.0"
+ },
+ "method": "vhost_scsi_controller_add_target"
+ },
+ {
+ "params": {
+ "cpumask": "1",
+ "ctrlr": "naa.vhost.1"
+ },
+ "method": "vhost_create_scsi_controller"
+ },
+ {
+ "params": {
+ "scsi_target_num": 0,
+ "bdev_name": "AIO4",
+ "ctrlr": "naa.vhost.1"
+ },
+ "method": "vhost_scsi_controller_add_target"
+ },
+ {
+ "params": {
+ "dev_name": "Malloc6",
+ "readonly": true,
+ "ctrlr": "vhost.1",
+ "cpumask": "1"
+ },
+ "method": "vhost_create_blk_controller"
+ },
+ {
+ "params": {
+ "dev_name": "Malloc5",
+ "readonly": false,
+ "ctrlr": "naa.vhost.2",
+ "cpumask": "1"
+ },
+ "method": "vhost_create_blk_controller"
+ },
+ {
+ "params": {
+ "cpumask": "1",
+ "io_queues": 2,
+ "ctrlr": "naa.vhost.3"
+ },
+ "method": "vhost_create_nvme_controller"
+ },
+ {
+ "params": {
+ "bdev_name": "Nvme0n1p0",
+ "ctrlr": "naa.vhost.3"
+ },
+ "method": "vhost_nvme_controller_add_ns"
+ },
+ {
+ "params": {
+ "bdev_name": "Nvme0n1p1",
+ "ctrlr": "naa.vhost.3"
+ },
+ "method": "vhost_nvme_controller_add_ns"
+ }
+ ]
+ },
+ {
+ "subsystem": "iscsi",
+ "config": [
+ {
+ "params": {
+ "allow_duplicated_isid": true,
+ "default_time2retain": 20,
+ "mutual_chap": false,
+ "require_chap": false,
+ "immediate_data": true,
+ "node_base": "iqn.2016-06.io.spdk",
+ "nop_in_interval": 10,
+ "max_connections_per_session": 2,
+ "first_burst_length": 8192,
+ "max_queue_depth": 64,
+ "nop_timeout": 30,
+ "chap_group": 1,
+ "max_sessions": 16,
+ "error_recovery_level": 0,
+ "disable_chap": false,
+ "auth_file": "/usr/local/etc/spdk/auth.conf",
+ "default_time2wait": 2
+ },
+ "method": "iscsi_set_options"
+ },
+ {
+ "params": {
+ "portals": [
+ {
+ "cpumask": "0x1",
+ "host": "127.0.0.1",
+ "port": "4000"
+ },
+ {
+ "cpumask": "0x1",
+ "host": "127.0.0.1",
+ "port": "4001"
+ }
+ ],
+ "tag": 1
+ },
+ "method": "iscsi_create_portal_group"
+ },
+ {
+ "params": {
+ "initiators": [
+ "ANY"
+ ],
+ "tag": 1,
+ "netmasks": [
+ "127.0.0.1/32"
+ ]
+ },
+ "method": "iscsi_create_initiator_group"
+ },
+ {
+ "params": {
+ "luns": [
+ {
+ "lun_id": 0,
+ "bdev_name": "Malloc0"
+ },
+ {
+ "lun_id": 1,
+ "bdev_name": "AIO2"
+ },
+ {
+ "lun_id": 2,
+ "bdev_name": "AIO3"
+ },
+ {
+ "lun_id": 3,
+ "bdev_name": "AIO4"
+ }
+ ],
+ "mutual_chap": false,
+ "name": "iqn.2016-06.io.spdk:disk1",
+ "alias_name": "Data Disk1",
+ "require_chap": false,
+ "chap_group": 1,
+ "pg_ig_maps": [
+ {
+ "ig_tag": 1,
+ "pg_tag": 1
+ }
+ ],
+ "data_digest": false,
+ "disable_chap": false,
+ "header_digest": false,
+ "queue_depth": 64
+ },
+ "method": "iscsi_create_target_node"
+ },
+ {
+ "params": {
+ "luns": [
+ {
+ "lun_id": 0,
+ "bdev_name": "Nvme0n1p3"
+ }
+ ],
+ "mutual_chap": false,
+ "name": "iqn.2016-06.io.spdk:disk2",
+ "alias_name": "Data Disk2",
+ "require_chap": false,
+ "chap_group": 1,
+ "pg_ig_maps": [
+ {
+ "ig_tag": 1,
+ "pg_tag": 1
+ }
+ ],
+ "data_digest": false,
+ "disable_chap": false,
+ "header_digest": false,
+ "queue_depth": 32
+ },
+ "method": "iscsi_create_target_node"
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/spdk/test/config_converter/spdk_config_virtio.json b/src/spdk/test/config_converter/spdk_config_virtio.json
new file mode 100644
index 000000000..f9608311c
--- /dev/null
+++ b/src/spdk/test/config_converter/spdk_config_virtio.json
@@ -0,0 +1,133 @@
+{
+ "subsystems": [
+ {
+ "subsystem": "accel",
+ "config": null
+ },
+ {
+ "subsystem": "interface",
+ "config": null
+ },
+ {
+ "subsystem": "net_framework",
+ "config": null
+ },
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "params": {
+ "bdev_io_pool_size": 65536,
+ "bdev_io_cache_size": 256
+ },
+ "method": "bdev_set_options"
+ },
+ {
+ "params": {
+ "retry_count": 4,
+ "timeout_us": 0,
+ "nvme_adminq_poll_period_us": 1000000,
+ "nvme_ioq_poll_period_us": 0,
+ "action_on_timeout": "none"
+ },
+ "method": "bdev_nvme_set_options"
+ },
+ {
+ "params": {
+ "enable": false,
+ "period_us": 100000
+ },
+ "method": "bdev_nvme_set_hotplug"
+ },
+ {
+ "params": {
+ "name": "VirtioScsi0",
+ "dev_type": "scsi",
+ "vq_size": 512,
+ "trtype": "user",
+ "traddr": "naa.vhost.0",
+ "vq_count": 8
+ },
+ "method": "bdev_virtio_attach_controller"
+ },
+ {
+ "params": {
+ "name": "VirtioScsi1",
+ "dev_type": "scsi",
+ "vq_size": 512,
+ "trtype": "user",
+ "traddr": "naa.vhost.1",
+ "vq_count": 8
+ },
+ "method": "bdev_virtio_attach_controller"
+ },
+ {
+ "params": {
+ "name": "VirtioBlk4",
+ "dev_type": "blk",
+ "vq_size": 512,
+ "trtype": "user",
+ "traddr": "vhost.1",
+ "vq_count": 1
+ },
+ "method": "bdev_virtio_attach_controller"
+ }
+ ]
+ },
+ {
+ "subsystem": "scsi",
+ "config": null
+ },
+ {
+ "subsystem": "nvmf",
+ "config": [
+ {
+ "params": {
+ "acceptor_poll_rate": 10000
+ },
+ "method": "nvmf_set_config"
+ },
+ {
+ "params": {
+ "max_subsystems": 1024
+ },
+ "method": "nvmf_set_max_subsystems"
+ }
+ ]
+ },
+ {
+ "subsystem": "nbd",
+ "config": []
+ },
+ {
+ "subsystem": "vhost",
+ "config": []
+ },
+ {
+ "subsystem": "iscsi",
+ "config": [
+ {
+ "params": {
+ "allow_duplicated_isid": false,
+ "default_time2retain": 20,
+ "mutual_chap": false,
+ "require_chap": false,
+ "immediate_data": true,
+ "node_base": "iqn.2016-06.io.spdk",
+ "nop_in_interval": 30,
+ "max_connections_per_session": 2,
+ "first_burst_length": 8192,
+ "max_queue_depth": 64,
+ "nop_timeout": 60,
+ "chap_group": 0,
+ "max_sessions": 128,
+ "error_recovery_level": 0,
+ "disable_chap": false,
+ "default_time2wait": 2
+ },
+ "method": "iscsi_set_options"
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/spdk/test/config_converter/test_converter.sh b/src/spdk/test/config_converter/test_converter.sh
new file mode 100755
index 000000000..1b3279a85
--- /dev/null
+++ b/src/spdk/test/config_converter/test_converter.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f "$(dirname "$0")")
+rootdir=$(readlink -f "$testdir/../../")
+source "$rootdir/test/common/autotest_common.sh"
+
+CONVERTER_DIR=$testdir
+SPDK_BUILD_DIR=$rootdir
+
+function test_cleanup() {
+ rm -f $CONVERTER_DIR/config_converter.json $CONVERTER_DIR/config_virtio_converter.json
+}
+
+function on_error_exit() {
+ set +e
+ test_cleanup
+ print_backtrace
+ exit 1
+}
+
+trap 'on_error_exit' ERR
+
+$SPDK_BUILD_DIR/scripts/config_converter.py < $CONVERTER_DIR/config.ini > $CONVERTER_DIR/config_converter.json
+$SPDK_BUILD_DIR/scripts/config_converter.py < $CONVERTER_DIR/config_virtio.ini > $CONVERTER_DIR/config_virtio_converter.json
+diff -I "cpumask" -I "max_queue_depth" -I "queue_depth" <(jq -S . $CONVERTER_DIR/config_converter.json) <(jq -S . $CONVERTER_DIR/spdk_config.json)
+diff <(jq -S . $CONVERTER_DIR/config_virtio_converter.json) <(jq -S . $CONVERTER_DIR/spdk_config_virtio.json)
+test_cleanup
diff --git a/src/spdk/test/cpp_headers/.gitignore b/src/spdk/test/cpp_headers/.gitignore
new file mode 100644
index 000000000..ce1da4c53
--- /dev/null
+++ b/src/spdk/test/cpp_headers/.gitignore
@@ -0,0 +1 @@
+*.cpp
diff --git a/src/spdk/test/cpp_headers/Makefile b/src/spdk/test/cpp_headers/Makefile
new file mode 100644
index 000000000..64a9eda35
--- /dev/null
+++ b/src/spdk/test/cpp_headers/Makefile
@@ -0,0 +1,59 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+HEADERS := $(wildcard $(SPDK_ROOT_DIR)/include/spdk/*.h)
+
+# On Linux, queue_extras.h is pulled in with queue.h;
+# on FreeBSD, we want to ignore queue_extras.h entirely.
+HEADERS := $(filter-out $(SPDK_ROOT_DIR)/include/spdk/queue_extras.h,$(HEADERS))
+CXX_SRCS := $(patsubst %.h,%.cpp,$(notdir $(HEADERS)))
+
+install : all
+
+%.cpp: $(SPDK_ROOT_DIR)/include/spdk/%.h
+ $(Q)echo " TEST_HEADER include/spdk/$(notdir $<)"; \
+ echo '#include "spdk/$(notdir $<)"' > $@
+
+all : $(CXX_SRCS) $(OBJS)
+ @:
+
+clean :
+ $(CLEAN_C) $(CXX_SRCS)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.deps.mk
+
+uninstall:
+ @:
diff --git a/src/spdk/test/dd/basic_rw.sh b/src/spdk/test/dd/basic_rw.sh
new file mode 100755
index 000000000..5e9be5363
--- /dev/null
+++ b/src/spdk/test/dd/basic_rw.sh
@@ -0,0 +1,107 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f "$(dirname "$0")")
+rootdir=$(readlink -f "$testdir/../../")
+source "$testdir/common.sh"
+
+basic_rw() {
+ local native_bs=$1
+ local count size
+ local qds bss
+
+ qds=(1 64)
+ # Generate some bs for tests based on the native_bs
+ for bs in {0..4}; do
+ bss+=($((native_bs << bs)))
+ done
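+	# e.g. a native_bs of 512 yields bss=(512 1024 2048 4096 8192)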
+
+ for bs in "${bss[@]}"; do
+ for qd in "${qds[@]}"; do
+ count=$((0xffff / bs))
+ count=$((count == 0 ? 1 : count))
+ size=$((count * bs))
+
+ gen_bytes "$size" > "$test_file0"
+
+ "${DD_APP[@]}" \
+ --if="$test_file0" \
+ --ob="$bdev0" \
+ --bs="$bs" \
+ --qd="$qd" \
+ --json <(gen_conf)
+
+ "${DD_APP[@]}" \
+ --ib="$bdev0" \
+ --of="$test_file1" \
+ --bs="$bs" \
+ --qd="$qd" \
+ --count="$count" \
+ --json <(gen_conf)
+
+ diff -q "$test_file0" "$test_file1"
+ clear_nvme "$bdev0" "" "$size"
+ done
+ done
+}
+
+basic_offset() {
+	# Check if offsetting works - using the default io size of 4k
+ local count seek skip data data_check
+
+ gen_bytes 4096 > "$test_file0"
+ ((count = seek = skip = 1))
+ data=$(< "$test_file0")
+
+ "${DD_APP[@]}" \
+ --if="$test_file0" \
+ --ob="$bdev0" \
+ --seek="$seek" \
+ --json <(gen_conf)
+
+ "${DD_APP[@]}" \
+ --ib="$bdev0" \
+ --of="$test_file1" \
+ --skip="$skip" \
+ --count="$count" \
+ --json <(gen_conf)
+
+ read -rn${#data} data_check < "$test_file1"
+ [[ $data == "$data_check" ]]
+}
+
+plain_copy() {
+ # Test if copy between plain files works as well
+ "${DD_APP[@]}" --if="$test_file0" --of="$test_file1"
+ diff -q "$test_file0" "$test_file1"
+}
+
+cleanup() {
+ clear_nvme "$bdev0"
+ rm -f "$test_file0" "$test_file1"
+}
+
+trap "cleanup" EXIT
+
+nvmes=("$@")
+nvme0=Nvme0 nvme0_pci=${nvmes[0]} bdev0=Nvme0n1
+
+declare -A method_bdev_nvme_attach_controller_0=(
+ ["name"]=$nvme0
+ ["traddr"]=$nvme0_pci
+ ["trtype"]=pcie
+)
+
+test_file0=$SPDK_TEST_STORAGE/dd.dump0
+test_file1=$SPDK_TEST_STORAGE/dd.dump1
+native_bs=$(get_native_nvme_bs "$nvme0_pci")
+
+# Test if running with bs < native_bs successfully fails
+run_test "dd_bs_lt_native_bs" \
+ NOT "${DD_APP[@]}" \
+ --if=<(:) \
+ --ob="$bdev0" \
+ --bs=$((native_bs >> 1)) \
+ --json <(gen_conf)
+
+run_test "dd_rw" basic_rw "$native_bs"
+run_test "dd_rw_offset" basic_offset
+run_test "dd_rw_file_copy" plain_copy
diff --git a/src/spdk/test/dd/bdev_to_bdev.sh b/src/spdk/test/dd/bdev_to_bdev.sh
new file mode 100755
index 000000000..f18705ef7
--- /dev/null
+++ b/src/spdk/test/dd/bdev_to_bdev.sh
@@ -0,0 +1,111 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f "$(dirname "$0")")
+rootdir=$(readlink -f "$testdir/../../")
+source "$testdir/common.sh"
+
+nvmes=("$@")
+
+offset_magic() {
+ local magic_check
+ local offsets offset
+
+ offsets=(16 256 4096) # * bs
+
+ for offset in "${offsets[@]}"; do
+ "${DD_APP[@]}" \
+ --ib="$bdev0" \
+ --ob="$bdev1" \
+ --count="$count" \
+ --seek="$offset" \
+ --bs="$bs" \
+ --json <(gen_conf)
+
+ "${DD_APP[@]}" \
+ --ib="$bdev1" \
+ --of="$test_file1" \
+ --count=1 \
+ --skip="$offset" \
+ --bs="$bs" \
+ --json <(gen_conf)
+
+ read -rn${#magic} magic_check < "$test_file1"
+ [[ $magic_check == "$magic" ]]
+ done
+}
+
+cleanup() {
+	# Zero up to 1G on the input bdev and 4G on the output bdev to account for the offsetting
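+	# (offset_magic seeks up to 4096 blocks * 1M bs = 4G into the output bdev)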
+ clear_nvme "$bdev0" "" $((0x40000000 + ${#magic}))
+ clear_nvme "$bdev1" "" $((0x100000000 + ${#magic}))
+ rm -f "$test_file0" "$test_file1" "$aio1"
+}
+
+trap "cleanup" EXIT
+
+bs=$((1024 << 10))
+
+if ((${#nvmes[@]} > 1)); then
+ nvme0=Nvme0 bdev0=Nvme0n1 nvme0_pci=${nvmes[0]} # input bdev
+ nvme1=Nvme1 bdev1=Nvme1n1 nvme1_pci=${nvmes[1]} # output bdev
+
+ declare -A method_bdev_nvme_attach_controller_0=(
+ ["name"]=$nvme0
+ ["traddr"]=$nvme0_pci
+ ["trtype"]=pcie
+ )
+ declare -A method_bdev_nvme_attach_controller_1=(
+ ["name"]=$nvme1
+ ["traddr"]=$nvme1_pci
+ ["trtype"]=pcie
+ )
+else
+	# Use AIO to compensate for the lack of actual hardware
+ nvme0=Nvme0 bdev0=Nvme0n1 nvme0_pci=${nvmes[0]} # input bdev
+ aio1=$SPDK_TEST_STORAGE/aio1 bdev1=aio1 # output bdev
+
+ declare -A method_bdev_nvme_attach_controller_1=(
+ ["name"]=$nvme0
+ ["traddr"]=$nvme0_pci
+ ["trtype"]=pcie
+ )
+ declare -A method_bdev_aio_create_0=(
+ ["name"]=$bdev1
+ ["filename"]=$aio1
+ ["block_size"]=4096
+ )
+
+ # 8G AIO file
+ "${DD_APP[@]}" \
+ --if=/dev/zero \
+ --of="$aio1" \
+ --bs="$bs" \
+ --count=8192
+fi
+
+test_file0=$SPDK_TEST_STORAGE/dd.dump0
+test_file1=$SPDK_TEST_STORAGE/dd.dump1
+
+magic="This Is Our Magic, find it"
+echo "$magic" > "$test_file0"
+
+# Make the file a bit bigger (~1GB)
+run_test "dd_inflate_file" \
+ "${DD_APP[@]}" \
+ --if=/dev/zero \
+ --of="$test_file0" \
+ --oflag=append \
+ --bs="$bs" \
+ --count=1024
+
+test_file0_size=$(wc -c < "$test_file0")
+
+# Now, copy it over to the first nvme with the default bs (4k)
+run_test "dd_copy_to_out_bdev" \
+ "${DD_APP[@]}" \
+ --if="$test_file0" \
+ --ob="$bdev0" \
+ --json <(gen_conf)
+
+count=$(((test_file0_size / bs) + 1))
+
+run_test "dd_offset_magic" offset_magic
diff --git a/src/spdk/test/dd/common.sh b/src/spdk/test/dd/common.sh
new file mode 100644
index 000000000..d2f7defa3
--- /dev/null
+++ b/src/spdk/test/dd/common.sh
@@ -0,0 +1,154 @@
+source "$rootdir/test/common/autotest_common.sh"
+source "$rootdir/scripts/common.sh"
+
+clear_nvme() {
+ local bdev=$1
+ local nvme_ref=$2
+ local size=${3:-0xffff}
+
+ local bs=$((1024 << 10)) # 1M
+ local count=$(((size / bs) + (size % bs ? 1 : 0)))
+
+ "${DD_APP[@]}" \
+ --if="/dev/zero" \
+ --bs="$bs" \
+ --ob="$bdev" \
+ --count="$count" \
+ --json <(gen_conf $nvme_ref)
+}
+
+trunc_files() {
+ local f
+ for f; do : > "$f"; done
+}
+
+gen_conf() {
+ xtrace_disable
+
+ local ref_name
+ local method methods
+ local param params
+ local config
+
+ # Pick references to all assoc arrays and build subsystem's config
+ # around them. The assoc array should be the name of the rpc method
+ # suffixed with unique _ID (ID may be any string). Default arrays
+ # should be prefixed with _method string. The keys of the array
+ # should store names of the method's parameters - proper quoting
+ # of the values is done here. extra_subsystems[] can store extra
+ # json configuration for different subsystems, other than bdev.
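+	# Example (hypothetical malloc bdev):
+	#   declare -A method_bdev_malloc_create_0=([name]=Malloc0 [num_blocks]=2048 [block_size]=512)
+	#   gen_conf  # -> bdev subsystem config with a single bdev_malloc_create entry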
+
+ methods=("${@:-${!method_@}}")
+ local IFS=","
+
+ for ref_name in "${methods[@]}"; do
+ method=${ref_name#*method_} method=${method%_*} params=()
+
+ # FIXME: centos7's Bash got trapped in 2011:
+ # local -n ref=$ref_name -> local: -n: invalid option
+ # HACK: it with eval and partial refs instead.
+ eval "local refs=(\${!${ref_name}[@]})"
+ local param_ref
+
+ for param in "${refs[@]}"; do
+ param_ref="${ref_name}[$param]"
+ if [[ ${!param_ref} =~ ^([0-9]+|true|false|\{.*\})$ ]]; then
+ params+=("\"$param\": ${!param_ref}")
+ else
+ params+=("\"$param\": \"${!param_ref}\"")
+ fi
+ done
+
+ config+=("$(
+ cat <<- JSON
+ {
+ "params": {
+ ${params[*]}
+ },
+ "method": "$method"
+ }
+ JSON
+ )")
+ done
+
+ jq . <<- JSON | tee /dev/stderr
+ {
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ ${config[*]}
+ ]
+ }
+ ${extra_subsystems[*]:+,${extra_subsystems[*]}}
+ ]
+ }
+ JSON
+
+ xtrace_restore
+}
+
+gen_bytes() {
+ xtrace_disable
+
+ local max=$1
+ local bytes
+ local byte
+ local string
+ shift
+
+ bytes=({a..z} {0..9})
+ if (($#)); then
+ bytes=("$@")
+ fi
+
+ for ((byte = 0; byte < max; byte++)); do
+ string+=${bytes[RANDOM % ${#bytes[@]}]}
+ done
+ printf '%b' "$string"
+
+ xtrace_restore
+}
+
+get_native_nvme_bs() {
+	# This is now needed since spdk_dd rejects any bs smaller than the native
+	# bs of the given nvme, so we need to make sure all tests use bs >= native_bs.
+	# Use identify here so we don't have to switch nvmes back and forth
+	# between user space and the kernel.
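+	# e.g. identify output containing "Current LBA Format: LBA Format #00" and
+	# "LBA Format #00: Data Size: 512" would make this function return 512.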
+ local pci=$1 lbaf id
+
+ mapfile -t id < <("$rootdir/build/examples/identify" -r trtype:pcie "traddr:$pci")
+
+ # Get size of the current LBAF
+ [[ ${id[*]} =~ "Current LBA Format:"\ *"LBA Format #"([0-9]+) ]]
+ lbaf=${BASH_REMATCH[1]}
+ [[ ${id[*]} =~ "LBA Format #$lbaf: Data Size:"\ *([0-9]+) ]]
+ lbaf=${BASH_REMATCH[1]}
+
+ echo "$lbaf"
+}
+
+check_liburing() {
+ # Simply check if spdk_dd links to liburing. If yes, log that information.
+ local lib so
+ local -g liburing_in_use=0
+
+ while read -r lib _ so _; do
+ if [[ $lib == liburing.so.* ]]; then
+ printf '* spdk_dd linked to liburing\n'
+ # For sanity, check build config to see if liburing was requested.
+ if [[ -e $rootdir/test/common/build_config.sh ]]; then
+ source "$rootdir/test/common/build_config.sh"
+ fi
+ if [[ $CONFIG_URING != y ]]; then
+ printf '* spdk_dd built with liburing, but no liburing support requested?\n'
+ fi
+ if [[ ! -e $so ]]; then
+ printf '* %s is missing, aborting\n' "$lib"
+ return 1
+ fi
+ export liburing_in_use=1
+ return 0
+ fi
+ done < <(LD_TRACE_LOADED_OBJECTS=1 "${DD_APP[@]}") >&2
+}
diff --git a/src/spdk/test/dd/dd.sh b/src/spdk/test/dd/dd.sh
new file mode 100755
index 000000000..e2b8bb86a
--- /dev/null
+++ b/src/spdk/test/dd/dd.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f "$(dirname "$0")")
+rootdir=$(readlink -f "$testdir/../../")
+source "$testdir/common.sh"
+
+"$rootdir/scripts/setup.sh"
+nvmes=($(nvme_in_userspace))
+
+check_liburing
+
+run_test "spdk_dd_basic_rw" "$testdir/basic_rw.sh" "${nvmes[@]}"
+run_test "spdk_dd_posix" "$testdir/posix.sh"
+run_test "spdk_dd_bdev_to_bdev" "$testdir/bdev_to_bdev.sh" "${nvmes[@]}"
diff --git a/src/spdk/test/dd/posix.sh b/src/spdk/test/dd/posix.sh
new file mode 100755
index 000000000..15346d8d3
--- /dev/null
+++ b/src/spdk/test/dd/posix.sh
@@ -0,0 +1,122 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f "$(dirname "$0")")
+rootdir=$(readlink -f "$testdir/../../")
+source "$testdir/common.sh"
+
+cleanup() {
+ rm -f "$test_file0"{,.link}
+ rm -f "$test_file1"{,.link}
+}
+
+append() {
+ local dump0
+ local dump1
+
+ dump0=$(gen_bytes 32)
+ dump1=$(gen_bytes 32)
+
+ printf '%s' "$dump0" > "$test_file0"
+ printf '%s' "$dump1" > "$test_file1"
+
+ "${DD_APP[@]}" --if="$test_file0" --of="$test_file1" --oflag=append
+
+ [[ $(< "$test_file1") == "${dump1}${dump0}" ]]
+}
+
+directory() {
+ NOT "${DD_APP[@]}" --if="$test_file0" --iflag=directory --of="$test_file0"
+ NOT "${DD_APP[@]}" --if="$test_file0" --of="$test_file0" --oflag=directory
+}
+
+nofollow() {
+ local test_file0_link=$test_file0.link
+ local test_file1_link=$test_file1.link
+
+ ln -fs "$test_file0" "$test_file0_link"
+ ln -fs "$test_file1" "$test_file1_link"
+
+ NOT "${DD_APP[@]}" --if="$test_file0_link" --iflag=nofollow --of="$test_file1"
+ NOT "${DD_APP[@]}" --if="$test_file0" --of="$test_file1_link" --oflag=nofollow
+
+ # Do an extra step of checking if we actually can follow symlinks
+ gen_bytes 512 > "$test_file0"
+
+ "${DD_APP[@]}" --if="$test_file0_link" --of="$test_file1"
+ [[ $(< "$test_file0") == "$(< "$test_file1")" ]]
+}
+
+noatime() {
+ local atime_if
+ local atime_of
+
+	# It seems that spdk_dd doesn't update the atime when 0 bytes are copied.
+	# This differs from how standard dd behaves, for instance.
+ gen_bytes 512 > "$test_file0"
+
+ atime_if=$(stat --printf="%X" "$test_file0")
+ atime_of=$(stat --printf="%X" "$test_file1")
+
+ "${DD_APP[@]}" --if="$test_file0" --iflag=noatime --of="$test_file1"
+ ((atime_if == $(stat --printf="%X" "$test_file0")))
+ ((atime_of == $(stat --printf="%X" "$test_file1")))
+
+ "${DD_APP[@]}" --if="$test_file0" --of="$test_file1"
+ ((atime_if < $(stat --printf="%X" "$test_file0")))
+}
+
+io() {
+ local flags_ro flags_rw flag_ro flag_rw
+
+	# O_NONBLOCK is effectively a no-op, from a functional perspective, when
+	# open()ing a regular file, but keep it here just to test its usage.
+ flags_ro=(direct nonblock)
+ flags_rw=("${flags_ro[@]}" sync dsync)
+
+ # simply check if data was correctly copied between files
+ for flag_ro in "${flags_ro[@]}"; do
+ gen_bytes 512 > "$test_file0"
+ for flag_rw in "${flags_rw[@]}"; do
+ "${DD_APP[@]}" \
+ --if="$test_file0" \
+ --iflag="$flag_ro" \
+ --of="$test_file1" \
+ --oflag="$flag_rw"
+ [[ $(< "$test_file0") == "$(< "$test_file1")" ]]
+ done
+ done
+}
+
+tests() {
+ printf '* First test run%s\n' \
+ "${msg[liburing_in_use]}" >&2
+
+ run_test "dd_flag_append" append
+ run_test "dd_flag_directory" directory
+ run_test "dd_flag_nofollow" nofollow
+ run_test "dd_flag_noatime" noatime
+ run_test "dd_flags_misc" io
+}
+
+tests_forced_aio() {
+ printf '* Second test run%s\n' \
+ "${msg[liburing_in_use ? 2 : 0]}" >&2
+
+ DD_APP+=("--aio")
+ run_test "dd_flag_append_forced_aio" append
+ run_test "dd_flag_directory_forced_aio" directory
+ run_test "dd_flag_nofollow_forced_aio" nofollow
+ run_test "dd_flag_noatime_forced_aio" noatime
+ run_test "dd_flags_misc_forced_aio" io
+}
+
+msg[0]=", using AIO"
+msg[1]=", liburing in use"
+msg[2]=", disabling liburing, forcing AIO"
+
+trap "cleanup" EXIT
+
+test_file0=$SPDK_TEST_STORAGE/dd.dump0
+test_file1=$SPDK_TEST_STORAGE/dd.dump1
+
+tests
+tests_forced_aio
diff --git a/src/spdk/test/dpdk_memory_utility/test_dpdk_mem_info.sh b/src/spdk/test/dpdk_memory_utility/test_dpdk_mem_info.sh
new file mode 100755
index 000000000..e49094635
--- /dev/null
+++ b/src/spdk/test/dpdk_memory_utility/test_dpdk_mem_info.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+SPDK_APP="$SPDK_BIN_DIR/spdk_tgt"
+MEM_SCRIPT="$rootdir/scripts/dpdk_mem_info.py"
+
+$SPDK_APP &
+spdkpid=$!
+
+waitforlisten $spdkpid
+
+trap 'killprocess $spdkpid' SIGINT SIGTERM EXIT
+
+$rpc_py env_dpdk_get_mem_stats
+
+$MEM_SCRIPT
+
+$MEM_SCRIPT -m 0
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdkpid
diff --git a/src/spdk/test/env/Makefile b/src/spdk/test/env/Makefile
new file mode 100644
index 000000000..33b7c903b
--- /dev/null
+++ b/src/spdk/test/env/Makefile
@@ -0,0 +1,50 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+ENV_NAME := $(notdir $(CONFIG_ENV))
+
+DIRS-y = mem_callbacks vtophys
+
+ifeq ($(ENV_NAME),env_dpdk)
+DIRS-y += env_dpdk_post_init memory pci
+endif
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/env/env.sh b/src/spdk/test/env/env.sh
new file mode 100755
index 000000000..696c14b08
--- /dev/null
+++ b/src/spdk/test/env/env.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+run_test "env_memory" $testdir/memory/memory_ut
+run_test "env_vtophys" $testdir/vtophys/vtophys
+run_test "env_pci" $testdir/pci/pci_ut
+
+argv="-c 0x1 "
+if [ $(uname) = Linux ]; then
+ # The default base virtaddr falls into a region reserved by ASAN.
+ # DPDK will try to find the nearest available address space by
+ # trying to do mmap over and over, which will take ages to finish.
+ # We speed up the process by specifying an address that's not
+ # supposed to be reserved by ASAN. Regular SPDK applications do
+ # this implicitly.
+ argv+="--base-virtaddr=0x200000000000"
+fi
+run_test "env_dpdk_post_init" $testdir/env_dpdk_post_init/env_dpdk_post_init $argv
+
+if [ $(uname) = Linux ]; then
+	# This tests the --match-allocations DPDK parameter, which is only
+	# supported on Linux.
+ run_test "env_mem_callbacks" $testdir/mem_callbacks/mem_callbacks
+fi
diff --git a/src/spdk/test/env/env_dpdk_post_init/.gitignore b/src/spdk/test/env/env_dpdk_post_init/.gitignore
new file mode 100644
index 000000000..39bd89884
--- /dev/null
+++ b/src/spdk/test/env/env_dpdk_post_init/.gitignore
@@ -0,0 +1 @@
+env_dpdk_post_init
diff --git a/src/spdk/test/env/env_dpdk_post_init/Makefile b/src/spdk/test/env/env_dpdk_post_init/Makefile
new file mode 100644
index 000000000..ea0ff7bb7
--- /dev/null
+++ b/src/spdk/test/env/env_dpdk_post_init/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+CFLAGS += $(ENV_CFLAGS)
+APP = env_dpdk_post_init
+
+include $(SPDK_ROOT_DIR)/mk/nvme.libtest.mk
diff --git a/src/spdk/test/env/env_dpdk_post_init/env_dpdk_post_init.c b/src/spdk/test/env/env_dpdk_post_init/env_dpdk_post_init.c
new file mode 100644
index 000000000..1b3897ea8
--- /dev/null
+++ b/src/spdk/test/env/env_dpdk_post_init/env_dpdk_post_init.c
@@ -0,0 +1,126 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/nvme.h"
+#include "spdk/env.h"
+#include "spdk/env_dpdk.h"
+#include <rte_config.h>
+#include <rte_eal.h>
+
+#define MAX_DEVS 64
+
+struct dev {
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct spdk_nvme_ns *ns;
+ struct spdk_nvme_qpair *qpair;
+ char name[SPDK_NVMF_TRADDR_MAX_LEN + 1];
+};
+
+static struct dev g_nvme_devs[MAX_DEVS];
+static int g_num_devs = 0;
+static int g_failed = 0;
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ printf("Attaching to %s\n", trid->traddr);
+
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ struct dev *dev;
+ uint32_t nsid;
+
+	/* Add to the dev list, unless the table is already full. */
+	if (g_num_devs >= MAX_DEVS) {
+		return;
+	}
+	dev = &g_nvme_devs[g_num_devs++];
+
+ dev->ctrlr = ctrlr;
+ nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
+ dev->ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
+
+ dev->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);
+ if (dev->qpair == NULL) {
+ g_failed = 1;
+ return;
+ }
+
+ snprintf(dev->name, sizeof(dev->name), "%s",
+ trid->traddr);
+
+ printf("Attached to %s\n", dev->name);
+}
+
+int
+main(int argc, char **argv)
+{
+ int ret;
+ int i;
+
+ printf("Starting DPDK initialization...\n");
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to initialize DPDK\n");
+ return -1;
+ }
+
+ printf("Starting SPDK post initialization...\n");
+ ret = spdk_env_dpdk_post_init(false);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to initialize SPDK\n");
+ return -1;
+ }
+
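+	/* Enumerate locally attached NVMe controllers; probe_cb and attach_cb
+	 * are invoked for each controller that is found.
+	 */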
+ printf("SPDK NVMe probe\n");
+ if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
+ fprintf(stderr, "spdk_nvme_probe() failed\n");
+ return 1;
+ }
+
+ printf("Cleaning up...\n");
+ for (i = 0; i < g_num_devs; i++) {
+ struct dev *dev = &g_nvme_devs[i];
+ spdk_nvme_detach(dev->ctrlr);
+ }
+
+ return g_failed;
+}
diff --git a/src/spdk/test/env/mem_callbacks/.gitignore b/src/spdk/test/env/mem_callbacks/.gitignore
new file mode 100644
index 000000000..aff8f922b
--- /dev/null
+++ b/src/spdk/test/env/mem_callbacks/.gitignore
@@ -0,0 +1 @@
+mem_callbacks
diff --git a/src/spdk/test/env/mem_callbacks/Makefile b/src/spdk/test/env/mem_callbacks/Makefile
new file mode 100644
index 000000000..f31a765ab
--- /dev/null
+++ b/src/spdk/test/env/mem_callbacks/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+UNIT_TEST_LINK_ENV = 1
+TEST_FILE = mem_callbacks.c
+CFLAGS += $(ENV_CFLAGS)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/env/mem_callbacks/mem_callbacks.c b/src/spdk/test/env/mem_callbacks/mem_callbacks.c
new file mode 100644
index 000000000..165ddb3d8
--- /dev/null
+++ b/src/spdk/test/env/mem_callbacks/mem_callbacks.c
@@ -0,0 +1,217 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/util.h"
+#include "spdk/queue.h"
+#include "spdk_cunit.h"
+
+#include <rte_config.h>
+#include <rte_version.h>
+#include <rte_malloc.h>
+#include <rte_eal_memconfig.h>
+#include <rte_eal.h>
+
+struct mem_allocation {
+ uintptr_t vaddr;
+ size_t len;
+ TAILQ_ENTRY(mem_allocation) link;
+};
+
+static TAILQ_HEAD(, mem_allocation) g_mem_allocations = TAILQ_HEAD_INITIALIZER(g_mem_allocations);
+
+static void
+memory_hotplug_cb(enum rte_mem_event event_type, const void *addr, size_t len, void *arg)
+{
+ struct mem_allocation *allocation;
+
+ if (event_type == RTE_MEM_EVENT_ALLOC) {
+ allocation = calloc(1, sizeof(*allocation));
+ SPDK_CU_ASSERT_FATAL(allocation != NULL);
+
+ printf("register %p %ju\n", addr, len);
+ allocation->vaddr = (uintptr_t)addr;
+ allocation->len = len;
+ TAILQ_INSERT_TAIL(&g_mem_allocations, allocation, link);
+ } else if (event_type == RTE_MEM_EVENT_FREE) {
+ TAILQ_FOREACH(allocation, &g_mem_allocations, link) {
+ if (allocation->vaddr == (uintptr_t)addr && allocation->len == len) {
+ break;
+ }
+ }
+ printf("unregister %p %ju %s\n", addr, len, allocation == NULL ? "FAILED" : "PASSED");
+ SPDK_CU_ASSERT_FATAL(allocation != NULL);
+ TAILQ_REMOVE(&g_mem_allocations, allocation, link);
+ free(allocation);
+ }
+}
+
+static int
+memory_iter_cb(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct mem_allocation *allocation;
+
+ allocation = calloc(1, sizeof(*allocation));
+ SPDK_CU_ASSERT_FATAL(allocation != NULL);
+
+ printf("register %p %ju\n", ms->addr, len);
+ allocation->vaddr = (uintptr_t)ms->addr;
+ allocation->len = len;
+ TAILQ_INSERT_TAIL(&g_mem_allocations, allocation, link);
+
+ return 0;
+}
+
+static void
+verify_buffer(void *_buf, size_t len)
+{
+ uintptr_t buf = (uintptr_t)_buf;
+ struct mem_allocation *allocation;
+
+ SPDK_CU_ASSERT_FATAL(_buf != NULL);
+ printf("buf %p len %ju ", _buf, len);
+ TAILQ_FOREACH(allocation, &g_mem_allocations, link) {
+ if (buf >= allocation->vaddr &&
+ buf + len <= allocation->vaddr + allocation->len) {
+ break;
+ }
+ }
+ printf("%s\n", allocation == NULL ? "FAILED" : "PASSED");
+ CU_ASSERT(allocation != NULL);
+}
+
+static void
+test(void)
+{
+ void *buf1, *buf2, *buf3, *buf4;
+ size_t len1, len2, len3, len4;
+
+ printf("\n");
+
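+	/* Track future hugepage alloc/free events via the callback and seed the
+	 * allocation list with the memory DPDK already mapped during rte_eal_init().
+	 */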
+ rte_mem_event_callback_register("test", memory_hotplug_cb, NULL);
+ rte_memseg_contig_walk(memory_iter_cb, NULL);
+
+ /* First allocate a 3MB buffer. This will allocate a 4MB hugepage
+ * region, with the 3MB buffer allocated from the end of it.
+ */
+ len1 = 3 * 1024 * 1024;
+ printf("malloc %ju\n", len1);
+ buf1 = rte_malloc(NULL, len1, 0);
+ verify_buffer(buf1, len1);
+
+ /* Now allocate a very small buffer. This will get allocated from
+ * the previous 4MB hugepage allocation, just before the 3MB buffer
+ * allocated just above.
+ */
+ len2 = 64;
+ printf("malloc %ju\n", len2);
+ buf2 = rte_malloc(NULL, len2, 0);
+ verify_buffer(buf2, len2);
+
+ /* Allocate a 4MB buffer. This should trigger a new hugepage allocation
+	 * just for this 4MB buffer.
+ */
+ len3 = 4 * 1024 * 1024;
+ printf("malloc %ju\n", len3);
+ buf3 = rte_malloc(NULL, len3, 0);
+ verify_buffer(buf3, len3);
+
+ /* Free the three buffers. Specifically free buf1 first. buf2 was
+ * allocated from the same huge page allocation as buf1 - so we want
+ * to make sure that DPDK doesn't try to free part of the first huge
+ * page allocation - it needs to wait until buf2 is also freed so it
+ * can free all of it.
+ */
+ printf("free %p %ju\n", buf1, len1);
+ rte_free(buf1);
+ printf("free %p %ju\n", buf2, len2);
+ rte_free(buf2);
+ printf("free %p %ju\n", buf3, len3);
+ rte_free(buf3);
+
+ /* Do a single 8MB hugepage allocation and then free it. This covers
+	 * the simpler case.
+ */
+ len4 = 8 * 1024 * 1024;
+ printf("malloc %ju\n", len4);
+ buf4 = rte_malloc(NULL, len4, 0);
+ verify_buffer(buf4, len4);
+
+ printf("free %p %ju\n", buf4, len4);
+ rte_free(buf4);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
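+	/* --match-allocations asks DPDK to free hugepages in the same chunks they
+	 * were allocated in, which the free-ordering checks in test() rely on.
+	 */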
+ char *dpdk_arg[] = {
+ "mem_callbacks", "-c 0x1",
+ "--base-virtaddr=0x200000000000",
+ "--match-allocations",
+ };
+ int rc;
+
+ rc = rte_eal_init(SPDK_COUNTOF(dpdk_arg), dpdk_arg);
+ if (rc < 0) {
+ printf("Err: Unable to initialize DPDK\n");
+ return 1;
+ }
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("memory", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test", test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/env/memory/.gitignore b/src/spdk/test/env/memory/.gitignore
new file mode 100644
index 000000000..7bef3dc03
--- /dev/null
+++ b/src/spdk/test/env/memory/.gitignore
@@ -0,0 +1 @@
+memory_ut
diff --git a/src/spdk/test/env/memory/Makefile b/src/spdk/test/env/memory/Makefile
new file mode 100644
index 000000000..623a81cb9
--- /dev/null
+++ b/src/spdk/test/env/memory/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+CFLAGS += $(ENV_CFLAGS)
+CFLAGS += -I$(SPDK_ROOT_DIR)/test/lib
+TEST_FILE = memory_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/env/memory/memory_ut.c b/src/spdk/test/env/memory/memory_ut.c
new file mode 100644
index 000000000..bdf6a76bf
--- /dev/null
+++ b/src/spdk/test/env/memory/memory_ut.c
@@ -0,0 +1,524 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "env_dpdk/memory.c"
+
+#define UNIT_TEST_NO_VTOPHYS
+#define UNIT_TEST_NO_PCI_ADDR
+#include "common/lib/test_env.c"
+#include "spdk_cunit.h"
+
+#include "spdk/bit_array.h"
+
+#define PAGE_ARRAY_SIZE (100)
+static struct spdk_bit_array *g_page_array;
+static void *g_vaddr_to_fail = (void *)UINT64_MAX;
+
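+/* Stub out the DPDK and env functions that memory.c references so this unit
+ * test can link and run without initializing a real EAL instance.
+ */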
+DEFINE_STUB(rte_memseg_contig_walk, int, (rte_memseg_contig_walk_t func, void *arg), 0);
+DEFINE_STUB(rte_mem_virt2memseg, struct rte_memseg *,
+ (const void *virt, const struct rte_memseg_list *msl), NULL);
+DEFINE_STUB(spdk_env_dpdk_external_init, bool, (void), true);
+DEFINE_STUB(rte_mem_event_callback_register, int,
+ (const char *name, rte_mem_event_callback_t clb, void *arg), 0);
+DEFINE_STUB(rte_mem_virt2iova, rte_iova_t, (const void *virtaddr), 0);
+
+static int
+test_mem_map_notify(void *cb_ctx, struct spdk_mem_map *map,
+ enum spdk_mem_map_notify_action action,
+ void *vaddr, size_t len)
+{
+ uint32_t i, end;
+
+ SPDK_CU_ASSERT_FATAL(((uintptr_t)vaddr & MASK_2MB) == 0);
+ SPDK_CU_ASSERT_FATAL((len & MASK_2MB) == 0);
+
+ /*
+ * This is a test requirement - the bit array we use to verify
+ * pages are valid is only so large.
+ */
+ SPDK_CU_ASSERT_FATAL((uintptr_t)vaddr < (VALUE_2MB * PAGE_ARRAY_SIZE));
+
+ i = (uintptr_t)vaddr >> SHIFT_2MB;
+ end = i + (len >> SHIFT_2MB);
+ for (; i < end; i++) {
+ switch (action) {
+ case SPDK_MEM_MAP_NOTIFY_REGISTER:
+ /* This page should not already be registered */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(g_page_array, i) == false);
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_set(g_page_array, i) == 0);
+ break;
+ case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(g_page_array, i) == true);
+ spdk_bit_array_clear(g_page_array, i);
+ break;
+ default:
+ SPDK_UNREACHABLE();
+ }
+ }
+
+ return 0;
+}
+
+static int
+test_mem_map_notify_fail(void *cb_ctx, struct spdk_mem_map *map,
+ enum spdk_mem_map_notify_action action, void *vaddr, size_t size)
+{
+ struct spdk_mem_map *reg_map = cb_ctx;
+
+ switch (action) {
+ case SPDK_MEM_MAP_NOTIFY_REGISTER:
+ if (vaddr == g_vaddr_to_fail) {
+ /* Test the error handling. */
+ return -1;
+ }
+ break;
+ case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
+ /* Clear the same region in the other mem_map to be able to
+ * verify that there was no memory left still registered after
+ * the mem_map creation failure.
+ */
+ spdk_mem_map_clear_translation(reg_map, (uint64_t)vaddr, size);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+test_mem_map_notify_checklen(void *cb_ctx, struct spdk_mem_map *map,
+ enum spdk_mem_map_notify_action action, void *vaddr, size_t size)
+{
+ size_t *len_arr = cb_ctx;
+
+ /*
+ * This is a test requirement - the len array we use to verify
+ * pages are valid is only so large.
+ */
+ SPDK_CU_ASSERT_FATAL((uintptr_t)vaddr < (VALUE_2MB * PAGE_ARRAY_SIZE));
+
+ switch (action) {
+ case SPDK_MEM_MAP_NOTIFY_REGISTER:
+ assert(size == len_arr[(uintptr_t)vaddr / VALUE_2MB]);
+ break;
+ case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
+ CU_ASSERT(size == len_arr[(uintptr_t)vaddr / VALUE_2MB]);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+test_check_regions_contiguous(uint64_t addr1, uint64_t addr2)
+{
+ return addr1 == addr2;
+}
+
+const struct spdk_mem_map_ops test_mem_map_ops = {
+ .notify_cb = test_mem_map_notify,
+ .are_contiguous = test_check_regions_contiguous
+};
+
+const struct spdk_mem_map_ops test_mem_map_ops_no_contig = {
+ .notify_cb = test_mem_map_notify,
+ .are_contiguous = NULL
+};
+
+struct spdk_mem_map_ops test_map_ops_notify_fail = {
+ .notify_cb = test_mem_map_notify_fail,
+ .are_contiguous = NULL
+};
+
+struct spdk_mem_map_ops test_map_ops_notify_checklen = {
+ .notify_cb = test_mem_map_notify_checklen,
+ .are_contiguous = NULL
+};
+
+static void
+test_mem_map_alloc_free(void)
+{
+ struct spdk_mem_map *map, *failed_map;
+ uint64_t default_translation = 0xDEADBEEF0BADF00D;
+ int i;
+
+ map = spdk_mem_map_alloc(default_translation, &test_mem_map_ops, NULL);
+ SPDK_CU_ASSERT_FATAL(map != NULL);
+ spdk_mem_map_free(&map);
+ CU_ASSERT(map == NULL);
+
+ map = spdk_mem_map_alloc(default_translation, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(map != NULL);
+
+ /* Register some memory for the initial memory walk in
+ * spdk_mem_map_alloc(). We'll fail registering the last region
+ * and will check if the mem_map cleaned up all its previously
+ * initialized translations.
+ */
+ for (i = 0; i < 5; i++) {
+ spdk_mem_register((void *)(uintptr_t)(2 * i * VALUE_2MB), VALUE_2MB);
+ }
+
+ /* The last region */
+ g_vaddr_to_fail = (void *)(8 * VALUE_2MB);
+ failed_map = spdk_mem_map_alloc(default_translation, &test_map_ops_notify_fail, map);
+ CU_ASSERT(failed_map == NULL);
+
+ for (i = 0; i < 4; i++) {
+ uint64_t reg, size = VALUE_2MB;
+
+ reg = spdk_mem_map_translate(map, 2 * i * VALUE_2MB, &size);
+ /* check if `failed_map` didn't leave any translations behind */
+ CU_ASSERT(reg == default_translation);
+ }
+
+ for (i = 0; i < 5; i++) {
+ spdk_mem_unregister((void *)(uintptr_t)(2 * i * VALUE_2MB), VALUE_2MB);
+ }
+
+ spdk_mem_map_free(&map);
+ CU_ASSERT(map == NULL);
+}
+
+static void
+test_mem_map_translation(void)
+{
+ struct spdk_mem_map *map;
+ uint64_t default_translation = 0xDEADBEEF0BADF00D;
+ uint64_t addr;
+ uint64_t mapping_length;
+ int rc;
+
+ map = spdk_mem_map_alloc(default_translation, &test_mem_map_ops, NULL);
+ SPDK_CU_ASSERT_FATAL(map != NULL);
+
+ /* Try to get translation for address with no translation */
+ addr = spdk_mem_map_translate(map, 10, NULL);
+ CU_ASSERT(addr == default_translation);
+
+ /* Set translation for region of non-2MB multiple size */
+ rc = spdk_mem_map_set_translation(map, VALUE_2MB, 1234, VALUE_2MB);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Set translation for vaddr that isn't 2MB aligned */
+ rc = spdk_mem_map_set_translation(map, 1234, VALUE_2MB, VALUE_2MB);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Set translation for one 2MB page */
+ rc = spdk_mem_map_set_translation(map, VALUE_2MB, VALUE_2MB, VALUE_2MB);
+ CU_ASSERT(rc == 0);
+
+ /* Set translation for region that overlaps the previous translation */
+ rc = spdk_mem_map_set_translation(map, 0, 3 * VALUE_2MB, 0);
+ CU_ASSERT(rc == 0);
+
+ /* Make sure we indicate that the three regions are contiguous */
+ mapping_length = VALUE_2MB * 3;
+ addr = spdk_mem_map_translate(map, 0, &mapping_length);
+ CU_ASSERT(addr == 0);
+ CU_ASSERT(mapping_length == VALUE_2MB * 3);
+
+ /* Translate an unaligned address */
+ mapping_length = VALUE_2MB * 3;
+ addr = spdk_mem_map_translate(map, VALUE_4KB, &mapping_length);
+ CU_ASSERT(addr == 0);
+ CU_ASSERT(mapping_length == VALUE_2MB * 3 - VALUE_4KB);
+
+ /* Clear translation for the middle page of the larger region. */
+ rc = spdk_mem_map_clear_translation(map, VALUE_2MB, VALUE_2MB);
+ CU_ASSERT(rc == 0);
+
+ /* Get translation for first page */
+ addr = spdk_mem_map_translate(map, 0, NULL);
+ CU_ASSERT(addr == 0);
+
+ /* Make sure we indicate that the three regions are no longer contiguous */
+ mapping_length = VALUE_2MB * 3;
+ addr = spdk_mem_map_translate(map, 0, &mapping_length);
+ CU_ASSERT(addr == 0);
+ CU_ASSERT(mapping_length == VALUE_2MB);
+
+	/* Get translation for an unallocated block. The returned size covers only the untranslated 2MB page */
+ mapping_length = VALUE_2MB * 3;
+ addr = spdk_mem_map_translate(map, VALUE_2MB, &mapping_length);
+ CU_ASSERT(addr == default_translation);
+ CU_ASSERT(mapping_length == VALUE_2MB);
+
+ /* Verify translation for 2nd page is the default */
+ addr = spdk_mem_map_translate(map, VALUE_2MB, NULL);
+ CU_ASSERT(addr == default_translation);
+
+ /* Get translation for third page */
+ addr = spdk_mem_map_translate(map, 2 * VALUE_2MB, NULL);
+ /*
+ * Note that addr should be 0, not 4MB. When we set the
+ * translation above, we said the whole 6MB region
+ * should translate to 0.
+ */
+ CU_ASSERT(addr == 0);
+
+ /* Translate only a subset of a 2MB page */
+ mapping_length = 543;
+ addr = spdk_mem_map_translate(map, 0, &mapping_length);
+ CU_ASSERT(addr == 0);
+ CU_ASSERT(mapping_length == 543);
+
+ /* Translate another subset of a 2MB page */
+ mapping_length = 543;
+ addr = spdk_mem_map_translate(map, VALUE_4KB, &mapping_length);
+ CU_ASSERT(addr == 0);
+ CU_ASSERT(mapping_length == 543);
+
+ /* Try to translate an unaligned region that is only partially registered */
+ mapping_length = 543;
+ addr = spdk_mem_map_translate(map, 3 * VALUE_2MB - 196, &mapping_length);
+ CU_ASSERT(addr == 0);
+ CU_ASSERT(mapping_length == 196);
+
+ /* Clear translation for the first page */
+ rc = spdk_mem_map_clear_translation(map, 0, VALUE_2MB);
+ CU_ASSERT(rc == 0);
+
+ /* Get translation for the first page */
+ addr = spdk_mem_map_translate(map, 0, NULL);
+ CU_ASSERT(addr == default_translation);
+
+ /* Clear translation for the third page */
+ rc = spdk_mem_map_clear_translation(map, 2 * VALUE_2MB, VALUE_2MB);
+ CU_ASSERT(rc == 0);
+
+ /* Get translation for the third page */
+ addr = spdk_mem_map_translate(map, 2 * VALUE_2MB, NULL);
+ CU_ASSERT(addr == default_translation);
+
+ /* Set translation for the last valid 2MB region */
+ rc = spdk_mem_map_set_translation(map, 0xffffffe00000ULL, VALUE_2MB, 0x1234);
+ CU_ASSERT(rc == 0);
+
+ /* Verify translation for last valid 2MB region */
+ addr = spdk_mem_map_translate(map, 0xffffffe00000ULL, NULL);
+ CU_ASSERT(addr == 0x1234);
+
+ /* Attempt to set translation for the first invalid address */
+ rc = spdk_mem_map_set_translation(map, 0x1000000000000ULL, VALUE_2MB, 0x5678);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Attempt to set translation starting at a valid address but exceeding the valid range */
+ rc = spdk_mem_map_set_translation(map, 0xffffffe00000ULL, VALUE_2MB * 2, 0x123123);
+ CU_ASSERT(rc != 0);
+
+ spdk_mem_map_free(&map);
+ CU_ASSERT(map == NULL);
+
+ /* Allocate a map without a contiguous region checker */
+ map = spdk_mem_map_alloc(default_translation, &test_mem_map_ops_no_contig, NULL);
+ SPDK_CU_ASSERT_FATAL(map != NULL);
+
+ /* map three contiguous regions */
+ rc = spdk_mem_map_set_translation(map, 0, 3 * VALUE_2MB, 0);
+ CU_ASSERT(rc == 0);
+
+ /* Since we can't check their contiguity, make sure we only return the size of one page */
+ mapping_length = VALUE_2MB * 3;
+ addr = spdk_mem_map_translate(map, 0, &mapping_length);
+ CU_ASSERT(addr == 0);
+ CU_ASSERT(mapping_length == VALUE_2MB);
+
+ /* Translate only a subset of a 2MB page */
+ mapping_length = 543;
+ addr = spdk_mem_map_translate(map, 0, &mapping_length);
+ CU_ASSERT(addr == 0);
+ CU_ASSERT(mapping_length == 543);
+
+ /* Clear the translation */
+ rc = spdk_mem_map_clear_translation(map, 0, VALUE_2MB * 3);
+ CU_ASSERT(rc == 0);
+
+ spdk_mem_map_free(&map);
+ CU_ASSERT(map == NULL);
+}
+
+static void
+test_mem_map_registration(void)
+{
+ int rc;
+ struct spdk_mem_map *map;
+ uint64_t default_translation = 0xDEADBEEF0BADF00D;
+
+ map = spdk_mem_map_alloc(default_translation, &test_mem_map_ops, NULL);
+ SPDK_CU_ASSERT_FATAL(map != NULL);
+
+ /* Unregister memory region that wasn't previously registered */
+ rc = spdk_mem_unregister((void *)VALUE_2MB, VALUE_2MB);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Register non-2MB multiple size */
+ rc = spdk_mem_register((void *)VALUE_2MB, 1234);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Register region that isn't 2MB aligned */
+ rc = spdk_mem_register((void *)1234, VALUE_2MB);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Register one 2MB page */
+ rc = spdk_mem_register((void *)VALUE_2MB, VALUE_2MB);
+ CU_ASSERT(rc == 0);
+
+ /* Register an overlapping address range */
+ rc = spdk_mem_register((void *)0, 3 * VALUE_2MB);
+ CU_ASSERT(rc == -EBUSY);
+
+ /* Unregister a 2MB page */
+ rc = spdk_mem_unregister((void *)VALUE_2MB, VALUE_2MB);
+ CU_ASSERT(rc == 0);
+
+ /* Register non overlapping address range */
+ rc = spdk_mem_register((void *)0, 3 * VALUE_2MB);
+ CU_ASSERT(rc == 0);
+
+ /* Unregister the middle page of the larger region. */
+ rc = spdk_mem_unregister((void *)VALUE_2MB, VALUE_2MB);
+ CU_ASSERT(rc == -ERANGE);
+
+ /* Unregister the first page */
+ rc = spdk_mem_unregister((void *)0, VALUE_2MB);
+ CU_ASSERT(rc == -ERANGE);
+
+ /* Unregister the third page */
+ rc = spdk_mem_unregister((void *)(2 * VALUE_2MB), VALUE_2MB);
+ CU_ASSERT(rc == -ERANGE);
+
+ /* Unregister the entire address range */
+ rc = spdk_mem_unregister((void *)0, 3 * VALUE_2MB);
+ CU_ASSERT(rc == 0);
+
+ spdk_mem_map_free(&map);
+ CU_ASSERT(map == NULL);
+}
+
+static void
+test_mem_map_registration_adjacent(void)
+{
+ struct spdk_mem_map *map, *newmap;
+ uint64_t default_translation = 0xDEADBEEF0BADF00D;
+ uintptr_t vaddr;
+ unsigned i;
+ size_t notify_len[PAGE_ARRAY_SIZE] = {0};
+ size_t chunk_len[] = { 2, 1, 3, 2, 1, 1 };
+
+ map = spdk_mem_map_alloc(default_translation,
+ &test_map_ops_notify_checklen, notify_len);
+ SPDK_CU_ASSERT_FATAL(map != NULL);
+
+ vaddr = 0;
+ for (i = 0; i < SPDK_COUNTOF(chunk_len); i++) {
+ notify_len[vaddr / VALUE_2MB] = chunk_len[i] * VALUE_2MB;
+ spdk_mem_register((void *)vaddr, notify_len[vaddr / VALUE_2MB]);
+ vaddr += notify_len[vaddr / VALUE_2MB];
+ }
+
+ /* Verify the memory is translated in the same chunks it was registered */
+ newmap = spdk_mem_map_alloc(default_translation,
+ &test_map_ops_notify_checklen, notify_len);
+ SPDK_CU_ASSERT_FATAL(newmap != NULL);
+ spdk_mem_map_free(&newmap);
+ CU_ASSERT(newmap == NULL);
+
+ vaddr = 0;
+ for (i = 0; i < SPDK_COUNTOF(chunk_len); i++) {
+ notify_len[vaddr / VALUE_2MB] = chunk_len[i] * VALUE_2MB;
+ spdk_mem_unregister((void *)vaddr, notify_len[vaddr / VALUE_2MB]);
+ vaddr += notify_len[vaddr / VALUE_2MB];
+ }
+
+ /* Register all chunks again just to unregister them again, but this
+ * time with only a single unregister() call.
+ */
+ vaddr = 0;
+ for (i = 0; i < SPDK_COUNTOF(chunk_len); i++) {
+ notify_len[vaddr / VALUE_2MB] = chunk_len[i] * VALUE_2MB;
+ spdk_mem_register((void *)vaddr, notify_len[vaddr / VALUE_2MB]);
+ vaddr += notify_len[vaddr / VALUE_2MB];
+ }
+ spdk_mem_unregister(0, vaddr);
+
+ spdk_mem_map_free(&map);
+ CU_ASSERT(map == NULL);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ /*
+	 * These tests can use up to PAGE_ARRAY_SIZE 2MB pages of memory.
+ * Note that the tests just verify addresses - this memory
+ * is not actually allocated.
+ */
+ g_page_array = spdk_bit_array_create(PAGE_ARRAY_SIZE);
+
+ /* Initialize the memory map */
+ if (mem_map_init(false) < 0) {
+ return CUE_NOMEMORY;
+ }
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("memory", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "alloc and free memory map", test_mem_map_alloc_free) == NULL ||
+ CU_add_test(suite, "mem map translation", test_mem_map_translation) == NULL ||
+ CU_add_test(suite, "mem map registration", test_mem_map_registration) == NULL ||
+ CU_add_test(suite, "mem map adjacent registrations", test_mem_map_registration_adjacent) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ spdk_bit_array_free(&g_page_array);
+
+ return num_failures;
+}
diff --git a/src/spdk/test/env/pci/.gitignore b/src/spdk/test/env/pci/.gitignore
new file mode 100644
index 000000000..11d1c65ba
--- /dev/null
+++ b/src/spdk/test/env/pci/.gitignore
@@ -0,0 +1 @@
+pci_ut
diff --git a/src/spdk/test/env/pci/Makefile b/src/spdk/test/env/pci/Makefile
new file mode 100644
index 000000000..85d03ec34
--- /dev/null
+++ b/src/spdk/test/env/pci/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+UNIT_TEST_LINK_ENV = 1
+CFLAGS += $(ENV_CFLAGS)
+TEST_FILE = pci_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/env/pci/pci_ut.c b/src/spdk/test/env/pci/pci_ut.c
new file mode 100644
index 000000000..66d36b980
--- /dev/null
+++ b/src/spdk/test/env/pci/pci_ut.c
@@ -0,0 +1,238 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_internal/mock.h"
+
+#include "env_dpdk/pci.c"
+
+static void
+pci_claim_test(struct spdk_pci_device *dev)
+{
+ int rc = 0;
+ pid_t childPid;
+ int status, ret;
+
+ rc = spdk_pci_device_claim(dev);
+ CU_ASSERT(rc >= 0);
+
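+	/* The parent holds the claim, so a second claim attempt from the forked
+	 * child is expected to fail.
+	 */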
+ childPid = fork();
+ CU_ASSERT(childPid >= 0);
+ if (childPid == 0) {
+ ret = spdk_pci_device_claim(dev);
+ CU_ASSERT(ret == -1);
+ exit(0);
+ } else {
+ waitpid(childPid, &status, 0);
+ }
+}
+
+static struct spdk_pci_driver ut_pci_driver;
+
+struct ut_pci_dev {
+ struct spdk_pci_device pci;
+ char config[16];
+ char bar[16];
+ bool attached;
+};
+
+static int
+ut_map_bar(struct spdk_pci_device *dev, uint32_t bar,
+ void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
+{
+ struct ut_pci_dev *ut_dev = (struct ut_pci_dev *)dev;
+
+ /* just one bar */
+ if (bar > 0) {
+ return -1;
+ }
+
+ *mapped_addr = ut_dev->bar;
+ *phys_addr = 0;
+ *size = sizeof(ut_dev->bar);
+ return 0;
+}
+
+static int
+ut_unmap_bar(struct spdk_pci_device *device, uint32_t bar, void *addr)
+{
+ return 0;
+}
+
+static int
+ut_cfg_read(struct spdk_pci_device *dev, void *value, uint32_t len, uint32_t offset)
+{
+ struct ut_pci_dev *ut_dev = (struct ut_pci_dev *)dev;
+
+ if (len + offset >= sizeof(ut_dev->config)) {
+ return -1;
+ }
+
+ memcpy(value, (void *)((uintptr_t)ut_dev->config + offset), len);
+ return 0;
+}
+
+static int ut_cfg_write(struct spdk_pci_device *dev, void *value, uint32_t len, uint32_t offset)
+{
+ struct ut_pci_dev *ut_dev = (struct ut_pci_dev *)dev;
+
+ if (len + offset >= sizeof(ut_dev->config)) {
+ return -1;
+ }
+
+ memcpy((void *)((uintptr_t)ut_dev->config + offset), value, len);
+ return 0;
+}
+
+
+static int
+ut_enum_cb(void *ctx, struct spdk_pci_device *dev)
+{
+ struct ut_pci_dev *ut_dev = (struct ut_pci_dev *)dev;
+
+ ut_dev->attached = true;
+ return 0;
+}
+
+static void
+pci_hook_test(void)
+{
+ struct ut_pci_dev ut_dev = {};
+ uint32_t value_32;
+ void *bar0_vaddr;
+ uint64_t bar0_paddr, bar0_size;
+ int rc;
+
+ ut_dev.pci.type = "custom";
+ ut_dev.pci.id.vendor_id = 0x4;
+ ut_dev.pci.id.device_id = 0x8;
+
+	/* Use addr parse for initialization */
+ spdk_pci_addr_parse(&ut_dev.pci.addr, "10000:00:01.0");
+ CU_ASSERT(ut_dev.pci.addr.domain == 0x10000);
+ CU_ASSERT(ut_dev.pci.addr.bus == 0x0);
+ CU_ASSERT(ut_dev.pci.addr.dev == 0x1);
+ CU_ASSERT(ut_dev.pci.addr.func == 0x0);
+
+ ut_dev.pci.map_bar = ut_map_bar;
+ ut_dev.pci.unmap_bar = ut_unmap_bar;
+ ut_dev.pci.cfg_read = ut_cfg_read;
+ ut_dev.pci.cfg_write = ut_cfg_write;
+
+ /* hook the device into the PCI layer */
+ spdk_pci_hook_device(&ut_pci_driver, &ut_dev.pci);
+
+ /* try to attach a device with the matching driver and bdf */
+ rc = spdk_pci_device_attach(&ut_pci_driver, ut_enum_cb, NULL, &ut_dev.pci.addr);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_dev.pci.internal.attached);
+ CU_ASSERT(ut_dev.attached);
+
+ /* check PCI config writes and reads */
+ value_32 = 0xDEADBEEF;
+ rc = spdk_pci_device_cfg_write32(&ut_dev.pci, value_32, 0);
+ CU_ASSERT(rc == 0);
+
+ value_32 = 0x0BADF00D;
+ rc = spdk_pci_device_cfg_write32(&ut_dev.pci, value_32, 4);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_pci_device_cfg_read32(&ut_dev.pci, &value_32, 0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(value_32 == 0xDEADBEEF);
+ CU_ASSERT(memcmp(&value_32, &ut_dev.config[0], 4) == 0);
+
+ rc = spdk_pci_device_cfg_read32(&ut_dev.pci, &value_32, 4);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(value_32 == 0x0BADF00D);
+ CU_ASSERT(memcmp(&value_32, &ut_dev.config[4], 4) == 0);
+
+	/* out-of-bounds read */
+ rc = spdk_pci_device_cfg_read32(&ut_dev.pci, &value_32, sizeof(ut_dev.config));
+ CU_ASSERT(rc != 0);
+
+ /* map a bar */
+ rc = spdk_pci_device_map_bar(&ut_dev.pci, 0, &bar0_vaddr, &bar0_paddr, &bar0_size);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bar0_vaddr == ut_dev.bar);
+ CU_ASSERT(bar0_size == sizeof(ut_dev.bar));
+ spdk_pci_device_unmap_bar(&ut_dev.pci, 0, bar0_vaddr);
+
+ /* map an inaccessible bar */
+ rc = spdk_pci_device_map_bar(&ut_dev.pci, 1, &bar0_vaddr, &bar0_paddr, &bar0_size);
+ CU_ASSERT(rc != 0);
+
+ /* test spdk_pci_device_claim() */
+ pci_claim_test(&ut_dev.pci);
+
+ spdk_pci_device_detach(&ut_dev.pci);
+ CU_ASSERT(!ut_dev.pci.internal.attached);
+
+ /* unhook the device */
+ spdk_pci_unhook_device(&ut_dev.pci);
+
+ /* try to attach the same device again */
+ rc = spdk_pci_device_attach(&ut_pci_driver, ut_enum_cb, NULL, &ut_dev.pci.addr);
+ CU_ASSERT(rc != 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("pci", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "pci_hook", pci_hook_test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/env/vtophys/.gitignore b/src/spdk/test/env/vtophys/.gitignore
new file mode 100644
index 000000000..a03b46cc9
--- /dev/null
+++ b/src/spdk/test/env/vtophys/.gitignore
@@ -0,0 +1 @@
+vtophys
diff --git a/src/spdk/test/env/vtophys/Makefile b/src/spdk/test/env/vtophys/Makefile
new file mode 100644
index 000000000..68c4632a3
--- /dev/null
+++ b/src/spdk/test/env/vtophys/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+UNIT_TEST_LINK_ENV = 1
+TEST_FILE = vtophys.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/env/vtophys/vtophys.c b/src/spdk/test/env/vtophys/vtophys.c
new file mode 100644
index 000000000..e0d4d7992
--- /dev/null
+++ b/src/spdk/test/env/vtophys/vtophys.c
@@ -0,0 +1,196 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/config.h"
+#include "spdk/env.h"
+#include "spdk/util.h"
+
+#include "CUnit/Basic.h"
+
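+/* Derive the env module name (e.g. "env_dpdk") from the configured
+ * SPDK_CONFIG_ENV path at compile time.
+ */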
+#define __SPDK_ENV_NAME(path) (strrchr(#path, '/') + 1)
+#define _SPDK_ENV_NAME(path) __SPDK_ENV_NAME(path)
+#define SPDK_ENV_NAME _SPDK_ENV_NAME(SPDK_CONFIG_ENV)
+
+static void
+vtophys_malloc_test(void)
+{
+ void *p = NULL;
+ int i;
+ unsigned int size = 1;
+ uint64_t paddr;
+
+ /* Verify vtophys doesn't work on regular malloc memory */
+ for (i = 0; i < 31; i++) {
+ p = malloc(size);
+ if (p == NULL) {
+ continue;
+ }
+
+ paddr = spdk_vtophys(p, NULL);
+ CU_ASSERT(paddr == SPDK_VTOPHYS_ERROR);
+
+ free(p);
+ size = size << 1;
+ }
+
+ /* Test addresses that are not in the valid x86-64 usermode range */
+ paddr = spdk_vtophys((void *)0x0000800000000000ULL, NULL);
+ CU_ASSERT(paddr == SPDK_VTOPHYS_ERROR);
+}
+
+static void
+vtophys_spdk_malloc_test(void)
+{
+ void *buf = NULL, *p = NULL;
+ size_t buf_align = 512;
+ int i;
+ unsigned int size = 1;
+ uint64_t paddr, tmpsize;
+
+ /* Test vtophys on memory allocated through SPDK */
+ for (i = 0; i < 31; i++) {
+ buf = spdk_zmalloc(size, buf_align, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ if (buf == NULL) {
+ continue;
+ }
+
+ /* test vtophys translation with no length parameter */
+ paddr = spdk_vtophys(buf, NULL);
+ CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
+
+ /* translate the entire buffer; it's not necessarily contiguous */
+ p = buf;
+ tmpsize = size;
+ while (p < buf + size) {
+ paddr = spdk_vtophys(p, &tmpsize);
+ CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
+ CU_ASSERT(tmpsize >= spdk_min(size, buf_align));
+ p += tmpsize;
+ tmpsize = buf + size - p;
+ }
+ CU_ASSERT(tmpsize == 0);
+
+ /* translate a valid vaddr, but with length 0 */
+ p = buf;
+ tmpsize = 0;
+ paddr = spdk_vtophys(p, &tmpsize);
+ CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
+ CU_ASSERT(tmpsize == 0);
+
+ /* translate the first half of the buffer */
+ p = buf;
+ tmpsize = size / 2;
+ while (p < buf + size / 2) {
+ paddr = spdk_vtophys(p, &tmpsize);
+ CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
+ CU_ASSERT(tmpsize >= spdk_min(size / 2, buf_align));
+ p += tmpsize;
+ tmpsize = buf + size / 2 - p;
+ }
+ CU_ASSERT(tmpsize == 0);
+
+ /* translate the second half of the buffer */
+ p = buf + size / 2;
+ tmpsize = size / 2;
+ while (p < buf + size) {
+ paddr = spdk_vtophys(p, &tmpsize);
+ CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
+ CU_ASSERT(tmpsize >= spdk_min(size / 2, buf_align));
+ p += tmpsize;
+ tmpsize = buf + size - p;
+ }
+ CU_ASSERT(tmpsize == 0);
+
+ /* translate a region that's not entirely registered */
+ p = buf;
+ tmpsize = UINT64_MAX;
+ while (p < buf + size) {
+ paddr = spdk_vtophys(p, &tmpsize);
+ CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
+ CU_ASSERT(tmpsize >= buf_align);
+ p += tmpsize;
+ /* verify our region is really contiguous */
+ CU_ASSERT(paddr + tmpsize - 1 == spdk_vtophys(p - 1, &tmpsize));
+ tmpsize = UINT64_MAX;
+ }
+
+ spdk_free(buf);
+ size = size << 1;
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_env_opts opts;
+ CU_pSuite suite = NULL;
+ unsigned num_failures;
+
+ spdk_env_opts_init(&opts);
+ opts.name = "vtophys";
+ opts.core_mask = "0x1";
+ if (strcmp(SPDK_ENV_NAME, "env_dpdk") == 0) {
+ opts.env_context = "--log-level=lib.eal:8";
+ }
+
+ if (spdk_env_init(&opts) < 0) {
+ printf("Err: Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("components_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "vtophys_malloc_test", vtophys_malloc_test) == NULL ||
+ CU_add_test(suite, "vtophys_spdk_malloc_test", vtophys_spdk_malloc_test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/event/Makefile b/src/spdk/test/event/Makefile
new file mode 100644
index 000000000..4b9cab867
--- /dev/null
+++ b/src/spdk/test/event/Makefile
@@ -0,0 +1,48 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = event_perf reactor reactor_perf
+
+ifeq ($(OS),Linux)
+DIRS-y += app_repeat
+endif
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/event/app_repeat/.gitignore b/src/spdk/test/event/app_repeat/.gitignore
new file mode 100644
index 000000000..0e59ff47d
--- /dev/null
+++ b/src/spdk/test/event/app_repeat/.gitignore
@@ -0,0 +1 @@
+app_repeat
diff --git a/src/spdk/test/event/app_repeat/Makefile b/src/spdk/test/event/app_repeat/Makefile
new file mode 100644
index 000000000..eb5140b1c
--- /dev/null
+++ b/src/spdk/test/event/app_repeat/Makefile
@@ -0,0 +1,54 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = app_repeat
+C_SRCS := app_repeat.c
+
+# Some of the modules and libraries are not repeatable yet, so only the
+# repeatable ones are listed here.
+SPDK_LIB_LIST = event_bdev event_accel event_vmd event_sock
+SPDK_LIB_LIST += event log trace conf thread util bdev accel rpc jsonrpc json sock vmd
+SPDK_LIB_LIST += app_rpc log_rpc bdev_rpc notify
+SPDK_LIB_LIST += event_nbd nbd
+
+BLOCKDEV_LIST = bdev_malloc bdev_null
+BLOCKDEV_LIST += bdev_aio
+SYS_LIBS += -laio
+
+SPDK_LIB_LIST += $(BLOCKDEV_LIST)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/event/app_repeat/app_repeat.c b/src/spdk/test/event/app_repeat/app_repeat.c
new file mode 100644
index 000000000..d83e7949c
--- /dev/null
+++ b/src/spdk/test/event/app_repeat/app_repeat.c
@@ -0,0 +1,115 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/event.h"
+#include "spdk/string.h"
+#include "spdk/thread.h"
+
+struct spdk_app_opts g_opts = {};
+static const char g_app_repeat_get_opts_string[] = "t:";
+static int g_repeat_times = 2;
+static bool g_exit;
+
+static void
+app_repeat_usage(void)
+{
+ printf(" -t <num> number of times to repeat calling spdk_app_start/stop\n");
+}
+
+static int
+app_repeat_parse_arg(int ch, char *arg)
+{
+ switch (ch) {
+ case 't':
+ g_repeat_times = spdk_strtol(arg, 0);
+ if (g_repeat_times < 2) {
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+app_repeat_started(void *arg1)
+{
+ int index = *(int *)arg1;
+
+ printf("spdk_app_start is called in Round %d.\n", index);
+}
+
+static void _app_repeat_shutdown_cb(void)
+{
+ printf("Shutdown signal received, exit.\n");
+ g_exit = true;
+ spdk_app_stop(0);
+}
+
+static void _app_repeat_usr1_handler(int signal)
+{
+ printf("USR1 signal received, restart spdk application framework.\n");
+ spdk_app_stop(0);
+}
+
+int
+main(int argc, char **argv)
+{
+ int rc;
+ int i;
+
+ spdk_app_opts_init(&g_opts);
+ g_opts.name = "app_repeat";
+ g_opts.shutdown_cb = _app_repeat_shutdown_cb;
+ g_opts.usr1_handler = _app_repeat_usr1_handler;
+ if ((rc = spdk_app_parse_args(argc, argv, &g_opts, g_app_repeat_get_opts_string,
+ NULL, app_repeat_parse_arg, app_repeat_usage)) !=
+ SPDK_APP_PARSE_ARGS_SUCCESS) {
+ return rc;
+ }
+
+ for (i = 0; i < g_repeat_times; i++) {
+ rc = spdk_app_start(&g_opts, app_repeat_started, &i);
+ spdk_app_fini();
+
+ if (rc) {
+ fprintf(stderr, "Failed to call spdk_app_start in Round %d.\n", i);
+ break;
+ }
+ }
+
+ return rc;
+}
diff --git a/src/spdk/test/event/event.sh b/src/spdk/test/event/event.sh
new file mode 100755
index 000000000..d198cd116
--- /dev/null
+++ b/src/spdk/test/event/event.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/bdev/nbd_common.sh
+
+function app_repeat_test() {
+ local rpc_server=/var/tmp/spdk-nbd.sock
+ local nbd_list=("/dev/nbd0" "/dev/nbd1")
+ local bdev_list=("Malloc0" "Malloc1")
+ local repeat_times=4
+
+ modprobe nbd
+ $rootdir/test/event/app_repeat/app_repeat -r $rpc_server -m 0x3 -t $repeat_times &
+ repeat_pid=$!
+ trap 'killprocess $repeat_pid; exit 1' SIGINT SIGTERM EXIT
+ echo "Process app_repeat pid: $repeat_pid"
+
+ for i in {0..2}; do
+ echo "spdk_app_start Round $i"
+ waitforlisten $repeat_pid $rpc_server
+
+ $rootdir/scripts/rpc.py -s $rpc_server bdev_malloc_create 64 4096
+ $rootdir/scripts/rpc.py -s $rpc_server bdev_malloc_create 64 4096
+
+ nbd_rpc_data_verify $rpc_server "${bdev_list[*]}" "${nbd_list[*]}"
+ $rootdir/scripts/rpc.py -s $rpc_server spdk_kill_instance SIGUSR1
+ done
+
+ waitforlisten $repeat_pid $rpc_server
+ killprocess $repeat_pid
+ trap - SIGINT SIGTERM EXIT
+
+ return 0
+}
+
+run_test "event_perf" $testdir/event_perf/event_perf -m 0xF -t 1
+run_test "event_reactor" $testdir/reactor/reactor -t 1
+run_test "event_reactor_perf" $testdir/reactor_perf/reactor_perf -t 1
+
+if [ $(uname -s) = Linux ] && modprobe -n nbd; then
+ run_test "app_repeat" app_repeat_test
+fi
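
The app_repeat loop above can also be driven by hand, outside of run_test. A minimal sketch, assuming the app_repeat binary and rpc.py sit at their usual locations under an SPDK checkout (the paths below are illustrative, not taken from the script):

~~~
# Start app_repeat on cores 0-1 with a private RPC socket and two start/stop rounds.
./test/event/app_repeat/app_repeat -r /var/tmp/spdk-nbd.sock -m 0x3 -t 2 &

# Once the RPC socket is listening, create a malloc bdev and then ask the
# instance to restart its application framework via SIGUSR1.
./scripts/rpc.py -s /var/tmp/spdk-nbd.sock bdev_malloc_create 64 4096
./scripts/rpc.py -s /var/tmp/spdk-nbd.sock spdk_kill_instance SIGUSR1
~~~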
diff --git a/src/spdk/test/event/event_perf/.gitignore b/src/spdk/test/event/event_perf/.gitignore
new file mode 100644
index 000000000..2bdb558da
--- /dev/null
+++ b/src/spdk/test/event/event_perf/.gitignore
@@ -0,0 +1 @@
+event_perf
diff --git a/src/spdk/test/event/event_perf/Makefile b/src/spdk/test/event/event_perf/Makefile
new file mode 100644
index 000000000..464709396
--- /dev/null
+++ b/src/spdk/test/event/event_perf/Makefile
@@ -0,0 +1,42 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+APP = event_perf
+C_SRCS := event_perf.c
+
+SPDK_LIB_LIST = event trace conf thread util log rpc jsonrpc json sock notify
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/event/event_perf/event_perf.c b/src/spdk/test/event/event_perf/event_perf.c
new file mode 100644
index 000000000..fe44e604d
--- /dev/null
+++ b/src/spdk/test/event/event_perf/event_perf.c
@@ -0,0 +1,184 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/env.h"
+#include "spdk/event.h"
+#include "spdk_internal/event.h"
+#include "spdk/log.h"
+#include "spdk/string.h"
+
+static uint64_t g_tsc_rate;
+static uint64_t g_tsc_us_rate;
+static uint64_t g_tsc_end;
+
+static int g_time_in_sec;
+
+static uint64_t *call_count;
+
+static bool g_app_stopped = false;
+
+static void
+submit_new_event(void *arg1, void *arg2)
+{
+ struct spdk_event *event;
+ static __thread uint32_t next_lcore = UINT32_MAX;
+
+ if (spdk_get_ticks() > g_tsc_end) {
+ if (__sync_bool_compare_and_swap(&g_app_stopped, false, true)) {
+ spdk_app_stop(0);
+ }
+ return;
+ }
+
+ if (next_lcore == UINT32_MAX) {
+ next_lcore = spdk_env_get_next_core(spdk_env_get_current_core());
+ if (next_lcore == UINT32_MAX) {
+ next_lcore = spdk_env_get_first_core();
+ }
+ }
+
+ call_count[next_lcore]++;
+ event = spdk_event_allocate(next_lcore, submit_new_event, NULL, NULL);
+ spdk_event_call(event);
+}
+
+static void
+event_work_fn(void *arg1, void *arg2)
+{
+
+ submit_new_event(NULL, NULL);
+ submit_new_event(NULL, NULL);
+ submit_new_event(NULL, NULL);
+ submit_new_event(NULL, NULL);
+}
+
+static void
+event_perf_start(void *arg1)
+{
+ uint32_t i;
+
+ call_count = calloc(spdk_env_get_last_core() + 1, sizeof(*call_count));
+ if (call_count == NULL) {
+ fprintf(stderr, "call_count allocation failed\n");
+ spdk_app_stop(1);
+ return;
+ }
+
+ g_tsc_rate = spdk_get_ticks_hz();
+ g_tsc_us_rate = g_tsc_rate / (1000 * 1000);
+ g_tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;
+
+ printf("Running I/O for %d seconds...", g_time_in_sec);
+ fflush(stdout);
+
+ SPDK_ENV_FOREACH_CORE(i) {
+ spdk_event_call(spdk_event_allocate(i, event_work_fn,
+ NULL, NULL));
+ }
+
+}
+
+static void
+usage(char *program_name)
+{
+ printf("%s options\n", program_name);
+ printf("\t[-m core mask for distributing I/O submission/completion work\n");
+ printf("\t\t(default: 0x1 - use core 0 only)]\n");
+ printf("\t[-t time in seconds]\n");
+}
+
+static void
+performance_dump(int io_time)
+{
+ uint32_t i;
+
+ if (call_count == NULL) {
+ return;
+ }
+
+ printf("\n");
+ SPDK_ENV_FOREACH_CORE(i) {
+ printf("lcore %2d: %8ju\n", i, call_count[i] / g_time_in_sec);
+ }
+
+ fflush(stdout);
+ free(call_count);
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_app_opts opts = {};
+ int op;
+ int rc = 0;
+
+ opts.name = "event_perf";
+
+ g_time_in_sec = 0;
+
+ while ((op = getopt(argc, argv, "m:t:")) != -1) {
+ switch (op) {
+ case 'm':
+ opts.reactor_mask = optarg;
+ break;
+ case 't':
+ g_time_in_sec = spdk_strtol(optarg, 10);
+ if (g_time_in_sec < 0) {
+ fprintf(stderr, "Invalid run time\n");
+ return g_time_in_sec;
+ }
+ break;
+ default:
+ usage(argv[0]);
+ exit(1);
+ }
+ }
+
+ if (!g_time_in_sec) {
+ usage(argv[0]);
+ exit(1);
+ }
+
+ printf("Running I/O for %d seconds...", g_time_in_sec);
+ fflush(stdout);
+
+ rc = spdk_app_start(&opts, event_perf_start, NULL);
+
+ spdk_app_fini();
+ performance_dump(g_time_in_sec);
+
+ printf("done.\n");
+ return rc;
+}
diff --git a/src/spdk/test/event/reactor/.gitignore b/src/spdk/test/event/reactor/.gitignore
new file mode 100644
index 000000000..194b15d77
--- /dev/null
+++ b/src/spdk/test/event/reactor/.gitignore
@@ -0,0 +1 @@
+reactor
diff --git a/src/spdk/test/event/reactor/Makefile b/src/spdk/test/event/reactor/Makefile
new file mode 100644
index 000000000..b5d94dcb1
--- /dev/null
+++ b/src/spdk/test/event/reactor/Makefile
@@ -0,0 +1,42 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+APP = reactor
+C_SRCS := reactor.c
+
+SPDK_LIB_LIST = event trace conf thread util log rpc jsonrpc json sock notify
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/event/reactor/reactor.c b/src/spdk/test/event/reactor/reactor.c
new file mode 100644
index 000000000..f78f16570
--- /dev/null
+++ b/src/spdk/test/event/reactor/reactor.c
@@ -0,0 +1,144 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/event.h"
+#include "spdk/string.h"
+#include "spdk/thread.h"
+
+static int g_time_in_sec;
+static struct spdk_poller *test_end_poller;
+static struct spdk_poller *poller_100ms;
+static struct spdk_poller *poller_250ms;
+static struct spdk_poller *poller_500ms;
+static struct spdk_poller *poller_oneshot;
+static struct spdk_poller *poller_unregister;
+
+static int
+test_end(void *arg)
+{
+ printf("test_end\n");
+
+ spdk_poller_unregister(&test_end_poller);
+ spdk_poller_unregister(&poller_100ms);
+ spdk_poller_unregister(&poller_250ms);
+ spdk_poller_unregister(&poller_500ms);
+
+ spdk_app_stop(0);
+ return -1;
+}
+
+static int
+tick(void *arg)
+{
+ uintptr_t period = (uintptr_t)arg;
+
+ printf("tick %" PRIu64 "\n", (uint64_t)period);
+
+ return -1;
+}
+
+static int
+oneshot(void *arg)
+{
+ printf("oneshot\n");
+ spdk_poller_unregister(&poller_oneshot);
+
+ return -1;
+}
+
+static int
+nop(void *arg)
+{
+ return -1;
+}
+
+static void
+test_start(void *arg1)
+{
+ printf("test_start\n");
+
+ /* Register a poller that will stop the test after the time has elapsed. */
+ test_end_poller = SPDK_POLLER_REGISTER(test_end, NULL, g_time_in_sec * 1000000ULL);
+
+ poller_100ms = SPDK_POLLER_REGISTER(tick, (void *)100, 100000);
+ poller_250ms = SPDK_POLLER_REGISTER(tick, (void *)250, 250000);
+ poller_500ms = SPDK_POLLER_REGISTER(tick, (void *)500, 500000);
+ poller_oneshot = SPDK_POLLER_REGISTER(oneshot, NULL, 0);
+
+ poller_unregister = SPDK_POLLER_REGISTER(nop, NULL, 0);
+ spdk_poller_unregister(&poller_unregister);
+}
+
+static void
+usage(const char *program_name)
+{
+ printf("%s options\n", program_name);
+ printf("\t[-t time in seconds]\n");
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_app_opts opts;
+ int op;
+ int rc = 0;
+
+ spdk_app_opts_init(&opts);
+ opts.name = "reactor";
+
+ g_time_in_sec = 0;
+
+ while ((op = getopt(argc, argv, "t:")) != -1) {
+ switch (op) {
+ case 't':
+ g_time_in_sec = spdk_strtol(optarg, 10);
+ break;
+ default:
+ usage(argv[0]);
+ exit(1);
+ }
+ }
+
+ if (g_time_in_sec <= 0) {
+ usage(argv[0]);
+ exit(1);
+ }
+
+ rc = spdk_app_start(&opts, test_start, NULL);
+
+ spdk_app_fini();
+
+ return rc;
+}
diff --git a/src/spdk/test/event/reactor_perf/.gitignore b/src/spdk/test/event/reactor_perf/.gitignore
new file mode 100644
index 000000000..32160228f
--- /dev/null
+++ b/src/spdk/test/event/reactor_perf/.gitignore
@@ -0,0 +1 @@
+reactor_perf
diff --git a/src/spdk/test/event/reactor_perf/Makefile b/src/spdk/test/event/reactor_perf/Makefile
new file mode 100644
index 000000000..74b56dcf8
--- /dev/null
+++ b/src/spdk/test/event/reactor_perf/Makefile
@@ -0,0 +1,42 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+APP = reactor_perf
+C_SRCS := reactor_perf.c
+
+SPDK_LIB_LIST = event trace conf thread util log rpc jsonrpc json sock notify
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/event/reactor_perf/reactor_perf.c b/src/spdk/test/event/reactor_perf/reactor_perf.c
new file mode 100644
index 000000000..ceac595f2
--- /dev/null
+++ b/src/spdk/test/event/reactor_perf/reactor_perf.c
@@ -0,0 +1,151 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/env.h"
+#include "spdk/event.h"
+#include "spdk/string.h"
+#include "spdk/thread.h"
+
+static int g_time_in_sec;
+static int g_queue_depth;
+static struct spdk_poller *g_test_end_poller;
+static uint64_t g_call_count = 0;
+
+static int
+__test_end(void *arg)
+{
+ printf("test_end\n");
+ spdk_poller_unregister(&g_test_end_poller);
+ spdk_app_stop(0);
+ return -1;
+}
+
+static void
+__submit_next(void *arg1, void *arg2)
+{
+ struct spdk_event *event;
+
+ g_call_count++;
+
+ event = spdk_event_allocate(spdk_env_get_current_core(),
+ __submit_next, NULL, NULL);
+ spdk_event_call(event);
+}
+
+static void
+test_start(void *arg1)
+{
+ int i;
+
+ printf("test_start\n");
+
+ /* Register a poller that will stop the test after the time has elapsed. */
+ g_test_end_poller = SPDK_POLLER_REGISTER(__test_end, NULL,
+ g_time_in_sec * 1000000ULL);
+
+ for (i = 0; i < g_queue_depth; i++) {
+ __submit_next(NULL, NULL);
+ }
+}
+
+static void
+test_cleanup(void)
+{
+ printf("test_abort\n");
+
+ spdk_poller_unregister(&g_test_end_poller);
+ spdk_app_stop(0);
+}
+
+static void
+usage(const char *program_name)
+{
+ printf("%s options\n", program_name);
+ printf("\t[-q Queue depth (default: 1)]\n");
+ printf("\t[-t time in seconds]\n");
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_app_opts opts;
+ int op;
+ int rc;
+ long int val;
+
+ spdk_app_opts_init(&opts);
+ opts.name = "reactor_perf";
+
+ g_time_in_sec = 0;
+ g_queue_depth = 1;
+
+ while ((op = getopt(argc, argv, "q:t:")) != -1) {
+ if (op == '?') {
+ usage(argv[0]);
+ exit(1);
+ }
+ val = spdk_strtol(optarg, 10);
+ if (val < 0) {
+ fprintf(stderr, "Converting a string to integer failed\n");
+ exit(1);
+ }
+ switch (op) {
+ case 'q':
+ g_queue_depth = val;
+ break;
+ case 't':
+ g_time_in_sec = val;
+ break;
+ default:
+ usage(argv[0]);
+ exit(1);
+ }
+ }
+
+ if (!g_time_in_sec) {
+ usage(argv[0]);
+ exit(1);
+ }
+
+ opts.shutdown_cb = test_cleanup;
+
+ rc = spdk_app_start(&opts, test_start, NULL);
+
+ spdk_app_fini();
+
+ printf("Performance: %8ju events per second\n", g_call_count / g_time_in_sec);
+
+ return rc;
+}
diff --git a/src/spdk/test/external_code/Makefile b/src/spdk/test/external_code/Makefile
new file mode 100644
index 000000000..4a4fb6251
--- /dev/null
+++ b/src/spdk/test/external_code/Makefile
@@ -0,0 +1,80 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+ALL_DEPDIRS+= hello_world passthru
+
+ifneq ($(SPDK_HEADER_DIR),)
+COMMON_CFLAGS+=-I$(SPDK_HEADER_DIR)
+endif
+
+ifneq ($(SPDK_LIB_DIR),)
+COMMON_CFLAGS+=-L$(SPDK_LIB_DIR)
+endif
+
+ifneq ($(DPDK_LIB_DIR),)
+COMMON_CFLAGS+=-L$(DPDK_LIB_DIR)
+endif
+export
+.PHONY: all
+
+all: hello_world_bdev_shared_combo
+
+static: hello_world_bdev_static
+
+hello_world_bdev_shared_combo: passthru_shared
+ $(MAKE) --directory=hello_world bdev_shared_combo
+
+hello_world_bdev_shared_iso: passthru_shared
+ $(MAKE) --directory=hello_world bdev_shared_iso
+
+hello_world_no_bdev_shared_combo:
+ $(MAKE) --directory=hello_world alone_shared_combo
+
+hello_world_no_bdev_shared_iso:
+ $(MAKE) --directory=hello_world alone_shared_iso
+
+hello_world_bdev_static: passthru_static
+ $(MAKE) --directory=hello_world bdev_static
+
+hello_world_no_bdev_static:
+ $(MAKE) --directory=hello_world alone_static
+
+passthru_shared:
+ $(MAKE) --directory=passthru shared
+
+passthru_static:
+ $(MAKE) --directory=passthru static
+
+clean:
+ rm -f ./hello_world/hello_bdev
+ rm -f ./passthru/libpassthru_external.*
diff --git a/src/spdk/test/external_code/README.md b/src/spdk/test/external_code/README.md
new file mode 100644
index 000000000..3db3218c0
--- /dev/null
+++ b/src/spdk/test/external_code/README.md
@@ -0,0 +1,17 @@
+This directory is meant to demonstrate how to link an external application and bdev
+module to the SPDK libraries. The makefiles contain six examples of linking against SPDK
+libraries. They cover linking an application both with and without a custom bdev. For each of
+these categories, they also demonstrate linking against the SPDK combined shared library,
+individual shared libraries, and static libraries.
+
+This directory also contains a convenient test script, test_make.sh, which automates making SPDK
+and testing all six of these linker options. It takes a single argument, the path to an SPDK
+repository, and should be run as follows:
+
+~~~
+sudo ./test_make.sh /path/to/spdk
+~~~
+
+The application `hello_world` and bdev module `passthru_external` have been copied from their namesakes
+in the top level [SPDK github repository](https://github.com/spdk/spdk) and don't have any special
+functionality.
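
Beyond test_make.sh, the individual targets in the top-level Makefile above can be invoked directly. A rough sketch, with the SPDK and DPDK paths as placeholders to be replaced by your own build locations (the three variables simply feed -I/-L flags into COMMON_CFLAGS):

~~~
# Point the external build at an existing SPDK/DPDK build.
export SPDK_HEADER_DIR=/path/to/spdk/include
export SPDK_LIB_DIR=/path/to/spdk/build/lib
export DPDK_LIB_DIR=/path/to/spdk/dpdk/build/lib

# Build the passthru module plus hello_world against the combined shared library...
make -C test/external_code hello_world_bdev_shared_combo
# ...or link everything statically instead.
make -C test/external_code hello_world_bdev_static
~~~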
diff --git a/src/spdk/test/external_code/hello_world/.gitignore b/src/spdk/test/external_code/hello_world/.gitignore
new file mode 100644
index 000000000..7bdf93936
--- /dev/null
+++ b/src/spdk/test/external_code/hello_world/.gitignore
@@ -0,0 +1 @@
+hello_bdev
diff --git a/src/spdk/test/external_code/hello_world/Makefile b/src/spdk/test/external_code/hello_world/Makefile
new file mode 100644
index 000000000..9f6c9cf30
--- /dev/null
+++ b/src/spdk/test/external_code/hello_world/Makefile
@@ -0,0 +1,73 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Shows how to compile both an external bdev and an external application against the SPDK combined shared object and dpdk shared objects.
+bdev_shared_combo:
+ $(CC) $(COMMON_CFLAGS) -L../passthru -Wl,-rpath=$(SPDK_LIB_DIR),--no-as-needed -o hello_bdev ./hello_bdev.c -lpassthru_external \
+ -lspdk -lspdk_env_dpdk -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf -lrte_mempool_ring -lrte_pci -lrte_bus_pci -lrte_kvargs \
+ -lrte_vhost -lrte_net -lrte_hash -lrte_cryptodev -Wl,--no-whole-archive
+
+# Shows how to compile both an external bdev and an external application against the SPDK individual shared objects and dpdk shared objects.
+bdev_shared_iso:
+ $(CC) $(COMMON_CFLAGS) -L../passthru -Wl,-rpath=$(SPDK_LIB_DIR),--no-as-needed -o hello_bdev ./hello_bdev.c \
+ -lpassthru_external -lspdk_event_bdev -lspdk_event_accel -lspdk_event_vmd -lspdk_bdev -lspdk_bdev_malloc -lspdk_log -lspdk_thread -lspdk_util -lspdk_event \
+ -lspdk_env_dpdk -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf -lrte_mempool_ring -lrte_pci -lrte_bus_pci -lrte_kvargs \
+ -lrte_vhost -lrte_net -lrte_hash -lrte_cryptodev -Wl,--no-whole-archive -lnuma
+
+# Shows how to compile an external application against the SPDK combined shared object and dpdk shared objects.
+alone_shared_combo:
+ $(CC) $(COMMON_CFLAGS) -Wl,-rpath=$(SPDK_LIB_DIR),--no-as-needed -o hello_bdev ./hello_bdev.c -lspdk -lspdk_env_dpdk -lrte_eal \
+ -lrte_mempool -lrte_ring -lrte_mbuf -lrte_mempool_ring -lrte_pci -lrte_bus_pci -lrte_kvargs -lrte_vhost -lrte_net -lrte_hash -lrte_cryptodev
+
+# Shows how to compile an external application against the SPDK individual shared objects and dpdk shared objects.
+alone_shared_iso:
+ $(CC) $(COMMON_CFLAGS) -Wl,-rpath=$(SPDK_LIB_DIR),--no-as-needed -o hello_bdev ./hello_bdev.c -lspdk_event_bdev \
+ -lspdk_event_accel -lspdk_event_vmd -lspdk_bdev -lspdk_bdev_malloc -lspdk_log -lspdk_thread -lspdk_util -lspdk_event \
+ -lspdk_env_dpdk -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf -lrte_mempool_ring -lrte_pci -lrte_bus_pci -lrte_kvargs \
+ -lrte_vhost -lrte_net -lrte_hash -lrte_cryptodev
+
+# Shows how to compile an external application against the SPDK archives.
+alone_static:
+ $(CC) $(COMMON_CFLAGS) -o hello_bdev ./hello_bdev.c -Wl,--whole-archive,-Bstatic -lspdk_bdev_malloc -lspdk_event_bdev -lspdk_event_accel -lspdk_event_vmd \
+ -lspdk_event_sock -lspdk_bdev -lspdk_accel -lspdk_event -lspdk_thread -lspdk_util -lspdk_conf -lspdk_trace -lspdk_log -lspdk_json \
+ -lspdk_jsonrpc -lspdk_rpc -lspdk_sock -lspdk_notify -lspdk_vmd -lspdk_env_dpdk -lrte_eal -lrte_mempool -lrte_ring \
+ -lrte_mbuf -lrte_mempool_ring -lrte_pci -lrte_bus_pci -lrte_kvargs -lrte_vhost -lrte_net -lrte_hash -lrte_telemetry \
+ -lrte_cryptodev -Wl,--no-whole-archive,-Bdynamic -lnuma -luuid -lpthread -ldl -lrt
+
+# Shows how to compile both an external bdev and an external application against the SPDK archives.
+bdev_static:
+ $(CC) $(COMMON_CFLAGS) -L../passthru -o hello_bdev ./hello_bdev.c -Wl,--whole-archive,-Bstatic -lpassthru_external -lspdk_bdev_malloc -lspdk_event_bdev \
+ -lspdk_event_accel -lspdk_event_vmd -lspdk_event_sock -lspdk_bdev -lspdk_accel -lspdk_event -lspdk_thread -lspdk_util -lspdk_conf -lspdk_trace \
+ -lspdk_log -lspdk_json -lspdk_jsonrpc -lspdk_rpc -lspdk_sock -lspdk_notify -lspdk_vmd -lspdk_env_dpdk -lrte_eal -lrte_mempool \
+ -lrte_ring -lrte_mbuf -lrte_mempool_ring -lrte_pci -lrte_bus_pci -lrte_kvargs -lrte_vhost -lrte_net -lrte_hash -lrte_telemetry -lrte_cryptodev \
+ -Wl,--no-whole-archive,-Bdynamic -lnuma -luuid -lpthread -ldl -lrt
diff --git a/src/spdk/test/external_code/hello_world/bdev.conf b/src/spdk/test/external_code/hello_world/bdev.conf
new file mode 100644
index 000000000..bf582f198
--- /dev/null
+++ b/src/spdk/test/external_code/hello_world/bdev.conf
@@ -0,0 +1,17 @@
+{
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "params": {
+ "name": "Malloc0",
+ "block_size": 4096,
+ "num_blocks": 32
+ },
+ "method": "construct_malloc_bdev"
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/spdk/test/external_code/hello_world/bdev_external.conf b/src/spdk/test/external_code/hello_world/bdev_external.conf
new file mode 100644
index 000000000..dc84cd7a7
--- /dev/null
+++ b/src/spdk/test/external_code/hello_world/bdev_external.conf
@@ -0,0 +1,24 @@
+{
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "params": {
+ "name": "Malloc0",
+ "block_size": 4096,
+ "num_blocks": 32
+ },
+ "method": "construct_malloc_bdev"
+ },
+ {
+ "params": {
+ "base_bdev_name": "Malloc0",
+ "name": "TestPT"
+ },
+ "method": "construct_ext_passthru_bdev"
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/spdk/test/external_code/hello_world/hello_bdev.c b/src/spdk/test/external_code/hello_world/hello_bdev.c
new file mode 100644
index 000000000..0a0195df4
--- /dev/null
+++ b/src/spdk/test/external_code/hello_world/hello_bdev.c
@@ -0,0 +1,300 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/thread.h"
+#include "spdk/bdev.h"
+#include "spdk/env.h"
+#include "spdk/event.h"
+#include "spdk/log.h"
+#include "spdk/string.h"
+#include "spdk/bdev_module.h"
+
+static char *g_bdev_name = "Malloc0";
+
+/*
+ * We'll use this struct to gather housekeeping items to pass between
+ * our events and callbacks.
+ */
+struct hello_context_t {
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *bdev_desc;
+ struct spdk_io_channel *bdev_io_channel;
+ char *buff;
+ char *bdev_name;
+ struct spdk_bdev_io_wait_entry bdev_io_wait;
+};
+
+/*
+ * Usage function for printing parameters that are specific to this application
+ */
+static void
+hello_bdev_usage(void)
+{
+ printf(" -b <bdev> name of the bdev to use\n");
+}
+
+/*
+ * This function is called to parse the parameters that are specific to this application
+ */
+static int hello_bdev_parse_arg(int ch, char *arg)
+{
+ switch (ch) {
+ case 'b':
+ g_bdev_name = arg;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Callback function for read io completion.
+ */
+static void
+read_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ struct hello_context_t *hello_context = cb_arg;
+
+ if (success) {
+ SPDK_NOTICELOG("Read string from bdev : %s\n", hello_context->buff);
+ } else {
+ SPDK_ERRLOG("bdev io read error\n");
+ }
+
+ /* Complete the bdev io and close the channel */
+ spdk_bdev_free_io(bdev_io);
+ spdk_put_io_channel(hello_context->bdev_io_channel);
+ spdk_bdev_close(hello_context->bdev_desc);
+ SPDK_NOTICELOG("Stopping app\n");
+ spdk_app_stop(success ? 0 : -1);
+}
+
+static void
+hello_read(void *arg)
+{
+ struct hello_context_t *hello_context = arg;
+ int rc = 0;
+ uint32_t length = spdk_bdev_get_block_size(hello_context->bdev);
+
+ SPDK_NOTICELOG("Reading io\n");
+ rc = spdk_bdev_read(hello_context->bdev_desc, hello_context->bdev_io_channel,
+ hello_context->buff, 0, length, read_complete, hello_context);
+
+ if (rc == -ENOMEM) {
+ SPDK_NOTICELOG("Queueing io\n");
+ /* In case we cannot perform I/O now, queue I/O */
+ hello_context->bdev_io_wait.bdev = hello_context->bdev;
+ hello_context->bdev_io_wait.cb_fn = hello_read;
+ hello_context->bdev_io_wait.cb_arg = hello_context;
+ spdk_bdev_queue_io_wait(hello_context->bdev, hello_context->bdev_io_channel,
+ &hello_context->bdev_io_wait);
+ } else if (rc) {
+ SPDK_ERRLOG("%s error while reading from bdev: %d\n", spdk_strerror(-rc), rc);
+ spdk_put_io_channel(hello_context->bdev_io_channel);
+ spdk_bdev_close(hello_context->bdev_desc);
+ spdk_app_stop(-1);
+ }
+}
+
+/*
+ * Callback function for write io completion.
+ */
+static void
+write_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ struct hello_context_t *hello_context = cb_arg;
+ uint32_t length;
+
+ /* Complete the I/O */
+ spdk_bdev_free_io(bdev_io);
+
+ if (success) {
+ SPDK_NOTICELOG("bdev io write completed successfully\n");
+ } else {
+ SPDK_ERRLOG("bdev io write error: %d\n", EIO);
+ spdk_put_io_channel(hello_context->bdev_io_channel);
+ spdk_bdev_close(hello_context->bdev_desc);
+ spdk_app_stop(-1);
+ return;
+ }
+
+ /* Zero the buffer so that we can use it for reading */
+ length = spdk_bdev_get_block_size(hello_context->bdev);
+ memset(hello_context->buff, 0, length);
+
+ hello_read(hello_context);
+}
+
+static void
+hello_write(void *arg)
+{
+ struct hello_context_t *hello_context = arg;
+ int rc = 0;
+ uint32_t length = spdk_bdev_get_block_size(hello_context->bdev);
+
+ SPDK_NOTICELOG("Writing to the bdev\n");
+ rc = spdk_bdev_write(hello_context->bdev_desc, hello_context->bdev_io_channel,
+ hello_context->buff, 0, length, write_complete, hello_context);
+
+ if (rc == -ENOMEM) {
+ SPDK_NOTICELOG("Queueing io\n");
+ /* In case we cannot perform I/O now, queue I/O */
+ hello_context->bdev_io_wait.bdev = hello_context->bdev;
+ hello_context->bdev_io_wait.cb_fn = hello_write;
+ hello_context->bdev_io_wait.cb_arg = hello_context;
+ spdk_bdev_queue_io_wait(hello_context->bdev, hello_context->bdev_io_channel,
+ &hello_context->bdev_io_wait);
+ } else if (rc) {
+ SPDK_ERRLOG("%s error while writing to bdev: %d\n", spdk_strerror(-rc), rc);
+ spdk_put_io_channel(hello_context->bdev_io_channel);
+ spdk_bdev_close(hello_context->bdev_desc);
+ spdk_app_stop(-1);
+ }
+}
+
+/*
+ * Our initial event that kicks off everything from main().
+ */
+static void
+hello_start(void *arg1)
+{
+ struct hello_context_t *hello_context = arg1;
+ uint32_t blk_size, buf_align;
+ int rc = 0;
+ hello_context->bdev = NULL;
+ hello_context->bdev_desc = NULL;
+
+ SPDK_NOTICELOG("Successfully started the application\n");
+
+ /*
+ * Get the bdev. There can be many bdevs configured in
+ * the configuration file, but this application will only
+ * use the one the user specifies at runtime, so we get it via its name.
+ */
+ hello_context->bdev = spdk_bdev_get_by_name(hello_context->bdev_name);
+ if (hello_context->bdev == NULL) {
+ SPDK_ERRLOG("Could not find the bdev: %s\n", hello_context->bdev_name);
+ spdk_app_stop(-1);
+ return;
+ }
+
+ /*
+ * Open the bdev by calling spdk_bdev_open()
+ * The function will return a descriptor
+ */
+ SPDK_NOTICELOG("Opening the bdev %s\n", hello_context->bdev_name);
+ rc = spdk_bdev_open(hello_context->bdev, true, NULL, NULL, &hello_context->bdev_desc);
+ if (rc) {
+ SPDK_ERRLOG("Could not open bdev: %s\n", hello_context->bdev_name);
+ spdk_app_stop(-1);
+ return;
+ }
+
+ SPDK_NOTICELOG("Opening io channel\n");
+ /* Open I/O channel */
+ hello_context->bdev_io_channel = spdk_bdev_get_io_channel(hello_context->bdev_desc);
+ if (hello_context->bdev_io_channel == NULL) {
+ SPDK_ERRLOG("Could not create bdev I/O channel!!\n");
+ spdk_bdev_close(hello_context->bdev_desc);
+ spdk_app_stop(-1);
+ return;
+ }
+
+ /* Allocate memory for the write buffer.
+ * Initialize the write buffer with the string "Hello World!"
+ */
+ blk_size = spdk_bdev_get_block_size(hello_context->bdev);
+ buf_align = spdk_bdev_get_buf_align(hello_context->bdev);
+ hello_context->buff = spdk_dma_zmalloc(blk_size, buf_align, NULL);
+ if (!hello_context->buff) {
+ SPDK_ERRLOG("Failed to allocate buffer\n");
+ spdk_put_io_channel(hello_context->bdev_io_channel);
+ spdk_bdev_close(hello_context->bdev_desc);
+ spdk_app_stop(-1);
+ return;
+ }
+ snprintf(hello_context->buff, blk_size, "%s", "Hello World!\n");
+
+ hello_write(hello_context);
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_app_opts opts = {};
+ int rc = 0;
+ struct hello_context_t hello_context = {};
+
+ /* Set default values in opts structure. */
+ spdk_app_opts_init(&opts);
+ opts.name = "hello_bdev";
+
+ /*
+ * The user can provide the config file and bdev name at run time.
+ * For example, to use Malloc0 in file bdev.conf run with params
+ * ./hello_bdev -c bdev.conf -b Malloc0
+ * To use passthru bdev PT0 run with params
+ * ./hello_bdev -c bdev.conf -b PT0
+ * If the bdev name is not specified,
+ * then Malloc0 is used by default
+ */
+ if ((rc = spdk_app_parse_args(argc, argv, &opts, "b:", NULL, hello_bdev_parse_arg,
+ hello_bdev_usage)) != SPDK_APP_PARSE_ARGS_SUCCESS) {
+ exit(rc);
+ }
+ if (opts.json_config_file == NULL) {
+ SPDK_ERRLOG("configfile must be specified using --json <conffile> e.g. --json bdev.conf\n");
+ exit(1);
+ }
+ hello_context.bdev_name = g_bdev_name;
+
+ /*
+ * spdk_app_start() will block running hello_start() until
+ * spdk_app_stop() is called by someone (not simply when
+ * hello_start() returns), or if an error occurs during
+ * spdk_app_start() before hello_start() runs.
+ */
+ rc = spdk_app_start(&opts, hello_start, &hello_context);
+ if (rc) {
+ SPDK_ERRLOG("ERROR starting application\n");
+ }
+
+ /* When the app stops, free up memory that we allocated. */
+ spdk_dma_free(hello_context.buff);
+
+ /* Gracefully close out all of the SPDK subsystems. */
+ spdk_app_fini();
+ return rc;
+}
diff --git a/src/spdk/test/external_code/passthru/Makefile b/src/spdk/test/external_code/passthru/Makefile
new file mode 100644
index 000000000..e8560ace4
--- /dev/null
+++ b/src/spdk/test/external_code/passthru/Makefile
@@ -0,0 +1,43 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+src=vbdev_passthru_rpc.c vbdev_passthru.c
+shared:
+ $(CC) $(COMMON_CFLAGS) -c -fPIC ./vbdev_passthru_rpc.c -o ./vbdev_passthru_rpc.o
+ $(CC) $(COMMON_CFLAGS) -c -fPIC ./vbdev_passthru.c -o ./vbdev_passthru.o
+ $(CC) $(COMMON_CFLAGS) -shared ./vbdev_passthru_rpc.o ./vbdev_passthru.o -o ./libpassthru_external.so
+
+static:
+ $(CC) $(COMMON_CFLAGS) -c ./vbdev_passthru_rpc.c -o ./vbdev_passthru_rpc.o
+ $(CC) $(COMMON_CFLAGS) -c ./vbdev_passthru.c -o ./vbdev_passthru.o
+ $(AR) rcs ./libpassthru_external.a ./vbdev_passthru_rpc.o ./vbdev_passthru.o
diff --git a/src/spdk/test/external_code/passthru/vbdev_passthru.c b/src/spdk/test/external_code/passthru/vbdev_passthru.c
new file mode 100644
index 000000000..cecc8f701
--- /dev/null
+++ b/src/spdk/test/external_code/passthru/vbdev_passthru.c
@@ -0,0 +1,748 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This is a simple example of a virtual block device module that passes IO
+ * down to a bdev (or bdevs) that it's configured to attach to.
+ */
+
+#include "vbdev_passthru.h"
+#include "spdk/env.h"
+#include "spdk/conf.h"
+#include "spdk/endian.h"
+#include "spdk/thread.h"
+
+
+static int vbdev_ext_passthru_init(void);
+static void vbdev_ext_passthru_get_spdk_running_config(FILE *fp);
+static int vbdev_ext_passthru_get_ctx_size(void);
+static void vbdev_ext_passthru_examine(struct spdk_bdev *bdev);
+static void vbdev_ext_passthru_finish(void);
+static int vbdev_ext_passthru_config_json(struct spdk_json_write_ctx *w);
+
+static struct spdk_bdev_module passthru_if_external = {
+ .name = "passthru_external",
+ .module_init = vbdev_ext_passthru_init,
+ .config_text = vbdev_ext_passthru_get_spdk_running_config,
+ .get_ctx_size = vbdev_ext_passthru_get_ctx_size,
+ .examine_config = vbdev_ext_passthru_examine,
+ .module_fini = vbdev_ext_passthru_finish,
+ .config_json = vbdev_ext_passthru_config_json
+};
+
+SPDK_BDEV_MODULE_REGISTER(passthru, &passthru_if_external)
+
+/* List of pt_bdev names and their base bdevs via configuration file.
+ * Used so we can parse the conf once at init and use this list in examine().
+ */
+struct bdev_names {
+ char *vbdev_name;
+ char *bdev_name;
+ TAILQ_ENTRY(bdev_names) link;
+};
+static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);
+
+/* List of virtual bdevs and associated info for each. */
+struct vbdev_passthru {
+ struct spdk_bdev *base_bdev; /* the thing we're attaching to */
+ struct spdk_bdev_desc *base_desc; /* its descriptor we get from open */
+ struct spdk_bdev pt_bdev; /* the PT virtual bdev */
+ TAILQ_ENTRY(vbdev_passthru) link;
+};
+static TAILQ_HEAD(, vbdev_passthru) g_pt_nodes = TAILQ_HEAD_INITIALIZER(g_pt_nodes);
+
+/* The pt vbdev channel struct. It is allocated and freed on our behalf by the io channel code.
+ * If this vbdev needed to implement a poller or a queue for IO, this is where those things
+ * would be defined. This passthru bdev doesn't actually need to allocate a channel; it could
+ * simply pass back the channel of the bdev underneath it, but for example purposes we
+ * present our own to the upper layers.
+ */
+struct pt_io_channel {
+ struct spdk_io_channel *base_ch; /* IO channel of base device */
+};
+
+/* Just for fun, this pt_bdev module doesn't need it, but this is essentially a per-IO
+ * context that we get handed by the bdev layer.
+ */
+struct passthru_bdev_io {
+ uint8_t test;
+
+ /* bdev related */
+ struct spdk_io_channel *ch;
+
+ /* for bdev_io_wait */
+ struct spdk_bdev_io_wait_entry bdev_io_wait;
+};
+
+static void
+vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);
+
+
+/* Callback for unregistering the IO device. */
+static void
+_device_unregister_cb(void *io_device)
+{
+ struct vbdev_passthru *pt_node = io_device;
+
+ /* Done with this pt_node. */
+ free(pt_node->pt_bdev.name);
+ free(pt_node);
+}
+
+/* Called after we've unregistered following a hot remove callback.
+ * Our finish entry point will be called next.
+ */
+static int
+vbdev_passthru_destruct(void *ctx)
+{
+ struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;
+
+ /* It is important to follow this exact sequence of steps for destroying
+ * a vbdev...
+ */
+
+ TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
+
+ /* Unclaim the underlying bdev. */
+ spdk_bdev_module_release_bdev(pt_node->base_bdev);
+
+ /* Close the underlying bdev. */
+ spdk_bdev_close(pt_node->base_desc);
+
+ /* Unregister the io_device. */
+ spdk_io_device_unregister(pt_node, _device_unregister_cb);
+
+ return 0;
+}
+
+/* Completion callback for IO that was issued from this bdev. The original bdev_io
+ * is passed in as an arg so we'll complete that one with the appropriate status
+ * and then free the one that this module issued.
+ */
+static void
+_pt_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ struct spdk_bdev_io *orig_io = cb_arg;
+ int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;
+
+ /* We set up this value in the submission routine, just showing here that it is
+ * passed back to us.
+ */
+ if (io_ctx->test != 0x5a) {
+ SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
+ io_ctx->test);
+ }
+
+ /* Complete the original IO and then free the one that we created here
+ * as a result of issuing an IO via submit_request.
+ */
+ spdk_bdev_io_complete(orig_io, status);
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+vbdev_passthru_resubmit_io(void *arg)
+{
+ struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
+ struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
+
+ vbdev_passthru_submit_request(io_ctx->ch, bdev_io);
+}
+
+static void
+vbdev_passthru_queue_io(struct spdk_bdev_io *bdev_io)
+{
+ struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
+ int rc;
+
+ io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
+ io_ctx->bdev_io_wait.cb_fn = vbdev_passthru_resubmit_io;
+ io_ctx->bdev_io_wait.cb_arg = bdev_io;
+
+ rc = spdk_bdev_queue_io_wait(bdev_io->bdev, io_ctx->ch, &io_ctx->bdev_io_wait);
+ if (rc != 0) {
+ SPDK_ERRLOG("Queue io failed in vbdev_passthru_queue_io, rc=%d.\n", rc);
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
+ }
+}
+
+/* Callback for getting a buf from the bdev pool in the event that the caller passed
+ * in NULL; we need to own the buffer so it doesn't get freed by another vbdev module
+ * beneath us before we're done with it. That won't happen in this example but it could
+ * if this example were used as a template for something more complex.
+ */
+static void
+pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
+{
+ struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru,
+ pt_bdev);
+ struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
+ struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
+ int rc;
+
+ if (!success) {
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
+ return;
+ }
+
+ if (bdev_io->u.bdev.md_buf == NULL) {
+ rc = spdk_bdev_readv_blocks(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
+ bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
+ bdev_io->u.bdev.num_blocks, _pt_complete_io,
+ bdev_io);
+ } else {
+ rc = spdk_bdev_readv_blocks_with_md(pt_node->base_desc, pt_ch->base_ch,
+ bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
+ bdev_io->u.bdev.md_buf,
+ bdev_io->u.bdev.offset_blocks,
+ bdev_io->u.bdev.num_blocks,
+ _pt_complete_io, bdev_io);
+ }
+
+ if (rc != 0) {
+ if (rc == -ENOMEM) {
+ SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
+ io_ctx->ch = ch;
+ vbdev_passthru_queue_io(bdev_io);
+ } else {
+ SPDK_ERRLOG("ERROR on bdev_io submission!\n");
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
+ }
+ }
+}
+
+/* Called when someone above submits I/O to this pt vbdev. We're simply passing it on here
+ * via SPDK I/O calls, which in turn allocate another bdev_io and call our completion callback
+ * provided below along with the original bdev_io so that we can complete it once this I/O
+ * completes.
+ */
+static void
+vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+{
+ struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru, pt_bdev);
+ struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
+ struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
+ int rc = 0;
+
+ /* Set up a per-IO context value; we don't do anything with it in the vbdev other
+ * than confirm that we get the same value back in the completion callback, just
+ * to demonstrate.
+ */
+ io_ctx->test = 0x5a;
+
+ switch (bdev_io->type) {
+ case SPDK_BDEV_IO_TYPE_READ:
+ spdk_bdev_io_get_buf(bdev_io, pt_read_get_buf_cb,
+ bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
+ break;
+ case SPDK_BDEV_IO_TYPE_WRITE:
+ if (bdev_io->u.bdev.md_buf == NULL) {
+ rc = spdk_bdev_writev_blocks(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
+ bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
+ bdev_io->u.bdev.num_blocks, _pt_complete_io,
+ bdev_io);
+ } else {
+ rc = spdk_bdev_writev_blocks_with_md(pt_node->base_desc, pt_ch->base_ch,
+ bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
+ bdev_io->u.bdev.md_buf,
+ bdev_io->u.bdev.offset_blocks,
+ bdev_io->u.bdev.num_blocks,
+ _pt_complete_io, bdev_io);
+ }
+ break;
+ case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
+ rc = spdk_bdev_write_zeroes_blocks(pt_node->base_desc, pt_ch->base_ch,
+ bdev_io->u.bdev.offset_blocks,
+ bdev_io->u.bdev.num_blocks,
+ _pt_complete_io, bdev_io);
+ break;
+ case SPDK_BDEV_IO_TYPE_UNMAP:
+ rc = spdk_bdev_unmap_blocks(pt_node->base_desc, pt_ch->base_ch,
+ bdev_io->u.bdev.offset_blocks,
+ bdev_io->u.bdev.num_blocks,
+ _pt_complete_io, bdev_io);
+ break;
+ case SPDK_BDEV_IO_TYPE_FLUSH:
+ rc = spdk_bdev_flush_blocks(pt_node->base_desc, pt_ch->base_ch,
+ bdev_io->u.bdev.offset_blocks,
+ bdev_io->u.bdev.num_blocks,
+ _pt_complete_io, bdev_io);
+ break;
+ case SPDK_BDEV_IO_TYPE_RESET:
+ rc = spdk_bdev_reset(pt_node->base_desc, pt_ch->base_ch,
+ _pt_complete_io, bdev_io);
+ break;
+ default:
+ SPDK_ERRLOG("passthru: unknown I/O type %d\n", bdev_io->type);
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
+ return;
+ }
+ if (rc != 0) {
+ if (rc == -ENOMEM) {
+ SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
+ io_ctx->ch = ch;
+ vbdev_passthru_queue_io(bdev_io);
+ } else {
+ SPDK_ERRLOG("ERROR on bdev_io submission!\n");
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
+ }
+ }
+}
+
+/* We'll just ask the base bdev and pass its answer along. However, if we wanted to be
+ * more (or less) restrictive for some reason, we could get the response back and
+ * modify it to suit our purposes.
+ */
+static bool
+vbdev_passthru_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
+{
+ struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;
+
+ return spdk_bdev_io_type_supported(pt_node->base_bdev, io_type);
+}
+
+/* We supplied this as an entry point for upper layers that want to communicate with this
+ * bdev. This is how they get a channel. We are passed the same context we provided when
+ * we created our PT vbdev in examine() which, for this bdev, is the address of one of
+ * our context nodes. From here we'll ask the SPDK channel code to fill out our channel
+ * struct and we'll keep it in our PT node.
+ */
+static struct spdk_io_channel *
+vbdev_passthru_get_io_channel(void *ctx)
+{
+ struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;
+ struct spdk_io_channel *pt_ch = NULL;
+
+ /* The IO channel code will allocate a channel for us which consists of
+ * the SPDK channel structure plus the size of our pt_io_channel struct
+ * that we passed in when we registered our IO device. It will then call
+ * our channel create callback to populate any elements that we need to
+ * update.
+ */
+ pt_ch = spdk_get_io_channel(pt_node);
+
+ return pt_ch;
+}
+
+/* This is the output for get_bdevs() for this vbdev */
+static int
+vbdev_passthru_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
+{
+ struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;
+
+ spdk_json_write_name(w, "passthru");
+ spdk_json_write_object_begin(w);
+ spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
+ spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
+ spdk_json_write_object_end(w);
+
+ return 0;
+}
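+/* For orientation only: with a passthru vbdev named "TestPT" stacked on base bdev
+ * "Malloc0" (names are just illustrative), the fragment written above would show up
+ * in the get_bdevs/bdev_get_bdevs output roughly as:
+ *
+ *   "passthru": {
+ *     "name": "TestPT",
+ *     "base_bdev_name": "Malloc0"
+ *   }
+ */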
+
+/* This is used to generate JSON that can configure this module to its current state. */
+static int
+vbdev_ext_passthru_config_json(struct spdk_json_write_ctx *w)
+{
+ struct vbdev_passthru *pt_node;
+
+ TAILQ_FOREACH(pt_node, &g_pt_nodes, link) {
+ spdk_json_write_object_begin(w);
+ spdk_json_write_named_string(w, "method", "construct_passthru_bdev");
+ spdk_json_write_named_object_begin(w, "params");
+ spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
+ spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
+ spdk_json_write_object_end(w);
+ spdk_json_write_object_end(w);
+ }
+ return 0;
+}
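+/* For reference, each entry emitted by vbdev_ext_passthru_config_json() above looks
+ * roughly like the following (bdev names are illustrative):
+ *
+ *   {
+ *     "method": "construct_passthru_bdev",
+ *     "params": {
+ *       "base_bdev_name": "Malloc0",
+ *       "name": "TestPT"
+ *     }
+ *   }
+ *
+ * Note that the method name written here is "construct_passthru_bdev", while the RPC
+ * registered by this module in vbdev_passthru_rpc.c is "construct_ext_passthru_bdev".
+ */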
+
+/* We provide this callback for the SPDK channel code to create a channel using
+ * the channel struct we provided in our module get_io_channel() entry point. Here
+ * we get and save off an underlying base channel of the device below us so that
+ * we can communicate with the base bdev on a per channel basis. If we needed
+ * our own poller for this vbdev, we'd register it here.
+ */
+static int
+pt_bdev_ch_create_cb(void *io_device, void *ctx_buf)
+{
+ struct pt_io_channel *pt_ch = ctx_buf;
+ struct vbdev_passthru *pt_node = io_device;
+
+ pt_ch->base_ch = spdk_bdev_get_io_channel(pt_node->base_desc);
+
+ return 0;
+}
+
+/* We provide this callback for the SPDK channel code to destroy a channel
+ * created with our create callback. We just need to undo anything we did
+ * when we created it. If this bdev used its own poller, we'd unregister it here.
+ */
+static void
+pt_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
+{
+ struct pt_io_channel *pt_ch = ctx_buf;
+
+ spdk_put_io_channel(pt_ch->base_ch);
+}
+
+/* Create the passthru association from the bdev and vbdev name and insert
+ * on the global list. */
+static int
+vbdev_passthru_insert_name(const char *bdev_name, const char *vbdev_name)
+{
+ struct bdev_names *name;
+
+ TAILQ_FOREACH(name, &g_bdev_names, link) {
+ if (strcmp(vbdev_name, name->vbdev_name) == 0) {
+ SPDK_ERRLOG("passthru bdev %s already exists\n", vbdev_name);
+ return -EEXIST;
+ }
+ }
+
+ name = calloc(1, sizeof(struct bdev_names));
+ if (!name) {
+ SPDK_ERRLOG("could not allocate bdev_names\n");
+ return -ENOMEM;
+ }
+
+ name->bdev_name = strdup(bdev_name);
+ if (!name->bdev_name) {
+ SPDK_ERRLOG("could not allocate name->bdev_name\n");
+ free(name);
+ return -ENOMEM;
+ }
+
+ name->vbdev_name = strdup(vbdev_name);
+ if (!name->vbdev_name) {
+ SPDK_ERRLOG("could not allocate name->vbdev_name\n");
+ free(name->bdev_name);
+ free(name);
+ return -ENOMEM;
+ }
+
+ TAILQ_INSERT_TAIL(&g_bdev_names, name, link);
+
+ return 0;
+}
+
+/* On init, just parse config file and build list of pt vbdevs and bdev name pairs. */
+static int
+vbdev_ext_passthru_init(void)
+{
+ struct spdk_conf_section *sp = NULL;
+ const char *conf_bdev_name = NULL;
+ const char *conf_vbdev_name = NULL;
+ struct bdev_names *name;
+ int i, rc;
+
+ sp = spdk_conf_find_section(NULL, "Ext_Pt");
+ if (sp == NULL) {
+ return 0;
+ }
+
+ for (i = 0; ; i++) {
+ if (!spdk_conf_section_get_nval(sp, "PTE", i)) {
+ break;
+ }
+
+ conf_bdev_name = spdk_conf_section_get_nmval(sp, "PTE", i, 0);
+ if (!conf_bdev_name) {
+ SPDK_ERRLOG("Passthru configuration missing bdev name\n");
+ break;
+ }
+
+ conf_vbdev_name = spdk_conf_section_get_nmval(sp, "PTE", i, 1);
+ if (!conf_vbdev_name) {
+ SPDK_ERRLOG("Passthru configuration missing pt_bdev name\n");
+ break;
+ }
+
+ rc = vbdev_passthru_insert_name(conf_bdev_name, conf_vbdev_name);
+ if (rc != 0) {
+ return rc;
+ }
+ }
+ TAILQ_FOREACH(name, &g_bdev_names, link) {
+ SPDK_NOTICELOG("conf parse matched: %s\n", name->bdev_name);
+ }
+ return 0;
+}
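+/* For reference, the legacy config section parsed above (and re-emitted by
+ * vbdev_ext_passthru_get_spdk_running_config() below) looks like this, with
+ * illustrative bdev/vbdev names:
+ *
+ *   [Ext_Pt]
+ *     PTE Malloc0 TestPT
+ */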
+
+/* Called when the entire module is being torn down. */
+static void
+vbdev_ext_passthru_finish(void)
+{
+ struct bdev_names *name;
+
+ while ((name = TAILQ_FIRST(&g_bdev_names))) {
+ TAILQ_REMOVE(&g_bdev_names, name, link);
+ free(name->bdev_name);
+ free(name->vbdev_name);
+ free(name);
+ }
+}
+
+/* During init we'll be asked how much memory we'd like passed to us
+ * in bdev_io structures as context. Here's where we specify how
+ * much context we want per IO.
+ */
+static int
+vbdev_ext_passthru_get_ctx_size(void)
+{
+ return sizeof(struct passthru_bdev_io);
+}
+
+/* Called when SPDK wants to save the current config of this vbdev module to
+ * a file.
+ */
+static void
+vbdev_ext_passthru_get_spdk_running_config(FILE *fp)
+{
+ struct bdev_names *names = NULL;
+
+ fprintf(fp, "\n[Ext_Pt]\n");
+ TAILQ_FOREACH(names, &g_bdev_names, link) {
+ fprintf(fp, " PTE %s %s\n", names->bdev_name, names->vbdev_name);
+ }
+ fprintf(fp, "\n");
+}
+
+/* Whereas vbdev_ext_passthru_config_json() is used to generate per-module JSON config data, this
+ * function is called to output any per-bdev-specific methods. For the PT module, there are
+ * none.
+ */
+static void
+vbdev_passthru_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
+{
+ /* No config per bdev needed */
+}
+
+/* When we register our bdev this is how we specify our entry points. */
+static const struct spdk_bdev_fn_table vbdev_passthru_fn_table = {
+ .destruct = vbdev_passthru_destruct,
+ .submit_request = vbdev_passthru_submit_request,
+ .io_type_supported = vbdev_passthru_io_type_supported,
+ .get_io_channel = vbdev_passthru_get_io_channel,
+ .dump_info_json = vbdev_passthru_dump_info_json,
+ .write_config_json = vbdev_passthru_write_config_json,
+};
+
+/* Called when the underlying base bdev goes away. */
+static void
+vbdev_passthru_base_bdev_hotremove_cb(void *ctx)
+{
+ struct vbdev_passthru *pt_node, *tmp;
+ struct spdk_bdev *bdev_find = ctx;
+
+ TAILQ_FOREACH_SAFE(pt_node, &g_pt_nodes, link, tmp) {
+ if (bdev_find == pt_node->base_bdev) {
+ spdk_bdev_unregister(&pt_node->pt_bdev, NULL, NULL);
+ }
+ }
+}
+
+/* Create and register the passthru vbdev if we find it in our list of bdev names.
+ * This can be called either from the examine path or from the RPC method.
+ */
+static int
+vbdev_passthru_register(struct spdk_bdev *bdev)
+{
+ struct bdev_names *name;
+ struct vbdev_passthru *pt_node;
+ int rc = 0;
+
+ /* Check our list of names from config versus this bdev and if
+ * there's a match, create the pt_node & bdev accordingly.
+ */
+ TAILQ_FOREACH(name, &g_bdev_names, link) {
+ if (strcmp(name->bdev_name, bdev->name) != 0) {
+ continue;
+ }
+
+ SPDK_NOTICELOG("Match on %s\n", bdev->name);
+ pt_node = calloc(1, sizeof(struct vbdev_passthru));
+ if (!pt_node) {
+ rc = -ENOMEM;
+ SPDK_ERRLOG("could not allocate pt_node\n");
+ break;
+ }
+
+ /* The base bdev that we're attaching to. */
+ pt_node->base_bdev = bdev;
+ pt_node->pt_bdev.name = strdup(name->vbdev_name);
+ if (!pt_node->pt_bdev.name) {
+ rc = -ENOMEM;
+ SPDK_ERRLOG("could not allocate pt_bdev name\n");
+ free(pt_node);
+ break;
+ }
+ pt_node->pt_bdev.product_name = "passthru";
+
+ /* Copy some properties from the underlying base bdev. */
+ pt_node->pt_bdev.write_cache = bdev->write_cache;
+ pt_node->pt_bdev.required_alignment = bdev->required_alignment;
+ pt_node->pt_bdev.optimal_io_boundary = bdev->optimal_io_boundary;
+ pt_node->pt_bdev.blocklen = bdev->blocklen;
+ pt_node->pt_bdev.blockcnt = bdev->blockcnt;
+
+ pt_node->pt_bdev.md_interleave = bdev->md_interleave;
+ pt_node->pt_bdev.md_len = bdev->md_len;
+ pt_node->pt_bdev.dif_type = bdev->dif_type;
+ pt_node->pt_bdev.dif_is_head_of_md = bdev->dif_is_head_of_md;
+ pt_node->pt_bdev.dif_check_flags = bdev->dif_check_flags;
+
+ /* This is the context that is passed to us when the bdev
+ * layer calls in so we'll save our pt_bdev node here.
+ */
+ pt_node->pt_bdev.ctxt = pt_node;
+ pt_node->pt_bdev.fn_table = &vbdev_passthru_fn_table;
+ pt_node->pt_bdev.module = &passthru_if_external;
+ TAILQ_INSERT_TAIL(&g_pt_nodes, pt_node, link);
+
+ spdk_io_device_register(pt_node, pt_bdev_ch_create_cb, pt_bdev_ch_destroy_cb,
+ sizeof(struct pt_io_channel),
+ name->vbdev_name);
+ SPDK_NOTICELOG("io_device created at: 0x%p\n", pt_node);
+
+ rc = spdk_bdev_open(bdev, true, vbdev_passthru_base_bdev_hotremove_cb,
+ bdev, &pt_node->base_desc);
+ if (rc) {
+ SPDK_ERRLOG("could not open bdev %s\n", spdk_bdev_get_name(bdev));
+ TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
+ spdk_io_device_unregister(pt_node, NULL);
+ free(pt_node->pt_bdev.name);
+ free(pt_node);
+ break;
+ }
+ SPDK_NOTICELOG("bdev opened\n");
+
+ rc = spdk_bdev_module_claim_bdev(bdev, pt_node->base_desc, pt_node->pt_bdev.module);
+ if (rc) {
+ SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(bdev));
+ spdk_bdev_close(pt_node->base_desc);
+ TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
+ spdk_io_device_unregister(pt_node, NULL);
+ free(pt_node->pt_bdev.name);
+ free(pt_node);
+ break;
+ }
+ SPDK_NOTICELOG("bdev claimed\n");
+
+ rc = spdk_bdev_register(&pt_node->pt_bdev);
+ if (rc) {
+ SPDK_ERRLOG("could not register pt_bdev\n");
+ spdk_bdev_module_release_bdev(&pt_node->pt_bdev);
+ spdk_bdev_close(pt_node->base_desc);
+ TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
+ spdk_io_device_unregister(pt_node, NULL);
+ free(pt_node->pt_bdev.name);
+ free(pt_node);
+ break;
+ }
+ SPDK_NOTICELOG("ext_pt_bdev registered\n");
+ SPDK_NOTICELOG("created ext_pt_bdev for: %s\n", name->vbdev_name);
+ }
+
+ return rc;
+}
+
+/* Create the passthru disk from the given bdev and vbdev name. */
+int
+create_passthru_disk(const char *bdev_name, const char *vbdev_name)
+{
+ struct spdk_bdev *bdev = NULL;
+ int rc = 0;
+
+ /* Insert the bdev into our global name list even if it doesn't exist yet;
+ * it may show up soon...
+ */
+ rc = vbdev_passthru_insert_name(bdev_name, vbdev_name);
+ if (rc) {
+ return rc;
+ }
+
+ bdev = spdk_bdev_get_by_name(bdev_name);
+ if (!bdev) {
+ /* This is not an error; we tracked the name above and it still
+ * may show up later.
+ */
+ SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
+ return 0;
+ }
+
+ return vbdev_passthru_register(bdev);
+}
+
+void
+delete_passthru_disk(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ struct bdev_names *name;
+
+ if (!bdev || bdev->module != &passthru_if_external) {
+ cb_fn(cb_arg, -ENODEV);
+ return;
+ }
+
+ /* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
+ * vbdev does not get re-created if the same bdev is constructed at some other time,
+ * unless the underlying bdev was hot-removed.
+ */
+ TAILQ_FOREACH(name, &g_bdev_names, link) {
+ if (strcmp(name->vbdev_name, bdev->name) == 0) {
+ TAILQ_REMOVE(&g_bdev_names, name, link);
+ free(name->bdev_name);
+ free(name->vbdev_name);
+ free(name);
+ break;
+ }
+ }
+
+ /* Additional cleanup happens in the destruct callback. */
+ spdk_bdev_unregister(bdev, cb_fn, cb_arg);
+}
+
+/* Because we registered this function as the examine callback of our bdev module,
+ * we'll get this call anytime a new bdev shows up.
+ * Here we need to decide if we care about it and if so what to do. We
+ * parsed the config file at init so we check the new bdev against the list
+ * we built up at that time and if the user configured us to attach to this
+ * bdev, here's where we do it.
+ */
+static void
+vbdev_ext_passthru_examine(struct spdk_bdev *bdev)
+{
+ vbdev_passthru_register(bdev);
+
+ spdk_bdev_module_examine_done(&passthru_if_external);
+}
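+/* A minimal sketch, for orientation only, of how these callbacks are typically wired
+ * into the bdev module descriptor. The real passthru_if_external definition lives
+ * earlier in this file (outside this excerpt), so the exact field values below --
+ * in particular the module name and whether examine_config or examine_disk is used --
+ * are assumptions, not a copy of the actual code:
+ *
+ *   static struct spdk_bdev_module passthru_if_external = {
+ *           .name            = "ext_passthru",
+ *           .module_init     = vbdev_ext_passthru_init,
+ *           .module_fini     = vbdev_ext_passthru_finish,
+ *           .config_text     = vbdev_ext_passthru_get_spdk_running_config,
+ *           .config_json     = vbdev_ext_passthru_config_json,
+ *           .get_ctx_size    = vbdev_ext_passthru_get_ctx_size,
+ *           .examine_config  = vbdev_ext_passthru_examine,
+ *   };
+ *   SPDK_BDEV_MODULE_REGISTER(ext_passthru, &passthru_if_external)
+ */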
diff --git a/src/spdk/test/external_code/passthru/vbdev_passthru.h b/src/spdk/test/external_code/passthru/vbdev_passthru.h
new file mode 100644
index 000000000..db6ad8e8f
--- /dev/null
+++ b/src/spdk/test/external_code/passthru/vbdev_passthru.h
@@ -0,0 +1,65 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SPDK_VBDEV_PASSTHRU_H
+#define SPDK_VBDEV_PASSTHRU_H
+
+#include "spdk/stdinc.h"
+
+#include "spdk/bdev.h"
+#include "spdk/bdev_module.h"
+#include "spdk/log.h"
+#include "spdk/rpc.h"
+#include "spdk/string.h"
+#include "spdk/util.h"
+
+/**
+ * Create new pass through bdev.
+ *
+ * \param bdev_name Bdev on which pass through vbdev will be created.
+ * \param vbdev_name Name of the pass through bdev.
+ * \return 0 on success, other on failure.
+ */
+int create_passthru_disk(const char *bdev_name, const char *vbdev_name);
+
+/**
+ * Delete passthru bdev.
+ *
+ * \param bdev Pointer to pass through bdev.
+ * \param cb_fn Function to call after deletion.
+ * \param cb_arg Argument to pass to cb_fn.
+ */
+void delete_passthru_disk(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
+ void *cb_arg);
+
+#endif /* SPDK_VBDEV_PASSTHRU_H */
diff --git a/src/spdk/test/external_code/passthru/vbdev_passthru_rpc.c b/src/spdk/test/external_code/passthru/vbdev_passthru_rpc.c
new file mode 100644
index 000000000..a50be08ed
--- /dev/null
+++ b/src/spdk/test/external_code/passthru/vbdev_passthru_rpc.c
@@ -0,0 +1,142 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "vbdev_passthru.h"
+
+/* Structure to hold the parameters for this RPC method. */
+struct rpc_construct_passthru {
+ char *base_bdev_name;
+ char *name;
+};
+
+/* Free the allocated memory resource after the RPC handling. */
+static void
+free_rpc_construct_passthru(struct rpc_construct_passthru *r)
+{
+ free(r->base_bdev_name);
+ free(r->name);
+}
+
+/* Structure to decode the input parameters for this RPC method. */
+static const struct spdk_json_object_decoder rpc_construct_passthru_decoders[] = {
+ {"base_bdev_name", offsetof(struct rpc_construct_passthru, base_bdev_name), spdk_json_decode_string},
+ {"name", offsetof(struct rpc_construct_passthru, name), spdk_json_decode_string},
+};
+
+/* Decode the parameters for this RPC method and properly construct the passthru
+ * device. An error status is returned in failure cases.
+ */
+static void
+spdk_rpc_construct_passthru_bdev(struct spdk_jsonrpc_request *request,
+ const struct spdk_json_val *params)
+{
+ struct rpc_construct_passthru req = {NULL};
+ struct spdk_json_write_ctx *w;
+ int rc;
+
+ if (spdk_json_decode_object(params, rpc_construct_passthru_decoders,
+ SPDK_COUNTOF(rpc_construct_passthru_decoders),
+ &req)) {
+ SPDK_ERRLOG("spdk_json_decode_object failed\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+ "spdk_json_decode_object failed");
+ goto cleanup;
+ }
+
+ rc = create_passthru_disk(req.base_bdev_name, req.name);
+ if (rc != 0) {
+ spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
+ goto cleanup;
+ }
+
+ w = spdk_jsonrpc_begin_result(request);
+ spdk_json_write_string(w, req.name);
+ spdk_jsonrpc_end_result(request, w);
+
+cleanup:
+ free_rpc_construct_passthru(&req);
+}
+SPDK_RPC_REGISTER("construct_ext_passthru_bdev", spdk_rpc_construct_passthru_bdev, SPDK_RPC_RUNTIME)
+
+struct rpc_delete_passthru {
+ char *name;
+};
+
+static void
+free_rpc_delete_passthru(struct rpc_delete_passthru *req)
+{
+ free(req->name);
+}
+
+static const struct spdk_json_object_decoder rpc_delete_passthru_decoders[] = {
+ {"name", offsetof(struct rpc_delete_passthru, name), spdk_json_decode_string},
+};
+
+static void
+_spdk_rpc_delete_passthru_bdev_cb(void *cb_arg, int bdeverrno)
+{
+ struct spdk_jsonrpc_request *request = cb_arg;
+ struct spdk_json_write_ctx *w;
+
+ w = spdk_jsonrpc_begin_result(request);
+ spdk_json_write_bool(w, bdeverrno == 0);
+ spdk_jsonrpc_end_result(request, w);
+}
+
+static void
+spdk_rpc_delete_passthru_bdev(struct spdk_jsonrpc_request *request,
+ const struct spdk_json_val *params)
+{
+ struct rpc_delete_passthru req = {NULL};
+ struct spdk_bdev *bdev;
+
+ if (spdk_json_decode_object(params, rpc_delete_passthru_decoders,
+ SPDK_COUNTOF(rpc_delete_passthru_decoders),
+ &req)) {
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+ "spdk_json_decode_object failed");
+ goto cleanup;
+ }
+
+ bdev = spdk_bdev_get_by_name(req.name);
+ if (bdev == NULL) {
+ spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
+ goto cleanup;
+ }
+
+ delete_passthru_disk(bdev, _spdk_rpc_delete_passthru_bdev_cb, request);
+
+cleanup:
+ free_rpc_delete_passthru(&req);
+}
+SPDK_RPC_REGISTER("delete_ext_passthru_bdev", spdk_rpc_delete_passthru_bdev, SPDK_RPC_RUNTIME)
diff --git a/src/spdk/test/external_code/test_make.sh b/src/spdk/test/external_code/test_make.sh
new file mode 100755
index 000000000..af8785d38
--- /dev/null
+++ b/src/spdk/test/external_code/test_make.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+test_root=$(readlink -f $(dirname $0))
+rootdir="$test_root/../.."
+
+source "$rootdir/test/common/autotest_common.sh"
+
+set -e
+SPDK_DIR=$1
+
+# Skip all pci devices. These tests don't rely on them.
+sudo PCI_WHITELIST="NONE" HUGEMEM="$HUGEMEM" $SPDK_DIR/scripts/setup.sh
+
+$SPDK_DIR/configure --with-shared --without-isal --without-ocf --disable-asan
+make -C $SPDK_DIR -j$(nproc)
+
+export SPDK_HEADER_DIR="$SPDK_DIR/include"
+export SPDK_LIB_DIR="$SPDK_DIR/build/lib"
+export DPDK_LIB_DIR="$SPDK_DIR/dpdk/build/lib"
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SPDK_LIB_DIR:$DPDK_LIB_DIR:"$test_root/passthru"
+
+# The default target is to make both the app and bdev and link them against the combined SPDK shared library libspdk.so.
+run_test "external_make_tc1" make -C $test_root hello_world_bdev_shared_combo
+run_test "external_run_tc1" $test_root/hello_world/hello_bdev --json $test_root/hello_world/bdev_external.conf -b TestPT
+
+make -C $test_root clean
+
+# Make just the application linked against the combined SPDK shared library libspdk.so.
+run_test "external_make_tc2" make -C $test_root hello_world_no_bdev_shared_combo
+run_test "external_run_tc2" $test_root/hello_world/hello_bdev --json $test_root/hello_world/bdev.conf -b Malloc0
+
+make -C $test_root clean
+
+# Make both the application and bdev against individual SPDK shared libraries.
+run_test "external_make_tc3" make -C $test_root hello_world_bdev_shared_iso
+run_test "external_run_tc3" $test_root/hello_world/hello_bdev --json $test_root/hello_world/bdev_external.conf -b TestPT
+
+make -C $test_root clean
+
+# Make just the application linked against individual SPDK shared libraries.
+run_test "external_make_tc4" make -C $test_root hello_world_no_bdev_shared_iso
+run_test "external_run_tc4" $test_root/hello_world/hello_bdev --json $test_root/hello_world/bdev.conf -b Malloc0
+
+make -C $test_root clean
+
+make -C $SPDK_DIR clean
+$SPDK_DIR/configure --without-shared --without-isal --without-ocf --disable-asan
+make -C $SPDK_DIR -j$(nproc)
+
+# Make both the application and bdev against individual SPDK archives.
+run_test "external_make_tc5" make -C $test_root hello_world_bdev_static
+run_test "external_run_tc5" $test_root/hello_world/hello_bdev --json $test_root/hello_world/bdev_external.conf -b TestPT
+
+make -C $test_root clean
+
+# Make just the application linked against individual SPDK archives.
+run_test "external_make_tc6" make -C $test_root hello_world_no_bdev_static
+run_test "external_run_tc6" $test_root/hello_world/hello_bdev --json $test_root/hello_world/bdev.conf -b Malloc0
+
+make -C $test_root clean
+make -C $SPDK_DIR -j$(nproc) clean
+
+sudo PCI_WHITELIST="NONE" HUGEMEM="$HUGEMEM" $SPDK_DIR/scripts/setup.sh reset
diff --git a/src/spdk/test/ftl/bdevperf.sh b/src/spdk/test/ftl/bdevperf.sh
new file mode 100755
index 000000000..c0cbc27b9
--- /dev/null
+++ b/src/spdk/test/ftl/bdevperf.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+tests=('-q 1 -w randwrite -t 4 -o 69632' '-q 128 -w randwrite -t 4 -o 4096' '-q 128 -w verify -t 4 -o 4096')
+device=$1
+use_append=$2
+rpc_py=$rootdir/scripts/rpc.py
+
+for ((i = 0; i < ${#tests[@]}; i++)); do
+ timing_enter "${tests[$i]}"
+ "$rootdir/test/bdev/bdevperf/bdevperf" -z -T ftl0 ${tests[$i]} --json <(gen_ftl_nvme_conf) &
+ bdevperf_pid=$!
+
+ trap 'killprocess $bdevperf_pid; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $bdevperf_pid
+ $rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+ $rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1
+ $rpc_py bdev_ftl_create -b ftl0 -d nvme0n1 $use_append
+
+ $rootdir/test/bdev/bdevperf/bdevperf.py perform_tests
+ $rpc_py delete_ftl_bdev -b ftl0
+ $rpc_py bdev_ocssd_delete nvme0n1
+ $rpc_py bdev_nvme_detach_controller nvme0
+ killprocess $bdevperf_pid
+ trap - SIGINT SIGTERM EXIT
+ timing_exit "${tests[$i]}"
+done
diff --git a/src/spdk/test/ftl/common.sh b/src/spdk/test/ftl/common.sh
new file mode 100644
index 000000000..f4620ac3d
--- /dev/null
+++ b/src/spdk/test/ftl/common.sh
@@ -0,0 +1,68 @@
+# Common utility functions to be sourced by the libftl test scripts
+
+function get_chunk_size() {
+ $SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
+ | grep 'Logical blks per chunk' | sed 's/[^0-9]//g'
+}
+
+function get_num_group() {
+ $SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
+ | grep 'Groups' | sed 's/[^0-9]//g'
+}
+
+function get_num_pu() {
+ $SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
+ | grep 'PUs' | sed 's/[^0-9]//g'
+}
+
+function has_separate_md() {
+ local md_type
+ md_type=$($SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
+ | grep 'Metadata Transferred' | cut -d: -f2)
+ if [[ "$md_type" =~ Separate ]]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+function create_nv_cache_bdev() {
+ local name=$1
+ local ocssd_bdf=$2
+ local cache_bdf=$3
+ local num_punits=$4
+
+ local bytes_to_mb=$((1024 * 1024))
+ local chunk_size
+ chunk_size=$(get_chunk_size $ocssd_bdf)
+
+ # We need at least 2 bands worth of data + 1 block
+ local size=$((2 * 4096 * chunk_size * num_punits + 1))
+ # Round the size up to the nearest megabyte
+ local size=$(((size + bytes_to_mb) / bytes_to_mb))
+
+ # Create NVMe bdev on specified device and split it so that it has the desired size
+ local nvc_bdev
+ nvc_bdev=$($rootdir/scripts/rpc.py bdev_nvme_attach_controller -b $name -t PCIe -a $cache_bdf)
+ $rootdir/scripts/rpc.py bdev_split_create $nvc_bdev -s $size 1
+}
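+# Worked example of the sizing math above, assuming illustrative values of
+# chunk_size=4096 logical blocks and num_punits=128:
+#   size_bytes = 2 * 4096 * 4096 * 128 + 1 = 4294967297
+#   size_mb    = (4294967297 + 1048576) / 1048576 = 4097   (integer division)
+# so the split bdev would be created with a size of roughly 4 GiB.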
+
+function gen_ftl_nvme_conf() {
+ jq . <<- JSON
+ {
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "params": {
+ "nvme_adminq_poll_period_us": 100
+ },
+ "method": "bdev_nvme_set_options"
+ }
+ ]
+ }
+ ]
+ }
+ JSON
+}
diff --git a/src/spdk/test/ftl/config/.gitignore b/src/spdk/test/ftl/config/.gitignore
new file mode 100644
index 000000000..5523f29b3
--- /dev/null
+++ b/src/spdk/test/ftl/config/.gitignore
@@ -0,0 +1,2 @@
+ftl.conf
+fio/*.fio
diff --git a/src/spdk/test/ftl/config/fio/drive-prep.fio b/src/spdk/test/ftl/config/fio/drive-prep.fio
new file mode 100644
index 000000000..430172ca9
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/drive-prep.fio
@@ -0,0 +1,15 @@
+[drive_prep]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+
+direct=1
+buffered=0
+size=100%
+randrepeat=0
+norandommap
+bs=4k
+iodepth=128
+numjobs=1
+rw=write
diff --git a/src/spdk/test/ftl/config/fio/randr.fio b/src/spdk/test/ftl/config/fio/randr.fio
new file mode 100644
index 000000000..f3f644476
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randr.fio
@@ -0,0 +1,19 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+direct=1
+thread=1
+buffered=0
+size=100%
+randrepeat=0
+time_based
+norandommap
+
+[test]
+stonewall
+bs=4k
+numjobs=4
+rw=randread
+iodepth=128
+runtime=1200
diff --git a/src/spdk/test/ftl/config/fio/randrw.fio b/src/spdk/test/ftl/config/fio/randrw.fio
new file mode 100644
index 000000000..fdce9a477
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randrw.fio
@@ -0,0 +1,20 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+direct=1
+thread=1
+buffered=0
+size=100%
+randrepeat=0
+time_based
+norandommap
+
+[test]
+stonewall
+bs=4k
+numjobs=4
+rw=randrw
+rwmixread=70
+iodepth=32
+runtime=1200
diff --git a/src/spdk/test/ftl/config/fio/randw-verify-depth128.fio b/src/spdk/test/ftl/config/fio/randw-verify-depth128.fio
new file mode 100644
index 000000000..9adee6cab
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randw-verify-depth128.fio
@@ -0,0 +1,20 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+direct=1
+iodepth=128
+rw=randwrite
+verify=crc32c
+do_verify=1
+verify_dump=0
+verify_state_save=0
+verify_fatal=1
+bs=4k
+random_distribution=normal
+serialize_overlap=1
+io_size=256M
+
+[test]
+numjobs=1
diff --git a/src/spdk/test/ftl/config/fio/randw-verify-j2.fio b/src/spdk/test/ftl/config/fio/randw-verify-j2.fio
new file mode 100644
index 000000000..4610efa3f
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randw-verify-j2.fio
@@ -0,0 +1,25 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+direct=1
+iodepth=128
+rw=randwrite
+verify=crc32c
+do_verify=1
+verify_dump=0
+verify_state_save=0
+verify_backlog=5000
+verify_fatal=1
+bs=4k
+random_distribution=normal
+serialize_overlap=1
+io_size=256M
+
+[first_half]
+offset=0%
+size=50%
+
+[second_half]
+offset=50%
diff --git a/src/spdk/test/ftl/config/fio/randw-verify-qd128-ext.fio b/src/spdk/test/ftl/config/fio/randw-verify-qd128-ext.fio
new file mode 100644
index 000000000..f22b1f2ec
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randw-verify-qd128-ext.fio
@@ -0,0 +1,20 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+direct=1
+iodepth=128
+rw=randwrite
+verify=crc32c
+do_verify=1
+verify_dump=0
+verify_state_save=0
+verify_fatal=1
+bs=4k
+random_distribution=normal
+serialize_overlap=1
+
+[test]
+io_size=64G
+numjobs=1
diff --git a/src/spdk/test/ftl/config/fio/randw-verify.fio b/src/spdk/test/ftl/config/fio/randw-verify.fio
new file mode 100644
index 000000000..edca6c618
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randw-verify.fio
@@ -0,0 +1,20 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+direct=1
+iodepth=1
+rw=randwrite
+size=256M
+verify=crc32c
+do_verify=1
+verify_dump=0
+verify_state_save=0
+verify_backlog=16
+verify_fatal=1
+bs=68k
+random_distribution=normal
+
+[test]
+numjobs=1
diff --git a/src/spdk/test/ftl/config/fio/randw.fio b/src/spdk/test/ftl/config/fio/randw.fio
new file mode 100644
index 000000000..f5b20b124
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randw.fio
@@ -0,0 +1,18 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+direct=1
+thread=1
+buffered=0
+size=100%
+randrepeat=0
+time_based
+norandommap
+
+[test]
+bs=4k
+numjobs=1
+rw=randwrite
+iodepth=64
+runtime=1200
diff --git a/src/spdk/test/ftl/dirty_shutdown.sh b/src/spdk/test/ftl/dirty_shutdown.sh
new file mode 100755
index 000000000..c0e1f3115
--- /dev/null
+++ b/src/spdk/test/ftl/dirty_shutdown.sh
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+while getopts ':u:c:' opt; do
+ case $opt in
+ u) uuid=$OPTARG ;;
+ c) nv_cache=$OPTARG ;;
+ ?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
+ esac
+done
+shift $((OPTIND - 1))
+
+device=$1
+
+restore_kill() {
+ rm -f $testdir/config/ftl.json
+ rm -f $testdir/testfile.md5
+ rm -f $testdir/testfile2.md5
+
+ killprocess $svcpid || true
+ rmmod nbd || true
+}
+
+trap "restore_kill; exit 1" SIGINT SIGTERM EXIT
+
+chunk_size=$(get_chunk_size $device)
+num_group=$(get_num_group $device)
+num_pu=$(get_num_pu $device)
+pu_count=$((num_group * num_pu))
+
+# Write one band worth of data + one extra chunk
+data_size=$((chunk_size * (pu_count + 1)))
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
+svcpid=$!
+waitforlisten $svcpid
+
+if [ -n "$nv_cache" ]; then
+ nvc_bdev=$(create_nv_cache_bdev nvc0 $device $nv_cache $pu_count)
+fi
+
+$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
+ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1 -o"
+
+[ -n "$nvc_bdev" ] && ftl_construct_args+=" -c $nvc_bdev"
+[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
+
+$rpc_py $ftl_construct_args
+
+# Load the nbd driver
+modprobe nbd
+$rpc_py nbd_start_disk ftl0 /dev/nbd0
+waitfornbd nbd0
+
+$rpc_py save_config > $testdir/config/ftl.json
+
+dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$data_size oflag=dsync
+# Calculate checksum of the data written
+dd if=/dev/nbd0 bs=4K count=$data_size | md5sum > $testdir/testfile.md5
+$rpc_py nbd_stop_disk /dev/nbd0
+
+# Force kill bdev service (dirty shutdown) and start it again
+kill -9 $svcpid
+rm -f /dev/shm/spdk_tgt_trace.pid$svcpid
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init &
+svcpid=$!
+waitforlisten $svcpid
+
+$rpc_py load_config < $testdir/config/ftl.json
+waitfornbd nbd0
+
+# Write extra data after restore
+dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$chunk_size seek=$data_size oflag=dsync
+# Save md5 data
+dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum > $testdir/testfile2.md5
+
+# Make sure all data will be read from disk
+echo 3 > /proc/sys/vm/drop_caches
+
+# Verify that the checksum matches and the data is consistent
+dd if=/dev/nbd0 bs=4K count=$data_size | md5sum -c $testdir/testfile.md5
+dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum -c $testdir/testfile2.md5
+
+trap - SIGINT SIGTERM EXIT
+restore_kill
diff --git a/src/spdk/test/ftl/fio.sh b/src/spdk/test/ftl/fio.sh
new file mode 100755
index 000000000..3ad2a085a
--- /dev/null
+++ b/src/spdk/test/ftl/fio.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+declare -A suite
+suite['basic']='randw-verify randw-verify-j2 randw-verify-depth128'
+suite['extended']='drive-prep randw-verify-qd128-ext randw randr randrw'
+
+rpc_py=$rootdir/scripts/rpc.py
+
+fio_kill() {
+ killprocess $svcpid
+ rm -f $FTL_JSON_CONF
+}
+
+device=$1
+tests=${suite[$2]}
+uuid=$3
+
+if [[ $CONFIG_FIO_PLUGIN != y ]]; then
+ echo "FIO not available"
+ exit 1
+fi
+
+if [ -z "$tests" ]; then
+ echo "Invalid test suite '$2'"
+ exit 1
+fi
+
+export FTL_BDEV_NAME=ftl0
+export FTL_JSON_CONF=$testdir/config/ftl.json
+
+trap "fio_kill; exit 1" SIGINT SIGTERM EXIT
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
+svcpid=$!
+waitforlisten $svcpid
+
+$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1
+
+if [ -z "$uuid" ]; then
+ $rpc_py bdev_ftl_create -b ftl0 -d nvme0n1
+else
+ $rpc_py bdev_ftl_create -b ftl0 -d nvme0n1 -u $uuid
+fi
+
+waitforbdev ftl0
+
+(
+ echo '{"subsystems": ['
+ $rpc_py save_subsystem_config -n bdev
+ echo ']}'
+) > $FTL_JSON_CONF
+
+killprocess $svcpid
+trap - SIGINT SIGTERM EXIT
+
+for test in ${tests}; do
+ timing_enter $test
+ fio_bdev $testdir/config/fio/$test.fio
+ timing_exit $test
+done
+
+rm -f $FTL_JSON_CONF
diff --git a/src/spdk/test/ftl/ftl.sh b/src/spdk/test/ftl/ftl.sh
new file mode 100755
index 000000000..b432bdfb0
--- /dev/null
+++ b/src/spdk/test/ftl/ftl.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+function at_ftl_exit() {
+ # restore original driver
+ PCI_WHITELIST="$device" PCI_BLACKLIST="" DRIVER_OVERRIDE="$ocssd_original_dirver" $rootdir/scripts/setup.sh
+}
+
+read -r device _ <<< "$OCSSD_PCI_DEVICES"
+
+if [[ -z "$device" ]]; then
+ echo "OCSSD device list is empty."
+ echo "This test require that OCSSD_PCI_DEVICES environment variable to be set"
+ echo "and point to OCSSD devices PCI BDF. You can specify multiple space"
+ echo "separated BDFs in this case first one will be used."
+ exit 1
+fi
+
+ocssd_original_driver="$(basename $(readlink /sys/bus/pci/devices/$device/driver))"
+
+trap 'at_ftl_exit' SIGINT SIGTERM EXIT
+
+# OCSSD is blacklisted so bind it to vfio/uio driver before testing
+PCI_WHITELIST="$device" PCI_BLACKLIST="" DRIVER_OVERRIDE="" $rootdir/scripts/setup.sh
+
+# Use first regular NVMe disk (non-OC) as non-volatile cache
+nvme_disks=$($rootdir/scripts/gen_nvme.sh --json | jq -r \
+ ".config[] | select(.params.traddr != \"$device\").params.traddr")
+
+for disk in $nvme_disks; do
+ if has_separate_md $disk; then
+ nv_cache=$disk
+ break
+ fi
+done
+
+if [ -z "$nv_cache" ]; then
+ # TODO: once CI has devices with separate metadata support, fail the test here
+ echo "Couldn't find NVMe device to be used as non-volatile cache"
+fi
+
+run_test "ftl_bdevperf" $testdir/bdevperf.sh $device
+run_test "ftl_bdevperf_append" $testdir/bdevperf.sh $device --use_append
+
+run_test "ftl_restore" $testdir/restore.sh $device
+if [ -n "$nv_cache" ]; then
+ run_test "ftl_restore_nv_cache" $testdir/restore.sh -c $nv_cache $device
+fi
+
+if [ -n "$nv_cache" ]; then
+ run_test "ftl_dirty_shutdown" $testdir/dirty_shutdown.sh -c $nv_cache $device
+fi
+
+run_test "ftl_json" $testdir/json.sh $device
+
+if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then
+ run_test "ftl_fio_basic" $testdir/fio.sh $device basic
+
+ "$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
+ svcpid=$!
+
+ trap 'killprocess $svcpid; exit 1' SIGINT SIGTERM EXIT
+
+ waitforlisten $svcpid
+
+ $rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+ $rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
+ uuid=$($rpc_py bdev_ftl_create -b ftl0 -d nvme0n1 | jq -r '.uuid')
+ killprocess $svcpid
+
+ trap - SIGINT SIGTERM EXIT
+
+ run_test "ftl_fio_extended" $testdir/fio.sh $device extended $uuid
+fi
diff --git a/src/spdk/test/ftl/json.sh b/src/spdk/test/ftl/json.sh
new file mode 100755
index 000000000..0052665f8
--- /dev/null
+++ b/src/spdk/test/ftl/json.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+device=$1
+
+json_kill() {
+ killprocess $svcpid
+}
+
+trap "json_kill; exit 1" SIGINT SIGTERM EXIT
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
+svcpid=$!
+waitforlisten $svcpid
+
+# Create new bdev from json configuration
+$rootdir/scripts/gen_ftl.sh -n ftl0 -d nvme0n1 | $rpc_py load_subsystem_config
+$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
+
+waitforbdev ftl0
+uuid=$($rpc_py bdev_get_bdevs | jq -r ".[] | select(.name==\"ftl0\").uuid")
+
+$rpc_py bdev_ftl_delete -b ftl0
+
+# Restore bdev from json configuration
+$rootdir/scripts/gen_ftl.sh -n ftl0 -d nvme0n1 -u $uuid | $rpc_py load_subsystem_config
+$rpc_py bdev_ftl_delete -b ftl0
+$rpc_py bdev_nvme_detach_controller nvme0
+
+trap - SIGINT SIGTERM EXIT
+json_kill
diff --git a/src/spdk/test/ftl/restore.sh b/src/spdk/test/ftl/restore.sh
new file mode 100755
index 000000000..7b6b0ef05
--- /dev/null
+++ b/src/spdk/test/ftl/restore.sh
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+mount_dir=$(mktemp -d)
+
+while getopts ':u:c:' opt; do
+ case $opt in
+ u) uuid=$OPTARG ;;
+ c) nv_cache=$OPTARG ;;
+ ?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
+ esac
+done
+shift $((OPTIND - 1))
+device=$1
+num_group=$(get_num_group $device)
+num_pu=$(get_num_pu $device)
+pu_count=$((num_group * num_pu))
+
+restore_kill() {
+ if mount | grep $mount_dir; then
+ umount $mount_dir
+ fi
+ rm -rf $mount_dir
+ rm -f $testdir/testfile.md5
+ rm -f $testdir/testfile2.md5
+ rm -f $testdir/config/ftl.json
+
+ killprocess $svcpid
+ rmmod nbd || true
+}
+
+trap "restore_kill; exit 1" SIGINT SIGTERM EXIT
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
+svcpid=$!
+# Wait until spdk_tgt starts
+waitforlisten $svcpid
+
+if [ -n "$nv_cache" ]; then
+ nvc_bdev=$(create_nv_cache_bdev nvc0 $device $nv_cache $pu_count)
+fi
+
+$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
+ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1"
+
+[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
+[ -n "$nv_cache" ] && ftl_construct_args+=" -c $nvc_bdev"
+
+$rpc_py $ftl_construct_args
+
+# Load the nbd driver
+modprobe nbd
+$rpc_py nbd_start_disk ftl0 /dev/nbd0
+waitfornbd nbd0
+
+$rpc_py save_config > $testdir/config/ftl.json
+
+# Prepare the disk by creating ext4 fs and putting a file on it
+make_filesystem ext4 /dev/nbd0
+mount /dev/nbd0 $mount_dir
+dd if=/dev/urandom of=$mount_dir/testfile bs=4K count=256K
+sync
+mount -o remount /dev/nbd0 $mount_dir
+md5sum $mount_dir/testfile > $testdir/testfile.md5
+
+# Kill bdev service and start it again
+umount $mount_dir
+killprocess $svcpid
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init &
+svcpid=$!
+# Wait until spdk_tgt starts
+waitforlisten $svcpid
+
+$rpc_py load_config < $testdir/config/ftl.json
+waitfornbd nbd0
+
+mount /dev/nbd0 $mount_dir
+
+# Write second file, to make sure writer thread has restored properly
+dd if=/dev/urandom of=$mount_dir/testfile2 bs=4K count=256K
+md5sum $mount_dir/testfile2 > $testdir/testfile2.md5
+
+# Make sure second file will be read from disk
+echo 3 > /proc/sys/vm/drop_caches
+
+# Check both files have proper data
+md5sum -c $testdir/testfile.md5
+md5sum -c $testdir/testfile2.md5
+
+trap - SIGINT SIGTERM EXIT
+restore_kill
diff --git a/src/spdk/test/fuzz/autofuzz.sh b/src/spdk/test/fuzz/autofuzz.sh
new file mode 100755
index 000000000..92766bb06
--- /dev/null
+++ b/src/spdk/test/fuzz/autofuzz.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+
+source "$rootdir/test/common/autotest_common.sh"
+
+TEST_TIMEOUT=1200
+
+# The term transport is used a bit loosely for vhost tests.
+allowed_nvme_transports=("rdma" "tcp")
+allowed_vhost_transports=("scsi" "blk" "all")
+bad_transport=true
+config_params="--enable-asan --enable-ubsan --enable-debug --without-isal"
+
+# These arguments are used in addition to the test arguments in autotest_common.sh
+for i in "$@"; do
+ case "$i" in
+ --module=*)
+ TEST_MODULE="${i#*=}"
+ ;;
+ --timeout=*)
+ TEST_TIMEOUT="${i#*=}"
+ ;;
+ esac
+done
+
+timing_enter autofuzz
+if [ "$TEST_MODULE" == "nvmf" ]; then
+ allowed_transports=("${allowed_nvme_transports[@]}")
+ if [ $TEST_TRANSPORT == "rdma" ]; then
+ config_params="$config_params --with-rdma"
+ fi
+elif [ "$TEST_MODULE" == "vhost" ]; then
+ allowed_transports=("${allowed_vhost_transports[@]}")
+ config_params="$config_params --with-vhost --with-virtio"
+else
+ echo "Invalid module specified. Please specify either nvmf or vhost."
+ exit 1
+fi
+
+for transport in "${allowed_transports[@]}"; do
+ if [ $transport == "$TEST_TRANSPORT" ]; then
+ bad_transport=false
+ fi
+done
+
+if $bad_transport; then
+ echo "invalid transport. Please supply one of the following for module: $TEST_MODULE."
+ echo "${allowed_transports[@]}"
+ exit 1
+fi
+
+timing_enter make
+cd $rootdir
+./configure $config_params
+$MAKE $MAKEFLAGS
+timing_exit make
+
+# supply --iso to each test module so that it can run setup.sh.
+timing_enter fuzz_module
+if [ "$TEST_MODULE" == "nvmf" ]; then
+ sudo $testdir/autofuzz_nvmf.sh --iso --transport=$TEST_TRANSPORT --timeout=$TEST_TIMEOUT
+fi
+
+if [ "$TEST_MODULE" == "vhost" ]; then
+ sudo $testdir/autofuzz_vhost.sh --iso --transport=$TEST_TRANSPORT --timeout=$TEST_TIMEOUT
+fi
+
+if [ "$TEST_MODULE" == "iscsi" ]; then
+ sudo $testdir/autofuzz_iscsi.sh --iso --transport=$TEST_TRANSPORT --timeout=$TEST_TIMEOUT
+fi
+timing_exit fuzz_module
+timing_exit autofuzz
diff --git a/src/spdk/test/fuzz/autofuzz_iscsi.sh b/src/spdk/test/fuzz/autofuzz_iscsi.sh
new file mode 100755
index 000000000..8793e8bf1
--- /dev/null
+++ b/src/spdk/test/fuzz/autofuzz_iscsi.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+if [ -z "$TARGET_IP" ]; then
+ echo "TARGET_IP not defined in environment"
+ exit 1
+fi
+
+if [ -z "$INITIATOR_IP" ]; then
+ echo "INITIATOR_IP not defined in environment"
+ exit 1
+fi
+
+timing_enter iscsi_fuzz_test
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=4096
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+TEST_TIMEOUT=1200
+
+# This argument is used in addition to the test arguments in autotest_common.sh
+for i in "$@"; do
+ case "$i" in
+ --timeout=*)
+ TEST_TIMEOUT="${i#*=}"
+ ;;
+ esac
+done
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK &> $output_dir/iscsi_autofuzz_tgt_output.txt &
+iscsipid=$!
+
+trap 'killprocess $iscsipid; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $iscsipid
+# The fuzz initiator does not support NOP-In yet, so we increase the NOP-In timeout value for now.
+$rpc_py iscsi_set_options -o 60 -a 16
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+$rpc_py iscsi_create_target_node disk1 disk1_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 256 -d
+sleep 1
+
+trap 'killprocess $iscsipid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+$rootdir/test/app/fuzz/iscsi_fuzz/iscsi_fuzz -m 0xF0 -T $TARGET_IP -t $TEST_TIMEOUT 2> $output_dir/iscsi_autofuzz_logs.txt
+
+$rpc_py iscsi_delete_target_node 'iqn.2016-06.io.spdk:disk1'
+
+# Delete malloc device
+$rpc_py bdev_malloc_delete Malloc0
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $iscsipid
+
+iscsitestfini $1 $2
+
+timing_exit iscsi_fuzz_test
diff --git a/src/spdk/test/fuzz/autofuzz_nvmf.sh b/src/spdk/test/fuzz/autofuzz_nvmf.sh
new file mode 100755
index 000000000..6fc36e23b
--- /dev/null
+++ b/src/spdk/test/fuzz/autofuzz_nvmf.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+TEST_TIMEOUT=1200
+
+# This argument is used in addition to the test arguments in autotest_common.sh
+for i in "$@"; do
+ case "$i" in
+ --timeout=*)
+ TEST_TIMEOUT="${i#*=}"
+ ;;
+ esac
+done
+
+nvmftestinit
+
+timing_enter nvmf_fuzz_test
+
+echo "[Nvme]" > $testdir/nvmf_fuzz.conf
+echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT\" Nvme0" >> $testdir/nvmf_fuzz.conf
+
+"${NVMF_APP[@]}" -m 0xF &> "$output_dir/nvmf_autofuzz_tgt_output.txt" &
+nvmfpid=$!
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; rm -f $testdir/nvmf_fuzz.conf; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport -t $TEST_TRANSPORT -u 8192
+
+$rpc_py bdev_malloc_create -b Malloc0 64 512
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Note that we chose a consistent seed to ensure that this test is consistent in nightly builds.
+$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t $TEST_TIMEOUT -C $testdir/nvmf_fuzz.conf -N -a 2> $output_dir/nvmf_autofuzz_logs.txt
+
+rm -f $testdir/nvmf_fuzz.conf
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+nvmftestfini
+timing_exit nvmf_fuzz_test
diff --git a/src/spdk/test/fuzz/autofuzz_vhost.sh b/src/spdk/test/fuzz/autofuzz_vhost.sh
new file mode 100755
index 000000000..4b040ba82
--- /dev/null
+++ b/src/spdk/test/fuzz/autofuzz_vhost.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+
+rootdir=$(readlink -f $(dirname $0))/../..
+source $rootdir/test/common/autotest_common.sh
+source "$rootdir/scripts/common.sh"
+
+TEST_TIMEOUT=1200
+
+VHOST_APP+=(-p 0)
+FUZZ_RPC_SOCK="/var/tmp/spdk_fuzz.sock"
+VHOST_FUZZ_APP+=(-r "$FUZZ_RPC_SOCK" --wait-for-rpc)
+
+vhost_rpc_py="$rootdir/scripts/rpc.py"
+fuzz_generic_rpc_py="$rootdir/scripts/rpc.py -s $FUZZ_RPC_SOCK"
+fuzz_specific_rpc_py="$rootdir/test/app/fuzz/common/fuzz_rpc.py -s $FUZZ_RPC_SOCK"
+
+# This argument is used in addition to the test arguments in autotest_common.sh
+for i in "$@"; do
+ case "$i" in
+ --timeout=*)
+ TEST_TIMEOUT="${i#*=}"
+ ;;
+ esac
+done
+
+timing_enter vhost_fuzz_test
+
+# TODO: refactor this to use the vhosttestinit function when it becomes available.
+timing_enter setup
+$rootdir/scripts/setup.sh
+timing_exit setup
+
+"${VHOST_APP[@]}" &> "$output_dir/vhost_fuzz_tgt_output.txt" &
+vhostpid=$!
+waitforlisten $vhostpid
+
+trap 'killprocess $vhostpid; exit 1' SIGINT SIGTERM exit
+
+"${VHOST_FUZZ_APP[@]}" -t $TEST_TIMEOUT 2> "$output_dir/vhost_autofuzz_output1.txt" &
+fuzzpid=$!
+waitforlisten $fuzzpid $FUZZ_RPC_SOCK
+
+trap 'killprocess $vhostpid; killprocess $fuzzpid; exit 1' SIGINT SIGTERM exit
+
+if [ "$TEST_TRANSPORT" == "bdev" ] || [ "$TEST_TRANSPORT" == "all" ]; then
+ $vhost_rpc_py bdev_malloc_create -b Malloc0 64 512
+ $vhost_rpc_py vhost_create_blk_controller Vhost.1 Malloc0
+
+ # test the vhost blk controller with valid data buffers.
+ $fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/Vhost.1 -b -v
+fi
+
+if [ "$TEST_TRANSPORT" == "scsi" ] || [ "$TEST_TRANSPORT" == "all" ]; then
+ $vhost_rpc_py bdev_malloc_create -b Malloc1 64 512
+ $vhost_rpc_py vhost_create_scsi_controller naa.VhostScsi0.1
+ $vhost_rpc_py vhost_scsi_controller_add_target naa.VhostScsi0.1 0 Malloc1
+
+ $vhost_rpc_py bdev_malloc_create -b Malloc2 64 512
+ $vhost_rpc_py vhost_create_scsi_controller naa.VhostScsi0.2
+ $vhost_rpc_py vhost_scsi_controller_add_target naa.VhostScsi0.2 0 Malloc2
+
+ # test the vhost scsi I/O queue with valid data buffers on a valid lun.
+ $fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.1 -l -v
+ # test the vhost scsi management queue with valid data buffers.
+ $fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.2 -v -m
+fi
+
+# The test won't actually begin until framework_start_init is called.
+$fuzz_generic_rpc_py framework_start_init
+
+wait $fuzzpid
+
+trap - SIGINT SIGTERM exit
+killprocess $vhostpid
+timing_exit vhost_fuzz_test
diff --git a/src/spdk/test/ioat/ioat.sh b/src/spdk/test/ioat/ioat.sh
new file mode 100755
index 000000000..705aedcef
--- /dev/null
+++ b/src/spdk/test/ioat/ioat.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+run_test "ioat_perf" $SPDK_EXAMPLE_DIR/ioat_perf -t 1
+
+run_test "ioat_verify" $SPDK_EXAMPLE_DIR/verify -t 1
diff --git a/src/spdk/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh b/src/spdk/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh
new file mode 100755
index 000000000..15dfe1165
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+iscsitestinit $1 $2
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" -m 0x2 -p 1 -s 512 --wait-for-rpc &
+pid=$!
+echo "iSCSI target launched. pid: $pid"
+trap 'killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $pid
+$rpc_py iscsi_set_options -o 30 -a 4
+# Minimal number of bdev io pool (5) and cache (1)
+$rpc_py bdev_set_options -p 5 -c 1
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "Malloc0:0" ==> use Malloc0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py iscsi_create_target_node disk1 disk1_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 256 -d
+sleep 1
+trap 'killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+"$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w write -t 1
+"$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w read -t 1
+"$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w flush -t 1
+"$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w unmap -t 1
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $pid
+
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/calsoft/calsoft.py b/src/spdk/test/iscsi_tgt/calsoft/calsoft.py
new file mode 100755
index 000000000..5be658e61
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/calsoft/calsoft.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+
+import os
+import time
+import sys
+import subprocess
+import threading
+import json
+
+CALSOFT_BIN_PATH = "/usr/local/calsoft/iscsi-pcts-v1.5/bin"
+
+'''
+11/26/2015 disable tc_login_11_2 and tc_login_11_4
+RFC 7143 6.3
+Neither the initiator nor the target should attempt to declare or
+negotiate a parameter more than once during login, except for
+responses to specific keys that explicitly allow repeated key
+declarations (e.g., TargetAddress)
+
+The spec didn't make it clear what other keys could be re-declared.
+Discussed this with UNH and reached the conclusion that TargetName/
+TargetAddress/MaxRecvDataSegmentLength could be re-declared.
+'''
+'''
+12/1/2015 add tc_login_2_2 to known_failed_cases
+RFC 7143 6.1
+A standard-label MUST begin with a capital letter and must not exceed
+63 characters.
+key name: A standard-label
+'''
+'''
+06/10/2020 add tc_login_29_1 to known_failed_cases
+RFC 3720 12.19. DataSequenceInOrder
+Irrelevant when: SessionType=Discovery
+'''
+
+known_failed_cases = ['tc_ffp_15_2', 'tc_ffp_29_2', 'tc_ffp_29_3', 'tc_ffp_29_4',
+ 'tc_err_1_1', 'tc_err_1_2', 'tc_err_2_8',
+ 'tc_err_3_1', 'tc_err_3_2', 'tc_err_3_3',
+ 'tc_err_3_4', 'tc_err_5_1', 'tc_login_3_1',
+ 'tc_login_11_2', 'tc_login_11_4', 'tc_login_2_2', 'tc_login_29_1']
+
+
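+# Run a single Calsoft test case binary, record PASS/FAIL in result_list and
+# save its output to <log_dir_path>/<case>.txt.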
+def run_case(case, result_list, log_dir_path):
+ try:
+ case_log = subprocess.check_output("{}/{}".format(CALSOFT_BIN_PATH, case), stderr=subprocess.STDOUT, shell=True).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ result_list.append({"Name": case, "Result": "FAIL"})
+ case_log = e.output.decode('utf-8')
+ else:
+ result_list.append({"Name": case, "Result": "PASS"})
+ with open(log_dir_path + case + '.txt', 'w') as f:
+ f.write(case_log)
+
+
+def main():
+ if not os.path.exists(CALSOFT_BIN_PATH):
+ print("The Calsoft test suite is not available on this machine.")
+ sys.exit(1)
+
+ output_dir = sys.argv[1]
+ if len(sys.argv) > 2:
+ output_file = sys.argv[2]
+ else:
+ output_file = "%s/calsoft.json" % (output_dir)
+
+ log_dir = "%s/calsoft/" % output_dir
+
+ all_cases = [x for x in os.listdir(CALSOFT_BIN_PATH) if x.startswith('tc')]
+ all_cases.sort()
+
+ case_result_list = []
+
+ result = {"Calsoft iSCSI tests": case_result_list}
+
+ if not os.path.exists(log_dir):
+ os.mkdir(log_dir)
+ for case in known_failed_cases:
+ print("Skipping %s. It is known to fail." % (case))
+ case_result_list.append({"Name": case, "Result": "SKIP"})
+
+ thread_objs = []
+ left_cases = list(set(all_cases) - set(known_failed_cases))
+ index = 0
+ max_thread_count = 32
+
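+    # Simple thread pool: keep at most max_thread_count cases running at a
+    # time until every remaining case has been started.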
+ while index < len(left_cases):
+ cur_thread_count = 0
+ for thread_obj in thread_objs:
+ if thread_obj.is_alive():
+ cur_thread_count += 1
+ while cur_thread_count < max_thread_count and index < len(left_cases):
+ thread_obj = threading.Thread(target=run_case, args=(left_cases[index], case_result_list, log_dir, ))
+ thread_obj.start()
+ time.sleep(0.02)
+ thread_objs.append(thread_obj)
+ index += 1
+ cur_thread_count += 1
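+
+    # Wait up to 30 seconds for all started threads to finish; the for/else
+    # construct breaks out of the outer loop once no thread is alive.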
+ end_time = time.time() + 30
+ while time.time() < end_time:
+ for thread_obj in thread_objs:
+ if thread_obj.is_alive():
+ break
+ else:
+ break
+ else:
+ print("Thread timeout")
+ exit(1)
+ with open(output_file, 'w') as f:
+ json.dump(obj=result, fp=f, indent=2)
+
+ failed = 0
+ for x in case_result_list:
+ if x["Result"] == "FAIL":
+ print("Test case %s failed." % (x["Name"]))
+ failed = 1
+ exit(failed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/spdk/test/iscsi_tgt/calsoft/calsoft.sh b/src/spdk/test/iscsi_tgt/calsoft/calsoft.sh
new file mode 100755
index 000000000..048b529d5
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/calsoft/calsoft.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+delete_tmp_conf_files() {
+ rm -f /usr/local/etc/its.conf
+}
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+calsoft_py="$testdir/calsoft.py"
+
+# Copy the calsoft config file to /usr/local/etc
+mkdir -p /usr/local/etc
+cp $testdir/its.conf /usr/local/etc/
+
+# Append target ip to calsoft config
+echo "IP=$TARGET_IP" >> /usr/local/etc/its.conf
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" -m 0x1 --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap 'killprocess $pid; delete_tmp_conf_files; exit 1 ' SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py load_subsystem_config < $testdir/iscsi.json
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_auth_group 1 -c 'user:root secret:tester'
+$rpc_py iscsi_set_discovery_auth -g 1
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py bdev_malloc_create -b MyBdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "MyBdev:0" ==> use MyBdev blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "0 0 0 1" ==> enable CHAP authentication using auth group 1
+$rpc_py iscsi_create_target_node Target3 Target3_alias 'MyBdev:0' $PORTAL_TAG:$INITIATOR_TAG 64 -g 1
+sleep 1
+
+if [ "$1" ]; then
+ $calsoft_py "$output_dir" "$1"
+ failed=$?
+else
+ $calsoft_py "$output_dir"
+ failed=$?
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $pid
+delete_tmp_conf_files
+exit $failed
diff --git a/src/spdk/test/iscsi_tgt/calsoft/iscsi.json b/src/spdk/test/iscsi_tgt/calsoft/iscsi.json
new file mode 100644
index 000000000..64b4a9595
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/calsoft/iscsi.json
@@ -0,0 +1,15 @@
+{
+ "subsystem": "iscsi",
+ "config": [
+ {
+ "params": {
+ "allow_duplicated_isid": true,
+ "nop_timeout": 30,
+ "nop_in_interval": 10,
+ "max_sessions": 256,
+ "error_recovery_level": 2
+ },
+ "method": "iscsi_set_options"
+ }
+ ]
+}
diff --git a/src/spdk/test/iscsi_tgt/calsoft/its.conf b/src/spdk/test/iscsi_tgt/calsoft/its.conf
new file mode 100644
index 000000000..6469dab63
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/calsoft/its.conf
@@ -0,0 +1,7 @@
+InitiatorName=iqn.1994-05.com.redhat:b3283535dc3b
+TargetName=iqn.2016-06.io.spdk:Target3
+DefaultTime2Retain=20
+DefaultTime2Wait=2
+AuthMethod=CHAP,None
+UserName=root
+PassWord=tester
diff --git a/src/spdk/test/iscsi_tgt/common.sh b/src/spdk/test/iscsi_tgt/common.sh
new file mode 100644
index 000000000..d42a2a3a2
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/common.sh
@@ -0,0 +1,209 @@
+# Network configuration
+TARGET_INTERFACE="spdk_tgt_int"
+INITIATOR_INTERFACE="spdk_init_int"
+TARGET_NAMESPACE="spdk_iscsi_ns"
+TARGET_NS_CMD=(ip netns exec "$TARGET_NAMESPACE")
+
+# iSCSI target configuration
+TARGET_IP=10.0.0.1
+INITIATOR_IP=10.0.0.2
+ISCSI_PORT=3260
+NETMASK=$INITIATOR_IP/32
+INITIATOR_TAG=2
+INITIATOR_NAME=ANY
+PORTAL_TAG=1
+ISCSI_APP=("${TARGET_NS_CMD[@]}" "${ISCSI_APP[@]}")
+if [ $SPDK_TEST_VPP -eq 1 ]; then
+ ISCSI_APP+=(-L sock_vpp)
+fi
+ISCSI_TEST_CORE_MASK=0xFF
+
+function create_veth_interfaces() {
+ # $1 = test type (posix/vpp)
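+	# Remove any leftover namespace/interface from previous runs before
+	# creating a fresh veth pair.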
+ ip netns del $TARGET_NAMESPACE || true
+ ip link delete $INITIATOR_INTERFACE || true
+
+ trap 'cleanup_veth_interfaces $1; exit 1' SIGINT SIGTERM EXIT
+
+ # Create veth (Virtual ethernet) interface pair
+ ip link add $INITIATOR_INTERFACE type veth peer name $TARGET_INTERFACE
+ ip addr add $INITIATOR_IP/24 dev $INITIATOR_INTERFACE
+ ip link set $INITIATOR_INTERFACE up
+
+ # Create and add interface for target to network namespace
+ ip netns add $TARGET_NAMESPACE
+ ip link set $TARGET_INTERFACE netns $TARGET_NAMESPACE
+
+ # Accept connections from veth interface
+ iptables -I INPUT 1 -i $INITIATOR_INTERFACE -p tcp --dport $ISCSI_PORT -j ACCEPT
+
+ "${TARGET_NS_CMD[@]}" ip link set $TARGET_INTERFACE up
+
+ if [ "$1" == "posix" ]; then
+ "${TARGET_NS_CMD[@]}" ip link set lo up
+ "${TARGET_NS_CMD[@]}" ip addr add $TARGET_IP/24 dev $TARGET_INTERFACE
+
+ # Verify connectivity
+ ping -c 1 $TARGET_IP
+ ip netns exec $TARGET_NAMESPACE ping -c 1 $INITIATOR_IP
+ else
+ start_vpp
+ fi
+}
+
+function cleanup_veth_interfaces() {
+ # $1 = test type (posix/vpp)
+ if [ "$1" == "vpp" ]; then
+ kill_vpp
+ fi
+
+	# Clean up the veth interfaces and network namespace
+	# Note: removing one veth removes the whole pair
+ ip link delete $INITIATOR_INTERFACE
+ ip netns del $TARGET_NAMESPACE
+}
+
+function iscsitestinit() {
+ if [ "$1" == "iso" ]; then
+ $rootdir/scripts/setup.sh
+ if [ -n "$2" ]; then
+ create_veth_interfaces $2
+ else
+ # default to posix
+ create_veth_interfaces "posix"
+ fi
+ fi
+}
+
+function waitforiscsidevices() {
+ local num=$1
+
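+	# Poll iscsiadm for up to 2 seconds (20 x 0.1s) until exactly $num attached
+	# scsi disks are reported.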
+ for ((i = 1; i <= 20; i++)); do
+ n=$(iscsiadm -m session -P 3 | grep -c "Attached scsi disk sd[a-z]*" || true)
+ if [ $n -ne $num ]; then
+ sleep 0.1
+ else
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+function iscsitestfini() {
+ if [ "$1" == "iso" ]; then
+ if [ -n "$2" ]; then
+ cleanup_veth_interfaces $2
+ else
+ # default to posix
+ cleanup_veth_interfaces "posix"
+ fi
+ $rootdir/scripts/setup.sh reset
+ fi
+}
+
+function start_vpp() {
+	# Make sure the posix side doesn't send jumbo packets: on the VPP side
+	# the maximum TCP MTU is 1460 and the tests don't work reliably with
+	# larger packets.
+ MTU=1460
+ MTU_W_HEADER=$((MTU + 20))
+ ip link set dev $INITIATOR_INTERFACE mtu $MTU
+ ethtool -K $INITIATOR_INTERFACE tso off
+ ethtool -k $INITIATOR_INTERFACE
+
+ # Start VPP process in SPDK target network namespace
+ "${TARGET_NS_CMD[@]}" vpp \
+ unix { nodaemon cli-listen /run/vpp/cli.sock } \
+ dpdk { no-pci } \
+ session { evt_qs_memfd_seg } \
+ socksvr { socket-name /run/vpp-api.sock } \
+ plugins { \
+ plugin default { disable } \
+ plugin dpdk_plugin.so { enable } \
+ } &
+
+ vpp_pid=$!
+ echo "VPP Process pid: $vpp_pid"
+
+ gdb_attach $vpp_pid &
+
+ # Wait until VPP starts responding
+ xtrace_disable
+ counter=40
+ while [ $counter -gt 0 ]; do
+ vppctl show version | grep -E "vpp v[0-9]+\.[0-9]+" && break
+ counter=$((counter - 1))
+ sleep 0.5
+ done
+ xtrace_restore
+ if [ $counter -eq 0 ]; then
+ return 1
+ fi
+
+	# The VPP commands below are masked with "|| true" so the test can run
+	# in the CI system. For unknown reasons, when run via CI these commands
+	# return exit code 141 (pipefail) despite producing valid output.
+	# Using "|| true" does not impact the "-e" flag used in test scripts,
+	# because vppctl cli commands always return 0 even if there was an
+	# error.
+	# As a result, grep checks on the command outputs must be used to
+	# verify the vpp configuration and connectivity.
+
+ # Setup host interface
+ vppctl create host-interface name $TARGET_INTERFACE || true
+ VPP_TGT_INT="host-$TARGET_INTERFACE"
+ vppctl set interface state $VPP_TGT_INT up || true
+ vppctl set interface ip address $VPP_TGT_INT $TARGET_IP/24 || true
+ vppctl set interface mtu $MTU $VPP_TGT_INT || true
+
+ vppctl show interface | tr -s " " | grep -E "host-$TARGET_INTERFACE [0-9]+ up $MTU/0/0/0"
+
+ # Disable session layer
+ # NOTE: VPP net framework should enable it itself.
+ vppctl session disable || true
+
+ # Verify connectivity
+ vppctl show int addr | grep -E "$TARGET_IP/24"
+ ip addr show $INITIATOR_INTERFACE
+ ip netns exec $TARGET_NAMESPACE ip addr show $TARGET_INTERFACE
+ sleep 3
+	# SC1010: in "ping -M do", "do" is an option argument, not a bash keyword
+ # shellcheck disable=SC1010
+ ping -c 1 $TARGET_IP -s $((MTU - 28)) -M do
+ vppctl ping $INITIATOR_IP repeat 1 size $((MTU - (28 + 8))) verbose | grep -E "$MTU_W_HEADER bytes from $INITIATOR_IP"
+}
+
+function kill_vpp() {
+ vppctl delete host-interface name $TARGET_INTERFACE || true
+
+ # Dump VPP configuration before kill
+ vppctl show api clients || true
+ vppctl show session || true
+ vppctl show errors || true
+
+ killprocess $vpp_pid
+}
+function initiator_json_config() {
+ # Prepare config file for iSCSI initiator
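+	# Any extra JSON objects passed as arguments are appended to the bdev
+	# config list after a comma (the ${*:+,$*} expansion below).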
+ jq . <<- JSON
+ {
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "method": "bdev_iscsi_create",
+ "params": {
+ "name": "iSCSI0",
+ "url": "iscsi://$TARGET_IP/iqn.2016-06.io.spdk:disk1/0",
+ "initiator_iqn": "iqn.2016-06.io.spdk:disk1/0"
+ }
+ }${*:+,$*}
+ ]
+ }
+ ]
+ }
+ JSON
+}
diff --git a/src/spdk/test/iscsi_tgt/digests/digests.sh b/src/spdk/test/iscsi_tgt/digests/digests.sh
new file mode 100755
index 000000000..3a03c10ec
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/digests/digests.sh
@@ -0,0 +1,94 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+function node_login_fio_logout() {
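+	# $@ = iscsiadm connection parameters to update before logging in.
+	# Log in to the target, run short fio write/read workloads, then log out.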
+ for arg in "$@"; do
+ iscsiadm -m node -p $TARGET_IP:$ISCSI_PORT -o update -n node.conn[0].iscsi.$arg
+ done
+ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+ waitforiscsidevices 1
+ $fio_py -p iscsi -i 512 -d 1 -t write -r 2
+ $fio_py -p iscsi -i 512 -d 1 -t read -r 2
+ iscsiadm -m node --logout -p $TARGET_IP:$ISCSI_PORT
+ waitforiscsidevices 0
+}
+
+function iscsi_header_digest_test() {
+ # Enable HeaderDigest to CRC32C
+ node_login_fio_logout "HeaderDigest -v CRC32C"
+
+	# Let the iSCSI target decide its preference for
+	# HeaderDigest based on its capability.
+ node_login_fio_logout "HeaderDigest -v CRC32C,None"
+}
+
+function iscsi_header_data_digest_test() {
+ # Only enable HeaderDigest to CRC32C
+ node_login_fio_logout "HeaderDigest -v CRC32C" "DataDigest -v None"
+
+ # Only enable DataDigest to CRC32C
+ node_login_fio_logout "HeaderDigest -v None" "DataDigest -v CRC32C"
+
+	# Let the iSCSI target decide its preference for both
+	# HeaderDigest and DataDigest based on its capability.
+ node_login_fio_logout "HeaderDigest -v CRC32C,None" "DataDigest -v CRC32C,None"
+
+	# Enable both HeaderDigest and DataDigest.
+ node_login_fio_logout "HeaderDigest -v CRC32C" "DataDigest -v CRC32C"
+}
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap 'killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py iscsi_set_options -o 30 -a 16
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "Malloc0:0" ==> use Malloc0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py iscsi_create_target_node Target3 Target3_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+
+# The iscsiadm shipped with some Fedora releases cannot set the DataDigest parameter.
+# Check for this and skip the DataDigest tests if setting it fails.
+DataDigestAbility=$(iscsiadm -m node -p $TARGET_IP:$ISCSI_PORT -o update -n node.conn[0].iscsi.DataDigest -v None 2>&1 || true)
+if [ "$DataDigestAbility"x != x ]; then
+ run_test "iscsi_tgt_digest" iscsi_header_digest_test
+else
+ run_test "iscsi_tgt_data_digest" iscsi_header_data_digest_test
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+killprocess $pid
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/ext4test/ext4test.sh b/src/spdk/test/iscsi_tgt/ext4test/ext4test.sh
new file mode 100755
index 000000000..8de417367
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/ext4test/ext4test.sh
@@ -0,0 +1,131 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+rpc_py="$rootdir/scripts/rpc.py"
+node_base="iqn.2013-06.com.intel.ch.spdk"
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap '$rpc_py bdev_split_delete Name0n1 || true; killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py iscsi_set_options -o 30 -a 4 -b $node_base
+$rpc_py framework_start_init
+$rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
+$rpc_py bdev_malloc_create 512 4096 --name Malloc0
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py bdev_error_create 'Malloc0'
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py iscsi_create_target_node Target0 Target0_alias EE_Malloc0:0 1:2 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+waitforiscsidevices 1
+
+trap 'for new_dir in $(dir -d /mnt/*dir); do umount $new_dir; rm -rf $new_dir; done;
+ iscsicleanup; killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+echo "Test error injection"
+$rpc_py bdev_error_inject_error EE_Malloc0 'all' 'failure' -n 1000
+
+dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+
+set +e
+waitforfile /dev/${dev}
+if make_filesystem ext4 /dev/${dev}; then
+ echo "mkfs successful - expected failure"
+ iscsicleanup
+ killprocess $pid
+ exit 1
+else
+ echo "mkfs failed as expected"
+fi
+set -e
+
+iscsicleanup
+$rpc_py bdev_error_inject_error EE_Malloc0 'clear' 'failure'
+$rpc_py iscsi_delete_target_node $node_base:Target0
+echo "Error injection test done"
+
+if [ -z "$NO_NVME" ]; then
+ bdev_size=$(get_bdev_size Nvme0n1)
+ split_size=$((bdev_size / 2))
+ if [ $split_size -gt 10000 ]; then
+ split_size=10000
+ fi
+ $rpc_py bdev_split_create Nvme0n1 2 -s $split_size
+ $rpc_py iscsi_create_target_node Target1 Target1_alias Nvme0n1p0:0 1:2 64 -d
+fi
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+waitforiscsidevices 1
+
+devs=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+
+for dev in $devs; do
+ make_filesystem ext4 /dev/${dev}
+ mkdir -p /mnt/${dev}dir
+ mount -o sync /dev/${dev} /mnt/${dev}dir
+
+ rsync -qav --exclude=".git" --exclude="*.o" $rootdir/ /mnt/${dev}dir/spdk
+
+ make -C /mnt/${dev}dir/spdk clean
+ (cd /mnt/${dev}dir/spdk && ./configure $(get_config_params))
+ make -C /mnt/${dev}dir/spdk -j16
+
+ # Print out space consumed on target device to help decide
+ # if/when we need to increase the size of the malloc LUN
+ df -h /dev/$dev
+
+ rm -rf /mnt/${dev}dir/spdk
+done
+
+for dev in $devs; do
+ umount /mnt/${dev}dir
+ rm -rf /mnt/${dev}dir
+
+ stats=($(cat /sys/block/$dev/stat))
+ echo ""
+ echo "$dev stats"
+ printf "READ IO cnt: % 8u merges: % 8u sectors: % 8u ticks: % 8u\n" \
+ ${stats[0]} ${stats[1]} ${stats[2]} ${stats[3]}
+ printf "WRITE IO cnt: % 8u merges: % 8u sectors: % 8u ticks: % 8u\n" \
+ ${stats[4]} ${stats[5]} ${stats[6]} ${stats[7]}
+ printf "in flight: % 8u io ticks: % 8u time in queue: % 8u\n" \
+ ${stats[8]} ${stats[9]} ${stats[10]}
+ echo ""
+done
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+$rpc_py bdev_split_delete Nvme0n1
+$rpc_py bdev_error_delete EE_Malloc0
+
+if [ -z "$NO_NVME" ]; then
+ $rpc_py bdev_nvme_detach_controller Nvme0
+fi
+
+killprocess $pid
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/filesystem/filesystem.sh b/src/spdk/test/iscsi_tgt/filesystem/filesystem.sh
new file mode 100755
index 000000000..156b5bde3
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/filesystem/filesystem.sh
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+source $rootdir/scripts/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+rpc_py="$rootdir/scripts/rpc.py"
+# Remove lvol bdevs and stores.
+function remove_backends() {
+ echo "INFO: Removing lvol bdev"
+ $rpc_py bdev_lvol_delete "lvs_0/lbd_0"
+
+ echo "INFO: Removing lvol stores"
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_0
+
+ echo "INFO: Removing NVMe"
+ $rpc_py bdev_nvme_detach_controller Nvme0
+
+ return 0
+}
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap 'killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py iscsi_set_options -o 30 -a 16
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+bdf=$(get_first_nvme_bdf)
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py bdev_nvme_attach_controller -b "Nvme0" -t "pcie" -a $bdf
+
+ls_guid=$($rpc_py bdev_lvol_create_lvstore Nvme0n1 lvs_0)
+free_mb=$(get_lvs_free_mb "$ls_guid")
+# Use at most 2048MiB to reduce the test time
+if [ $free_mb -gt 2048 ]; then
+ $rpc_py bdev_lvol_create -u $ls_guid lbd_0 2048
+else
+ $rpc_py bdev_lvol_create -u $ls_guid lbd_0 $free_mb
+fi
+# "lvs_0/lbd_0:0" ==> use lvs_0/lbd_0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "256" ==> iSCSI queue depth 256
+# "-d" ==> disable CHAP authentication
+$rpc_py iscsi_create_target_node Target1 Target1_alias 'lvs_0/lbd_0:0' $PORTAL_TAG:$INITIATOR_TAG 256 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+waitforiscsidevices 1
+
+trap 'iscsicleanup; remove_backends; umount /mnt/device; rm -rf /mnt/device; killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+mkdir -p /mnt/device
+
+dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+
+waitforfile /dev/$dev
+parted -s /dev/$dev mklabel msdos
+parted -s /dev/$dev mkpart primary '0%' '100%'
+sleep 1
+
+function filesystem_test() {
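+	# Create the given filesystem on the iSCSI disk, write a file, log out and
+	# back in, then verify the file is still present after remounting.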
+ fstype=$1
+
+ make_filesystem ${fstype} /dev/${dev}1
+ mount /dev/${dev}1 /mnt/device
+ if [ $RUN_NIGHTLY -eq 1 ]; then
+ fio -filename=/mnt/device/test -direct=1 -iodepth 64 -thread=1 -invalidate=1 -rw=randwrite -ioengine=libaio -bs=4k \
+ -size=1024M -name=job0
+ umount /mnt/device
+
+ iscsiadm -m node --logout
+ waitforiscsidevices 0
+ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+ waitforiscsidevices 1
+
+ dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+
+ waitforfile /dev/${dev}1
+ mount -o rw /dev/${dev}1 /mnt/device
+ if [ -f "/mnt/device/test" ]; then
+ echo "File existed."
+ fio -filename=/mnt/device/test -direct=1 -iodepth 64 -thread=1 -invalidate=1 -rw=randread \
+ -ioengine=libaio -bs=4k -runtime=20 -time_based=1 -name=job0
+ else
+ echo "File doesn't exist."
+ exit 1
+ fi
+
+ rm -rf /mnt/device/test
+ umount /mnt/device
+ else
+ touch /mnt/device/aaa
+ umount /mnt/device
+
+ iscsiadm -m node --logout
+ waitforiscsidevices 0
+ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+ waitforiscsidevices 1
+
+ dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+
+ waitforfile /dev/${dev}1
+ mount -o rw /dev/${dev}1 /mnt/device
+
+ if [ -f "/mnt/device/aaa" ]; then
+ echo "File existed."
+ else
+ echo "File doesn't exist."
+ exit 1
+ fi
+
+ rm -rf /mnt/device/aaa
+ umount /mnt/device
+ fi
+}
+
+run_test "iscsi_tgt_filesystem_ext4" filesystem_test "ext4"
+run_test "iscsi_tgt_filesystem_btrfs" filesystem_test "btrfs"
+run_test "iscsi_tgt_filesystem_xfs" filesystem_test "xfs"
+
+rm -rf /mnt/device
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+remove_backends
+killprocess $pid
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/fio/fio.sh b/src/spdk/test/iscsi_tgt/fio/fio.sh
new file mode 100755
index 000000000..ae3a2f308
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/fio/fio.sh
@@ -0,0 +1,150 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+delete_tmp_files() {
+ rm -f $testdir/iscsi2.json
+ rm -f ./local-job0-0-verify.state
+ rm -f ./local-job1-1-verify.state
+}
+
+function running_config() {
+ # dump a config file from the running iscsi_tgt
+ $rpc_py save_config > $testdir/iscsi2.json
+ sleep 1
+
+ # now start iscsi_tgt again using the generated config file
+ # keep the same iscsiadm configuration to confirm that the
+ # config file matched the running configuration
+ killprocess $pid
+ trap 'iscsicleanup; delete_tmp_files; exit 1' SIGINT SIGTERM EXIT
+
+ timing_enter start_iscsi_tgt2
+
+ "${ISCSI_APP[@]}" --wait-for-rpc &
+ pid=$!
+ echo "Process pid: $pid"
+ trap 'iscsicleanup; killprocess $pid; delete_tmp_files; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $pid
+
+ $rpc_py load_config < $testdir/iscsi2.json
+
+ echo "iscsi_tgt is listening. Running tests..."
+
+ timing_exit start_iscsi_tgt2
+
+ sleep 1
+ $fio_py -p iscsi -i 4096 -d 1 -t randrw -r 5
+}
+
+if [ -z "$TARGET_IP" ]; then
+ echo "TARGET_IP not defined in environment"
+ exit 1
+fi
+
+if [ -z "$INITIATOR_IP" ]; then
+ echo "INITIATOR_IP not defined in environment"
+ exit 1
+fi
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=4096
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap 'killprocess $pid; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+
+$rpc_py load_config < $testdir/iscsi.json
+
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+# Create a RAID-0 bdev from two malloc bdevs
+malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+$rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$malloc_bdevs"
+bdev=$($rpc_py bdev_malloc_create 1024 512)
+# "raid0:0" ==> use raid0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py iscsi_create_target_node Target3 Target3_alias "raid0:0 ${bdev}:1" $PORTAL_TAG:$INITIATOR_TAG 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+waitforiscsidevices 2
+
+trap 'iscsicleanup; killprocess $pid; iscsitestfini $1 $2; delete_tmp_files; exit 1' SIGINT SIGTERM EXIT
+
+$fio_py -p iscsi -i 4096 -d 1 -t randrw -r 1 -v
+$fio_py -p iscsi -i 131072 -d 32 -t randrw -r 1 -v
+$fio_py -p iscsi -i 524288 -d 128 -t randrw -r 1 -v
+$fio_py -p iscsi -i 1048576 -d 1024 -t read -r 1 -n 4
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ $fio_py -p iscsi -i 4096 -d 1 -t write -r 300 -v
+
+ # Run the running_config test which will generate a config file from the
+ # running iSCSI target, then kill and restart the iSCSI target using the
+ # generated config file
+ # Temporarily disabled
+ # running_config
+fi
+
+# Start hotplug test case.
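+# Run fio in the background, remove the underlying bdevs while I/O is in
+# flight, and expect fio to fail once its devices disappear.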
+$fio_py -p iscsi -i 1048576 -d 128 -t rw -r 10 &
+fio_pid=$!
+
+sleep 3
+
+# Delete raid0 blockdev
+$rpc_py bdev_raid_delete 'raid0'
+
+# Delete all allocated malloc blockdevs
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py bdev_malloc_delete $malloc_bdev
+done
+
+# Delete malloc device
+$rpc_py bdev_malloc_delete ${bdev}
+
+fio_status=0
+wait $fio_pid || fio_status=$?
+
+if [ $fio_status -eq 0 ]; then
+ echo "iscsi hotplug test: fio successful - expected failure"
+ exit 1
+else
+ echo "iscsi hotplug test: fio failed as expected"
+fi
+
+iscsicleanup
+$rpc_py iscsi_delete_target_node 'iqn.2016-06.io.spdk:Target3'
+
+delete_tmp_files
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $pid
+
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/fio/iscsi.json b/src/spdk/test/iscsi_tgt/fio/iscsi.json
new file mode 100644
index 000000000..d901fc78b
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/fio/iscsi.json
@@ -0,0 +1,32 @@
+{
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "params": {
+ "retry_count": 4,
+ "nvme_adminq_poll_period_us": 100000,
+ "nvme_ioq_poll_period_us": 0,
+ "action_on_timeout": "none"
+ },
+ "method": "bdev_nvme_set_options"
+ }
+ ]
+ },
+ {
+ "subsystem": "iscsi",
+ "config": [
+ {
+ "method": "iscsi_set_options",
+ "params": {
+ "nop_timeout": 30,
+ "node_base": "iqn.2016-06.io.spdk",
+ "max_sessions": 16,
+ "error_recovery_level": 0
+ }
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/spdk/test/iscsi_tgt/fuzz/fuzz.sh b/src/spdk/test/iscsi_tgt/fuzz/fuzz.sh
new file mode 100755
index 000000000..bc290fa8f
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/fuzz/fuzz.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+if [ -z "$TARGET_IP" ]; then
+ echo "TARGET_IP not defined in environment"
+ exit 1
+fi
+
+if [ -z "$INITIATOR_IP" ]; then
+ echo "INITIATOR_IP not defined in environment"
+ exit 1
+fi
+
+timing_enter iscsi_fuzz
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=4096
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+iscsipid=$!
+echo "Process iscsipid: $iscsipid"
+
+trap 'killprocess $iscsipid; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $iscsipid
+# The fuzz initiator does not support nop-in yet, so increase the nop-in timeout value for now.
+$rpc_py iscsi_set_options -o 60 -a 16
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+$rpc_py iscsi_create_target_node disk1 disk1_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 256 -d
+sleep 1
+
+trap 'killprocess $iscsipid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+$rootdir/test/app/fuzz/iscsi_fuzz/iscsi_fuzz -m 0xF0 -T $TARGET_IP -t 30 2> $output_dir/iscsi_autofuzz_logs.txt
+
+$rpc_py iscsi_delete_target_node 'iqn.2016-06.io.spdk:disk1'
+
+# Delete malloc device
+$rpc_py bdev_malloc_delete Malloc0
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $iscsipid
+
+iscsitestfini $1 $2
+
+timing_exit iscsi_fuzz
diff --git a/src/spdk/test/iscsi_tgt/initiator/initiator.sh b/src/spdk/test/iscsi_tgt/initiator/initiator.sh
new file mode 100755
index 000000000..5da1f320b
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/initiator/initiator.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" -m 0x2 -p 1 -s 512 --wait-for-rpc &
+pid=$!
+echo "iSCSI target launched. pid: $pid"
+trap 'killprocess $pid;exit 1' SIGINT SIGTERM EXIT
+waitforlisten $pid
+$rpc_py iscsi_set_options -o 30 -a 4
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "Malloc0:0" ==> use Malloc0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py iscsi_create_target_node disk1 disk1_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 256 -d
+sleep 1
+trap 'killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+"$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w verify -t 5 -s 512
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ "$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w unmap -t 5 -s 512
+ "$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w flush -t 5 -s 512
+ "$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w reset -t 10 -s 512
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $pid
+
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/ip_migration/ip_migration.sh b/src/spdk/test/iscsi_tgt/ip_migration/ip_migration.sh
new file mode 100755
index 000000000..d737e01b3
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/ip_migration/ip_migration.sh
@@ -0,0 +1,131 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+# Namespaces are NOT used here on purpose. This test would require changes to detect
+# the ifc_index of an interface placed in a namespace, which net_interface_add_ip_address needs.
+# Reset ISCSI_APP[] to use only the plain app for this test, without the TARGET_NS_CMD prefix.
+source "$rootdir/test/common/applications.sh"
+NETMASK=127.0.0.0/24
+MIGRATION_ADDRESS=127.0.0.2
+
+function kill_all_iscsi_target() {
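+	# Send SIGTERM via RPC to both iSCSI target instances started by this test.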
+ for ((i = 0; i < 2; i++)); do
+ rpc_addr="/var/tmp/spdk${i}.sock"
+ $rpc_py -s $rpc_addr spdk_kill_instance SIGTERM
+ done
+}
+
+function rpc_config() {
+ # $1 = RPC server address
+ # $2 = Netmask
+ $rpc_py -s $1 iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $2
+ $rpc_py -s $1 bdev_malloc_create 64 512
+}
+
+function rpc_validate_ip() {
+ # Always delete the IP first in case it is there already
+ cmd="$rpc_py -s $1 net_interface_delete_ip_address 1 $MIGRATION_ADDRESS"
+ if $cmd; then
+ echo "Delete existing IP succeeded."
+ else
+ echo "Ignore the failure as IP did not exist."
+ fi
+
+ cmd="$rpc_py -s $1 net_interface_add_ip_address 1 $MIGRATION_ADDRESS"
+ if $cmd; then
+ echo "Add new IP succeeded."
+ else
+ echo "Add new IP failed. Expected to succeed..."
+ exit 1
+ fi
+ # Add same IP again
+ if $cmd; then
+ echo "Same IP existed. Expected to fail..."
+ exit 1
+ fi
+
+ cmd="$rpc_py -s $1 net_interface_delete_ip_address 1 $MIGRATION_ADDRESS"
+ if $cmd; then
+ echo "Delete existing IP succeeded."
+ else
+ echo "Delete existing IP failed. Expected to succeed..."
+ exit 1
+ fi
+ # Delete same IP again
+ if $cmd; then
+ echo "No required IP existed. Expected to fail..."
+ exit 1
+ fi
+}
+
+function rpc_add_target_node() {
+ $rpc_py -s $1 net_interface_add_ip_address 1 $MIGRATION_ADDRESS
+ $rpc_py -s $1 iscsi_create_portal_group $PORTAL_TAG $MIGRATION_ADDRESS:$ISCSI_PORT
+ $rpc_py -s $1 iscsi_create_target_node target1 target1_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+ $rpc_py -s $1 net_interface_delete_ip_address 1 $MIGRATION_ADDRESS
+}
+
+echo "Running ip migration tests"
+for ((i = 0; i < 2; i++)); do
+ timing_enter start_iscsi_tgt_$i
+
+ rpc_addr="/var/tmp/spdk${i}.sock"
+
+ # TODO: run the different iSCSI instances on non-overlapping CPU masks
+ "${ISCSI_APP[@]}" -r $rpc_addr -i $i -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+ pid=$!
+ echo "Process pid: $pid"
+
+ trap 'kill_all_iscsi_target; exit 1' SIGINT SIGTERM EXIT
+
+ waitforlisten $pid $rpc_addr
+ $rpc_py -s $rpc_addr iscsi_set_options -o 30 -a 64
+ $rpc_py -s $rpc_addr framework_start_init
+ echo "iscsi_tgt is listening. Running tests..."
+
+ timing_exit start_iscsi_tgt_$i
+
+ rpc_config $rpc_addr $NETMASK
+ trap 'kill_all_iscsi_target; iscsitestfini $1 $2; exit 1' \
+ SIGINT SIGTERM EXIT
+done
+
+rpc_first_addr="/var/tmp/spdk0.sock"
+rpc_validate_ip $rpc_first_addr
+rpc_add_target_node $rpc_first_addr
+
+sleep 1
+iscsiadm -m discovery -t sendtargets -p $MIGRATION_ADDRESS:$ISCSI_PORT
+sleep 1
+iscsiadm -m node --login -p $MIGRATION_ADDRESS:$ISCSI_PORT
+waitforiscsidevices 1
+
+# fio tests for multi-process
+$fio_py -p iscsi -i 4096 -d 32 -t randrw -r 12 &
+fiopid=$!
+sleep 3
+
+$rpc_py -s $rpc_first_addr spdk_kill_instance SIGTERM
+
+rpc_second_addr="/var/tmp/spdk1.sock"
+rpc_add_target_node $rpc_second_addr
+
+wait $fiopid
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+
+$rpc_py -s $rpc_second_addr spdk_kill_instance SIGTERM
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/iscsi_tgt.sh b/src/spdk/test/iscsi_tgt/iscsi_tgt.sh
new file mode 100755
index 000000000..0316229b6
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/iscsi_tgt.sh
@@ -0,0 +1,97 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+if [ ! $(uname -s) = Linux ]; then
+ exit 0
+fi
+
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = test type (posix/vpp)
+if [ "$1" == "posix" ] || [ "$1" == "vpp" ]; then
+ TEST_TYPE=$1
+else
+ echo "No iSCSI test type specified"
+ exit 1
+fi
+
+# Run cleanup once to make sure we remove any stale iscsiadm
+# entries if they were missed in previous runs
+iscsicleanup
+
+# Network configuration
+create_veth_interfaces $TEST_TYPE
+
+trap 'cleanup_veth_interfaces $TEST_TYPE; exit 1' SIGINT SIGTERM EXIT
+
+run_test "iscsi_tgt_sock" ./test/iscsi_tgt/sock/sock.sh $TEST_TYPE
+if [ "$TEST_TYPE" == "posix" ]; then
+	# calsoft doesn't handle the TCP stream properly and fails to decode iSCSI
+	# requests when they are split by TCP segmentation. This is a very common
+	# situation with VPP and causes calsoft.sh to never pass.
+ if [[ -d /usr/local/calsoft ]]; then
+ run_test "iscsi_tgt_calsoft" ./test/iscsi_tgt/calsoft/calsoft.sh
+ else
+ skip_run_test_with_warning "WARNING: Calsoft binaries not found, skipping test!"
+ fi
+fi
+run_test "iscsi_tgt_filesystem" ./test/iscsi_tgt/filesystem/filesystem.sh
+run_test "iscsi_tgt_reset" ./test/iscsi_tgt/reset/reset.sh
+run_test "iscsi_tgt_rpc_config" ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
+run_test "iscsi_tgt_iscsi_lvol" ./test/iscsi_tgt/lvol/iscsi_lvol.sh
+run_test "iscsi_tgt_fio" ./test/iscsi_tgt/fio/fio.sh
+run_test "iscsi_tgt_qos" ./test/iscsi_tgt/qos/qos.sh
+
+# IP Migration tests do not support network namespaces,
+# they can only be run on posix sockets.
+if [ "$TEST_TYPE" == "posix" ]; then
+ run_test "iscsi_tgt_ip_migration" ./test/iscsi_tgt/ip_migration/ip_migration.sh
+fi
+run_test "iscsi_tgt_trace_record" ./test/iscsi_tgt/trace_record/trace_record.sh
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ if [ $SPDK_TEST_PMDK -eq 1 ]; then
+ run_test "iscsi_tgt_pmem" ./test/iscsi_tgt/pmem/iscsi_pmem.sh 4096 10
+ fi
+ run_test "iscsi_tgt_ext4test" ./test/iscsi_tgt/ext4test/ext4test.sh
+ run_test "iscsi_tgt_digests" ./test/iscsi_tgt/digests/digests.sh
+fi
+if [ $SPDK_TEST_RBD -eq 1 ]; then
+ # RBD tests do not support network namespaces,
+ # they can only be run on posix sockets.
+ if [ "$TEST_TYPE" == "posix" ]; then
+ if ! hash ceph; then
+ echo "ERROR: SPDK_TEST_RBD requested but no ceph installed!"
+ false
+ fi
+ run_test "iscsi_tgt_rbd" ./test/iscsi_tgt/rbd/rbd.sh
+ fi
+fi
+
+trap 'cleanup_veth_interfaces $TEST_TYPE; exit 1' SIGINT SIGTERM EXIT
+
+if [ $SPDK_TEST_NVMF -eq 1 ]; then
+ # NVMe-oF tests do not support network namespaces,
+ # they can only be run on posix sockets.
+ if [ "$TEST_TYPE" == "posix" ]; then
+ # Test configure remote NVMe device from rpc and conf file
+ run_test "iscsi_tgt_fio_remote_nvme" ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
+ fi
+fi
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ if [ "$TEST_TYPE" == "posix" ]; then
+ run_test "iscsi_tgt_fuzz" ./test/iscsi_tgt/fuzz/fuzz.sh
+ fi
+ run_test "iscsi_tgt_multiconnection" ./test/iscsi_tgt/multiconnection/multiconnection.sh
+fi
+
+if [ $SPDK_TEST_ISCSI_INITIATOR -eq 1 ]; then
+ run_test "iscsi_tgt_initiator" ./test/iscsi_tgt/initiator/initiator.sh
+ run_test "iscsi_tgt_bdev_io_wait" ./test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh
+fi
+
+cleanup_veth_interfaces $TEST_TYPE
+trap - SIGINT SIGTERM EXIT
diff --git a/src/spdk/test/iscsi_tgt/lvol/iscsi_lvol.sh b/src/spdk/test/iscsi_tgt/lvol/iscsi_lvol.sh
new file mode 100755
index 000000000..e55899d56
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/lvol/iscsi_lvol.sh
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+MALLOC_BDEV_SIZE=128
+MALLOC_BLOCK_SIZE=512
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ NUM_LVS=10
+ NUM_LVOL=10
+else
+ NUM_LVS=2
+ NUM_LVOL=2
+fi
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap 'iscsicleanup; killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py iscsi_set_options -o 30 -a 16
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+timing_enter setup
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+# Create the first LVS from a RAID-0 bdev built from two malloc bdevs.
+# Create each remaining LVS from its own malloc bdev.
+for i in $(seq 1 $NUM_LVS); do
+ INITIATOR_TAG=$((i + 2))
+ $rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+ if [ $i -eq 1 ]; then
+ # construct RAID bdev and put its name in $bdev
+ malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+ malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+ $rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$malloc_bdevs"
+ bdev="raid0"
+ else
+ # construct malloc bdev and put its name in $bdev
+ bdev=$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)
+ fi
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore $bdev lvs_$i -c 1048576)
+ LUNs=""
+ for j in $(seq 1 $NUM_LVOL); do
+ lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_$j 10)
+ LUNs+="$lb_name:$((j - 1)) "
+ done
+ $rpc_py iscsi_create_target_node Target$i Target${i}_alias "$LUNs" "1:$INITIATOR_TAG" 256 -d
+done
+timing_exit setup
+
+sleep 1
+
+timing_enter discovery
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+waitforiscsidevices $((NUM_LVS * NUM_LVOL))
+timing_exit discovery
+
+timing_enter fio
+$fio_py -p iscsi -i 131072 -d 8 -t randwrite -r 10 -v
+timing_exit fio
+
+rm -f ./local-job0-0-verify.state
+trap - SIGINT SIGTERM EXIT
+
+rm -f ./local-job*
+iscsicleanup
+killprocess $pid
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/multiconnection/multiconnection.sh b/src/spdk/test/iscsi_tgt/multiconnection/multiconnection.sh
new file mode 100755
index 000000000..badf70197
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/multiconnection/multiconnection.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+CONNECTION_NUMBER=30
+
+# Remove lvol bdevs and stores.
+function remove_backends() {
+ echo "INFO: Removing lvol bdevs"
+ for i in $(seq 1 $CONNECTION_NUMBER); do
+ lun="lvs0/lbd_$i"
+ $rpc_py bdev_lvol_delete $lun
+ echo -e "\tINFO: lvol bdev $lun removed"
+ done
+ sleep 1
+
+ echo "INFO: Removing lvol stores"
+ $rpc_py bdev_lvol_delete_lvstore -l lvs0
+ echo "INFO: lvol store lvs0 removed"
+
+ echo "INFO: Removing NVMe"
+ $rpc_py bdev_nvme_detach_controller Nvme0
+
+ return 0
+}
+
+timing_enter start_iscsi_tgt
+"${ISCSI_APP[@]}" --wait-for-rpc &
+iscsipid=$!
+echo "iSCSI target launched. pid: $iscsipid"
+trap 'remove_backends; iscsicleanup; killprocess $iscsipid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $iscsipid
+$rpc_py iscsi_set_options -o 30 -a 128
+$rpc_py framework_start_init
+$rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+
+echo "Creating an iSCSI target node."
+ls_guid=$($rpc_py bdev_lvol_create_lvstore "Nvme0n1" "lvs0" -c 1048576)
+
+# Assign an equal size to each lvol bdev.
+get_lvs_free_mb $ls_guid
+lvol_bdev_size=$((free_mb / CONNECTION_NUMBER))
+for i in $(seq 1 $CONNECTION_NUMBER); do
+ $rpc_py bdev_lvol_create -u $ls_guid lbd_$i $lvol_bdev_size
+done
+
+for i in $(seq 1 $CONNECTION_NUMBER); do
+ lun="lvs0/lbd_$i:0"
+ $rpc_py iscsi_create_target_node Target$i Target${i}_alias "$lun" $PORTAL_TAG:$INITIATOR_TAG 256 -d
+done
+sleep 1
+
+echo "Logging into iSCSI target."
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+waitforiscsidevices $CONNECTION_NUMBER
+
+echo "Running FIO"
+$fio_py -p iscsi -i 131072 -d 64 -t randrw -r 5
+$fio_py -p iscsi -i 262144 -d 16 -t randwrite -r 10
+sync
+
+trap - SIGINT SIGTERM EXIT
+
+rm -f ./local-job*
+iscsicleanup
+remove_backends
+killprocess $iscsipid
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh b/src/spdk/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
new file mode 100755
index 000000000..38329dc43
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+nvmftestinit
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+# Namespaces are NOT used here on purpose. The rxe_cfg utility used for the NVMf tests does not support namespaces.
+TARGET_IP=127.0.0.1
+INITIATOR_IP=127.0.0.1
+NETMASK=$INITIATOR_IP/32
+
+function run_nvme_remote() {
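+	# $1 selects when the remote NVMe bdev is attached to the iSCSI target:
+	# "remote" attaches it right after framework init, "local" attaches it
+	# just before the target node is created.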
+ echo "now use $1 method to run iscsi tgt."
+
+ iscsi_rpc_addr="/var/tmp/spdk-iscsi.sock"
+ "${ISCSI_APP[@]}" -r "$iscsi_rpc_addr" -m 0x1 -p 0 -s 512 --wait-for-rpc &
+ iscsipid=$!
+ echo "iSCSI target launched. pid: $iscsipid"
+ trap 'killprocess $iscsipid; iscsitestfini $1 $2; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $iscsipid "$iscsi_rpc_addr"
+ $rpc_py -s "$iscsi_rpc_addr" iscsi_set_options -o 30 -a 16
+ $rpc_py -s "$iscsi_rpc_addr" framework_start_init
+ if [ "$1" = "remote" ]; then
+ $rpc_py -s $iscsi_rpc_addr bdev_nvme_attach_controller -b "Nvme0" -t "rdma" -f "ipv4" -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
+ fi
+
+ echo "iSCSI target has started."
+
+ timing_exit start_iscsi_tgt
+
+ echo "Creating an iSCSI target node."
+ $rpc_py -s "$iscsi_rpc_addr" iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+ $rpc_py -s "$iscsi_rpc_addr" iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+ if [ "$1" = "local" ]; then
+ $rpc_py -s "$iscsi_rpc_addr" bdev_nvme_attach_controller -b "Nvme0" -t "rdma" -f "ipv4" -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
+ fi
+ $rpc_py -s "$iscsi_rpc_addr" iscsi_create_target_node Target1 Target1_alias 'Nvme0n1:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+ sleep 1
+
+ echo "Logging in to iSCSI target."
+ iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+}
+
+# Start the NVMf target
+"${NVMF_APP[@]}" -m 0x2 -p 1 -s 512 --wait-for-rpc &
+nvmfpid=$!
+echo "NVMf target launched. pid: $nvmfpid"
+trap 'iscsitestfini $1 $2; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $nvmfpid
+$rpc_py framework_start_init
+$rpc_py nvmf_create_transport -t RDMA -u 8192
+echo "NVMf target has started."
+bdevs=$($rpc_py bdev_malloc_create 64 512)
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+echo "NVMf subsystem created."
+
+timing_enter start_iscsi_tgt
+
+run_nvme_remote "local"
+
+trap 'iscsicleanup; killprocess $iscsipid;
+ rm -f ./local-job0-0-verify.state; iscsitestfini $1 $2; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+echo "Running FIO"
+$fio_py -p iscsi -i 4096 -d 1 -t randrw -r 1 -v
+
+rm -f ./local-job0-0-verify.state
+iscsicleanup
+killprocess $iscsipid
+
+run_nvme_remote "remote"
+
+echo "Running FIO"
+$fio_py -p iscsi -i 4096 -d 1 -t randrw -r 1 -v
+
+rm -f ./local-job0-0-verify.state
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+killprocess $iscsipid
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+iscsitestfini $1 $2
+nvmftestfini
diff --git a/src/spdk/test/iscsi_tgt/perf/iscsi_initiator.sh b/src/spdk/test/iscsi_tgt/perf/iscsi_initiator.sh
new file mode 100755
index 000000000..b7d08bbc2
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/perf/iscsi_initiator.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+ISCSI_PORT=3260
+FIO_PATH=$1
+IP_T=$2
+
+set -xe
+trap "exit 1" ERR SIGTERM SIGABRT
+
+if [ ! -x $FIO_PATH/fio ]; then
+ error "Invalid path of fio binary"
+fi
+
+function run_spdk_iscsi_fio() {
+ $FIO_PATH/fio $testdir/perf.job "$@" --output-format=json
+}
+
+mkdir -p $testdir/perf_output
+iscsi_fio_results="$testdir/perf_output/iscsi_fio.json"
+trap "iscsiadm -m node --logout; iscsiadm -m node -o delete; exit 1" ERR SIGTERM SIGABRT
+iscsiadm -m discovery -t sendtargets -p $IP_T:$ISCSI_PORT
+iscsiadm -m node --login -p $IP_T:$ISCSI_PORT
+waitforiscsidevices 1
+
+disks=($(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}'))
+for ((i = 0; i < ${#disks[@]}; i++)); do
+ filename+=$(printf /dev/%s: "${disks[i]}")
+ waitforfile $filename
+ echo noop > /sys/block/${disks[i]}/queue/scheduler
+ echo "2" > /sys/block/${disks[i]}/queue/nomerges
+ echo "1024" > /sys/block/${disks[i]}/queue/nr_requests
+done
+
+run_spdk_iscsi_fio --filename=$filename "--output=$iscsi_fio_results"
+
+iscsiadm -m node --logout || true
+iscsiadm -m node -o delete || true
diff --git a/src/spdk/test/iscsi_tgt/perf/iscsi_target.sh b/src/spdk/test/iscsi_tgt/perf/iscsi_target.sh
new file mode 100755
index 000000000..ec02f9e0c
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/perf/iscsi_target.sh
@@ -0,0 +1,134 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py -s $testdir/rpc_iscsi.sock"
+
+BLK_SIZE=4096
+RW=randrw
+MIX=100
+IODEPTH=128
+RUNTIME=60
+RAMP_TIME=10
+FIO_PATH=$CONFIG_FIO_SOURCE_DIR
+DISKNO="ALL"
+CPUMASK=0x02
+NUM_JOBS=1
+ISCSI_TGT_CM=0x02
+
+# Performance test for iscsi_tgt, run on devices with proper hardware support (target and initiator)
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Usage: $(basename $1) [options]"
+ echo "-h, --help Print help and exit"
+ echo " --fiopath=PATH Path to fio directory on initiator. [default=$FIO_PATH]"
+ echo " --disk_no=INT,ALL Number of disks to test on, if =ALL then test on all found disks. [default=$DISKNO]"
+ echo " --target_ip=IP The IP address of target used for test."
+ echo " --initiator_ip=IP The IP address of initiator used for test."
+ echo " --init_mgmnt_ip=IP The IP address of initiator used for communication."
+ echo " --iscsi_tgt_mask=HEX iscsi_tgt core mask. [default=$ISCSI_TGT_CM]"
+}
+
+while getopts 'h-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help)
+ usage $0
+ exit 0
+ ;;
+			fiopath=*) FIO_PATH="${OPTARG#*=}" ;;
+ disk_no=*) DISKNO="${OPTARG#*=}" ;;
+ target_ip=*) TARGET_IP="${OPTARG#*=}" ;;
+ initiator_ip=*) INITIATOR_IP="${OPTARG#*=}" ;;
+ init_mgmnt_ip=*) IP_I_SSH="${OPTARG#*=}" ;;
+ iscsi_tgt_mask=*) ISCSI_TGT_CM="${OPTARG#*=}" ;;
+ *)
+				usage $0 "Invalid argument '$OPTARG'"
+ exit 1
+ ;;
+ esac
+ ;;
+ h)
+ usage $0
+ exit 0
+ ;;
+ *)
+ usage $0 "Invalid argument '$optchar'"
+ exit 1
+ ;;
+ esac
+done
+
+if [ -z "$TARGET_IP" ]; then
+ error "No IP address of iscsi target is given"
+fi
+
+if [ -z "$INITIATOR_IP" ]; then
+ error "No IP address of iscsi initiator is given"
+fi
+
+if [ -z "$IP_I_SSH" ]; then
+ error "No IP address of initiator is given"
+fi
+
+if [ $EUID -ne 0 ]; then
+ error "INFO: This script must be run with root privileges"
+fi
+
+function ssh_initiator() {
+ # shellcheck disable=SC2029
+ # (we want to expand $@ immediately, not on the VM)
+ ssh -i $HOME/.ssh/spdk_vhost_id_rsa root@$IP_I_SSH "$@"
+}
+
+NETMASK=$INITIATOR_IP/32
+iscsi_fio_results="$testdir/perf_output/iscsi_fio.json"
+rm -rf $iscsi_fio_results
+mkdir -p $testdir/perf_output
+touch $iscsi_fio_results
+
+timing_enter run_iscsi_app
+$SPDK_BIN_DIR/iscsi_tgt -m $ISCSI_TGT_CM -r $testdir/rpc_iscsi.sock --wait-for-rpc &
+pid=$!
+trap 'rm -f $testdir/perf.job; killprocess $pid; print_backtrace; exit 1' ERR SIGTERM SIGABRT
+waitforlisten "$pid" "$testdir/rpc_iscsi.sock"
+$rpc_py iscsi_set_options -b "iqn.2016-06.io.spdk" -f "/usr/local/etc/spdk/auth.conf" -o 30 -i -l 0 -a 16
+$rpc_py framework_start_init
+$rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
+sleep 1
+timing_exit run_iscsi_app
+
+timing_enter iscsi_config
+bdevs=($($rpc_py bdev_get_bdevs | jq -r '.[].name'))
+if [[ $DISKNO == "ALL" ]] || [[ $DISKNO == "all" ]]; then
+ DISKNO=${#bdevs[@]}
+elif [[ $DISKNO -gt ${#bdevs[@]} ]] || [[ ! $DISKNO =~ ^[0-9]+$ ]]; then
+ error "Required device number ($DISKNO) is not a valid number or it's larger than the number of devices found (${#bdevs[@]})"
+fi
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+
+for ((i = 0; i < DISKNO; i++)); do
+ $rpc_py iscsi_create_target_node Target${i} Target${i}_alias "${bdevs[i]}:0" "$PORTAL_TAG:$INITIATOR_TAG" 64 -d
+done
+
+ssh_initiator "cat > perf.job" < $testdir/perf.job
+rm -f $testdir/perf.job
+timing_exit iscsi_config
+
+timing_enter iscsi_initiator
+ssh_initiator bash -s - $FIO_PATH $TARGET_IP < $testdir/iscsi_initiator.sh
+timing_exit iscsi_initiator
+
+ssh_initiator "cat perf_output/iscsi_fio.json" > $iscsi_fio_results
+ssh_initiator "rm -rf perf_output perf.job"
+
+killprocess $pid
diff --git a/src/spdk/test/iscsi_tgt/perf/perf.job b/src/spdk/test/iscsi_tgt/perf/perf.job
new file mode 100644
index 000000000..0f169d4ab
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/perf/perf.job
@@ -0,0 +1,19 @@
+[global]
+thread=1
+group_reporting=1
+direct=1
+norandommap=1
+ioengine=libaio
+percentile_list=50:90:99:99.5:99.9:99.99:99.999
+
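+# rwmixread=100 below makes the randrw workload effectively a pure 4k random read
+# test at queue depth 128.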
+[4k_rand_read_qd_128]
+stonewall
+time_based=1
+runtime=60
+ramp_time=10
+bs=4096
+rw=randrw
+rwmixread=100
+iodepth=128
+cpumask=0x02
+numjobs=1
diff --git a/src/spdk/test/iscsi_tgt/pmem/iscsi_pmem.sh b/src/spdk/test/iscsi_tgt/pmem/iscsi_pmem.sh
new file mode 100755
index 000000000..da6fd77f4
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/pmem/iscsi_pmem.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+BLOCKSIZE=$1
+RUNTIME=$2
+PMEM_BDEVS=""
+PMEM_SIZE=128
+PMEM_BLOCK_SIZE=512
+TGT_NR=10
+PMEM_PER_TGT=1
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_target
+"${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap 'iscsicleanup; killprocess $pid; rm -f /tmp/pool_file*; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py iscsi_set_options -o 30 -a 16
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+timing_exit start_iscsi_target
+
+timing_enter setup
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
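+# Create TGT_NR target nodes, each backed by PMEM_PER_TGT pmem pool bdevs exposed
+# as consecutive LUNs.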
+for i in $(seq 1 $TGT_NR); do
+ INITIATOR_TAG=$((i + 1))
+ $rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+
+ luns=""
+ for j in $(seq 1 $PMEM_PER_TGT); do
+ $rpc_py create_pmem_pool /tmp/pool_file${i}_${j} $PMEM_SIZE $PMEM_BLOCK_SIZE
+ bdevs_name="$($rpc_py bdev_pmem_create -n pmem${i}_${j} /tmp/pool_file${i}_${j})"
+ PMEM_BDEVS+="$bdevs_name "
+ luns+="$bdevs_name:$((j - 1)) "
+ done
+	$rpc_py iscsi_create_target_node Target$i Target${i}_alias "$luns" "$PORTAL_TAG:$INITIATOR_TAG" 256 -d
+done
+timing_exit setup
+sleep 1
+
+timing_enter discovery
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+timing_exit discovery
+
+timing_enter fio_test
+$fio_py -p iscsi -i $BLOCKSIZE -d 64 -t randwrite -r $RUNTIME -v
+timing_exit fio_test
+
+iscsicleanup
+
+for pmem in $PMEM_BDEVS; do
+ $rpc_py bdev_pmem_delete $pmem
+done
+
+for i in $(seq 1 $TGT_NR); do
+ for c in $(seq 1 $PMEM_PER_TGT); do
+ $rpc_py bdev_pmem_delete_pool /tmp/pool_file${i}_${c}
+ done
+done
+
+trap - SIGINT SIGTERM EXIT
+
+rm -f ./local-job*
+rm -f /tmp/pool_file*
+killprocess $pid
diff --git a/src/spdk/test/iscsi_tgt/qos/qos.sh b/src/spdk/test/iscsi_tgt/qos/qos.sh
new file mode 100755
index 000000000..0a8015e18
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/qos/qos.sh
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+function run_fio() {
+ local bdev_name=$1
+ local iostats
+ local start_io_count
+ local start_bytes_read
+ local end_io_count
+ local end_bytes_read
+ local run_time=5
+
+ iostats=$($rpc_py bdev_get_iostat -b $bdev_name)
+ start_io_count=$(jq -r '.bdevs[0].num_read_ops' <<< "$iostats")
+ start_bytes_read=$(jq -r '.bdevs[0].bytes_read' <<< "$iostats")
+
+ $fio_py -p iscsi -i 1024 -d 128 -t randread -r $run_time
+
+ iostats=$($rpc_py bdev_get_iostat -b $bdev_name)
+ end_io_count=$(jq -r '.bdevs[0].num_read_ops' <<< "$iostats")
+ end_bytes_read=$(jq -r '.bdevs[0].bytes_read' <<< "$iostats")
+
+ IOPS_RESULT=$(((end_io_count - start_io_count) / run_time))
+ BANDWIDTH_RESULT=$(((end_bytes_read - start_bytes_read) / run_time))
+}
+
+function verify_qos_limits() {
+ local result=$1
+ local limit=$2
+
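+	# The measured value must land within -15%/+5% of the configured limit.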
+ [ "$(bc <<< "$result > $limit*0.85")" -eq 1 ] \
+ && [ "$(bc <<< "$result < $limit*1.05")" -eq 1 ]
+}
+
+if [ -z "$TARGET_IP" ]; then
+ echo "TARGET_IP not defined in environment"
+ exit 1
+fi
+
+if [ -z "$INITIATOR_IP" ]; then
+ echo "INITIATOR_IP not defined in environment"
+ exit 1
+fi
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+IOPS_RESULT=
+BANDWIDTH_RESULT=
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" &
+pid=$!
+echo "Process pid: $pid"
+trap 'killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $pid
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "Malloc0:0" ==> use Malloc0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py iscsi_create_target_node Target1 Target1_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+
+trap 'iscsicleanup; killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+# Run FIO without any QOS limits to determine the raw performance
+run_fio Malloc0
+
+# Set IOPS/bandwidth limit to 50% of the actual unrestrained performance.
+IOPS_LIMIT=$((IOPS_RESULT / 2))
+BANDWIDTH_LIMIT=$((BANDWIDTH_RESULT / 2))
+# Set READ bandwidth limit to 50% of the RW bandwidth limit to be able
+# to differentiate those two.
+READ_BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT / 2))
+
+# Also round them down to nearest multiple of either 1000 IOPS or 1MB BW
+# which are the minimal QoS granularities
+IOPS_LIMIT=$((IOPS_LIMIT / 1000 * 1000))
+BANDWIDTH_LIMIT_MB=$((BANDWIDTH_LIMIT / 1024 / 1024))
+BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT_MB * 1024 * 1024))
+READ_BANDWIDTH_LIMIT_MB=$((READ_BANDWIDTH_LIMIT / 1024 / 1024))
+READ_BANDWIDTH_LIMIT=$((READ_BANDWIDTH_LIMIT_MB * 1024 * 1024))
+
+# Limit the I/O rate by RPC, then confirm the observed rate matches.
+$rpc_py bdev_set_qos_limit Malloc0 --rw_ios_per_sec $IOPS_LIMIT
+run_fio Malloc0
+verify_qos_limits $IOPS_RESULT $IOPS_LIMIT
+
+# Now disable the rate limiting, and confirm the observed rate is not limited anymore.
+$rpc_py bdev_set_qos_limit Malloc0 --rw_ios_per_sec 0
+run_fio Malloc0
+[ "$IOPS_RESULT" -gt "$IOPS_LIMIT" ]
+
+# Limit the I/O rate again.
+$rpc_py bdev_set_qos_limit Malloc0 --rw_ios_per_sec $IOPS_LIMIT
+run_fio Malloc0
+verify_qos_limits $IOPS_RESULT $IOPS_LIMIT
+
+echo "I/O rate limiting tests successful"
+
+# Limit the I/O bandwidth rate by RPC, then confirm the observed rate matches.
+$rpc_py bdev_set_qos_limit Malloc0 --rw_ios_per_sec 0 --rw_mbytes_per_sec $BANDWIDTH_LIMIT_MB
+run_fio Malloc0
+verify_qos_limits $BANDWIDTH_RESULT $BANDWIDTH_LIMIT
+
+# Now disable the bandwidth rate limiting, and confirm the observed rate is not limited anymore.
+$rpc_py bdev_set_qos_limit Malloc0 --rw_mbytes_per_sec 0
+run_fio Malloc0
+[ "$BANDWIDTH_RESULT" -gt "$BANDWIDTH_LIMIT" ]
+
+# Limit the I/O bandwidth rate again with both read/write and read/only.
+$rpc_py bdev_set_qos_limit Malloc0 --rw_mbytes_per_sec $BANDWIDTH_LIMIT_MB --r_mbytes_per_sec $READ_BANDWIDTH_LIMIT_MB
+run_fio Malloc0
+verify_qos_limits $BANDWIDTH_RESULT $READ_BANDWIDTH_LIMIT
+
+echo "I/O bandwidth limiting tests successful"
+
+iscsicleanup
+$rpc_py iscsi_delete_target_node 'iqn.2016-06.io.spdk:Target1'
+
+rm -f ./local-job0-0-verify.state
+trap - SIGINT SIGTERM EXIT
+killprocess $pid
+
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/rbd/rbd.sh b/src/spdk/test/iscsi_tgt/rbd/rbd.sh
new file mode 100755
index 000000000..060cc7af0
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/rbd/rbd.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+timing_enter rbd_setup
+rbd_setup $TARGET_IP $TARGET_NAMESPACE
+trap 'rbd_cleanup; exit 1' SIGINT SIGTERM EXIT
+timing_exit rbd_setup
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+pid=$!
+
+trap 'killprocess $pid; rbd_cleanup; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py iscsi_set_options -o 30 -a 16
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+rbd_bdev="$($rpc_py bdev_rbd_create $RBD_POOL $RBD_NAME 4096)"
+$rpc_py bdev_get_bdevs
+
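+# Resize the rbd bdev to 2000 MiB and check that bdev_get_bdevs reports the new
+# size (num_blocks * block_size, converted to MiB below).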
+$rpc_py bdev_rbd_resize $rbd_bdev 2000
+num_block=$($rpc_py bdev_get_bdevs | grep num_blocks | sed 's/[^[:digit:]]//g')
+# get the bdev size in MiB.
+total_size=$((num_block * 4096 / 1048576))
+if [ $total_size != 2000 ]; then
+ echo "resize failed."
+ exit 1
+fi
+# "Ceph0:0" ==> use Ceph0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py iscsi_create_target_node Target3 Target3_alias 'Ceph0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+waitforiscsidevices 1
+
+trap 'iscsicleanup; killprocess $pid; rbd_cleanup; exit 1' SIGINT SIGTERM EXIT
+
+$fio_py -p iscsi -i 4096 -d 1 -t randrw -r 1 -v
+$fio_py -p iscsi -i 131072 -d 32 -t randrw -r 1 -v
+
+rm -f ./local-job0-0-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+$rpc_py bdev_rbd_delete $rbd_bdev
+killprocess $pid
+rbd_cleanup
+
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/reset/reset.sh b/src/spdk/test/iscsi_tgt/reset/reset.sh
new file mode 100755
index 000000000..406a10c45
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/reset/reset.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+if ! hash sg_reset; then
+ exit 1
+fi
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap 'killprocess $pid; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py iscsi_set_options -o 30 -a 16
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "Malloc0:0" ==> use Malloc0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py iscsi_create_target_node Target3 Target3_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+waitforiscsidevices 1
+
+dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+
+$fio_py -p iscsi -i 512 -d 1 -t read -r 60 &
+fiopid=$!
+echo "FIO pid: $fiopid"
+
+trap 'iscsicleanup; killprocess $pid; killprocess $fiopid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+# Do 3 resets while making sure iscsi_tgt and fio are still running
+for i in 1 2 3; do
+ sleep 1
+ kill -s 0 $pid
+ kill -s 0 $fiopid
+ sg_reset -d /dev/$dev
+ sleep 1
+ kill -s 0 $pid
+ kill -s 0 $fiopid
+done
+
+kill $fiopid
+wait $fiopid || true
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+killprocess $pid
+iscsitestfini $1 $2
diff --git a/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.py b/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.py
new file mode 100755
index 000000000..9f8e32909
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.py
@@ -0,0 +1,481 @@
+#!/usr/bin/env python3
+
+
+import os
+import os.path
+import re
+import sys
+import time
+import json
+import random
+from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError
+
+if (len(sys.argv) == 8):
+ target_ip = sys.argv[2]
+ initiator_ip = sys.argv[3]
+ port = sys.argv[4]
+ netmask = sys.argv[5]
+ namespace = sys.argv[6]
+ test_type = sys.argv[7]
+
+ns_cmd = 'ip netns exec ' + namespace
+other_ip = '127.0.0.6'
+initiator_name = 'ANY'
+portal_tag = '1'
+initiator_tag = '1'
+
+rpc_param = {
+ 'target_ip': target_ip,
+ 'initiator_ip': initiator_ip,
+ 'port': port,
+ 'initiator_name': initiator_name,
+ 'netmask': netmask,
+ 'lun_total': 3,
+ 'malloc_bdev_size': 64,
+ 'malloc_block_size': 512,
+ 'queue_depth': 64,
+ 'target_name': 'Target3',
+ 'alias_name': 'Target3_alias',
+ 'disable_chap': True,
+ 'mutual_chap': False,
+ 'require_chap': False,
+ 'chap_group': 0,
+ 'header_digest': False,
+ 'data_digest': False,
+ 'log_flag': 'rpc',
+ 'cpumask': 0x1
+}
+
+
+class RpcException(Exception):
+
+ def __init__(self, retval, msg):
+ super(RpcException, self).__init__(msg)
+ self.retval = retval
+ self.message = msg
+
+
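+# Thin RPC wrapper: attribute access is turned into an rpc.py invocation, e.g.
+# rpc.iscsi_get_connections() runs "<rpc_py> iscsi_get_connections" in a shell
+# and returns its stdout.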
+class spdk_rpc(object):
+
+ def __init__(self, rpc_py):
+ self.rpc_py = rpc_py
+
+ def __getattr__(self, name):
+ def call(*args):
+ cmd = "{} {}".format(self.rpc_py, name)
+ for arg in args:
+ cmd += " {}".format(arg)
+ return check_output(cmd, shell=True).decode("utf-8")
+ return call
+
+
+def verify(expr, retcode, msg):
+ if not expr:
+ raise RpcException(retcode, msg)
+
+
+def verify_log_flag_rpc_methods(rpc_py, rpc_param):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.log_get_flags()
+ jsonvalue = json.loads(output)
+ verify(not jsonvalue[rpc_param['log_flag']], 1,
+ "log_get_flags returned {}, expected false".format(jsonvalue))
+ rpc.log_set_flag(rpc_param['log_flag'])
+ output = rpc.log_get_flags()
+ jsonvalue = json.loads(output)
+ verify(jsonvalue[rpc_param['log_flag']], 1,
+ "log_get_flags returned {}, expected true".format(jsonvalue))
+ rpc.log_clear_flag(rpc_param['log_flag'])
+ output = rpc.log_get_flags()
+ jsonvalue = json.loads(output)
+ verify(not jsonvalue[rpc_param['log_flag']], 1,
+ "log_get_flags returned {}, expected false".format(jsonvalue))
+
+ print("verify_log_flag_rpc_methods passed")
+
+
+def verify_iscsi_connection_rpc_methods(rpc_py):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.iscsi_get_connections()
+ jsonvalue = json.loads(output)
+ verify(not jsonvalue, 1,
+ "iscsi_get_connections returned {}, expected empty".format(jsonvalue))
+
+ rpc.bdev_malloc_create(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
+ rpc.iscsi_create_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port'])))
+ rpc.iscsi_create_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'])
+
+ lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0"
+ net_mapping = portal_tag + ":" + initiator_tag
+ rpc.iscsi_create_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping,
+ net_mapping, rpc_param['queue_depth'], '-d')
+ check_output('iscsiadm -m discovery -t st -p {}'.format(rpc_param['target_ip']), shell=True)
+ check_output('iscsiadm -m node --login', shell=True)
+ name = json.loads(rpc.iscsi_get_target_nodes())[0]['name']
+ output = rpc.iscsi_get_connections()
+ jsonvalues = json.loads(output)
+ verify(jsonvalues[0]['target_node_name'] == rpc_param['target_name'], 1,
+           "target node name value is {}, expected {}".format(jsonvalues[0]['target_node_name'], rpc_param['target_name']))
+ verify(jsonvalues[0]['initiator_addr'] == rpc_param['initiator_ip'], 1,
+           "initiator address value is {}, expected {}".format(jsonvalues[0]['initiator_addr'], rpc_param['initiator_ip']))
+ verify(jsonvalues[0]['target_addr'] == rpc_param['target_ip'], 1,
+           "target address value is {}, expected {}".format(jsonvalues[0]['target_addr'], rpc_param['target_ip']))
+
+ check_output('iscsiadm -m node --logout', shell=True)
+ check_output('iscsiadm -m node -o delete', shell=True)
+ rpc.iscsi_delete_initiator_group(initiator_tag)
+ rpc.iscsi_delete_portal_group(portal_tag)
+ rpc.iscsi_delete_target_node(name)
+ output = rpc.iscsi_get_connections()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "iscsi_get_connections returned {}, expected empty".format(jsonvalues))
+
+ print("verify_iscsi_connection_rpc_methods passed")
+
+
+def verify_scsi_devices_rpc_methods(rpc_py):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.scsi_get_devices()
+ jsonvalue = json.loads(output)
+ verify(not jsonvalue, 1,
+ "scsi_get_devices returned {}, expected empty".format(jsonvalue))
+
+ rpc.bdev_malloc_create(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
+ rpc.iscsi_create_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port'])))
+ rpc.iscsi_create_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'])
+
+ lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0"
+ net_mapping = portal_tag + ":" + initiator_tag
+ rpc.iscsi_create_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping,
+ net_mapping, rpc_param['queue_depth'], '-d')
+ check_output('iscsiadm -m discovery -t st -p {}'.format(rpc_param['target_ip']), shell=True)
+ check_output('iscsiadm -m node --login', shell=True)
+ name = json.loads(rpc.iscsi_get_target_nodes())[0]['name']
+ output = rpc.iscsi_get_options()
+ jsonvalues = json.loads(output)
+ nodebase = jsonvalues['node_base']
+ output = rpc.scsi_get_devices()
+ jsonvalues = json.loads(output)
+ verify(jsonvalues[0]['device_name'] == nodebase + ":" + rpc_param['target_name'], 1,
+           "device name value is {}, expected {}".format(jsonvalues[0]['device_name'], rpc_param['target_name']))
+ verify(jsonvalues[0]['id'] == 0, 1,
+ "device id value is {}, expected 0".format(jsonvalues[0]['id']))
+
+ check_output('iscsiadm -m node --logout', shell=True)
+ check_output('iscsiadm -m node -o delete', shell=True)
+ rpc.iscsi_delete_initiator_group(initiator_tag)
+ rpc.iscsi_delete_portal_group(portal_tag)
+ rpc.iscsi_delete_target_node(name)
+ output = rpc.scsi_get_devices()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "scsi_get_devices returned {}, expected empty".format(jsonvalues))
+
+ print("verify_scsi_devices_rpc_methods passed")
+
+
+def create_malloc_bdevs_rpc_methods(rpc_py, rpc_param):
+ rpc = spdk_rpc(rpc_py)
+
+ for i in range(1, rpc_param['lun_total'] + 1):
+ rpc.bdev_malloc_create(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
+
+ print("create_malloc_bdevs_rpc_methods passed")
+
+
+def verify_portal_groups_rpc_methods(rpc_py, rpc_param):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.iscsi_get_portal_groups()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "iscsi_get_portal_groups returned {} groups, expected empty".format(jsonvalues))
+
+ lo_ip = (target_ip, other_ip)
+ nics = json.loads(rpc.net_get_interfaces())
+ for x in nics:
+ if x["ifc_index"] == 'lo':
+ rpc.net_interface_add_ip_address(x["ifc_index"], lo_ip[1])
+ for idx, value in enumerate(lo_ip):
+ # The portal group tag must start at 1
+ tag = idx + 1
+ rpc.iscsi_create_portal_group(tag, "{}:{}".format(value, rpc_param['port']))
+ output = rpc.iscsi_get_portal_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == tag, 1,
+ "iscsi_get_portal_groups returned {} groups, expected {}".format(len(jsonvalues), tag))
+
+ tag_list = []
+ for idx, value in enumerate(jsonvalues):
+ verify(value['portals'][0]['host'] == lo_ip[idx], 1,
+ "host value is {}, expected {}".format(value['portals'][0]['host'], rpc_param['target_ip']))
+ verify(value['portals'][0]['port'] == str(rpc_param['port']), 1,
+ "port value is {}, expected {}".format(value['portals'][0]['port'], str(rpc_param['port'])))
+ tag_list.append(value['tag'])
+ verify(value['tag'] == idx + 1, 1,
+ "tag value is {}, expected {}".format(value['tag'], idx + 1))
+
+ for idx, value in enumerate(tag_list):
+ rpc.iscsi_delete_portal_group(value)
+ output = rpc.iscsi_get_portal_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == (len(tag_list) - (idx + 1)), 1,
+ "get_portal_group returned {} groups, expected {}".format(len(jsonvalues), (len(tag_list) - (idx + 1))))
+ if not jsonvalues:
+ break
+
+ for jidx, jvalue in enumerate(jsonvalues):
+ verify(jvalue['portals'][0]['host'] == lo_ip[idx + jidx + 1], 1,
+ "host value is {}, expected {}".format(jvalue['portals'][0]['host'], lo_ip[idx + jidx + 1]))
+ verify(jvalue['portals'][0]['port'] == str(rpc_param['port']), 1,
+ "port value is {}, expected {}".format(jvalue['portals'][0]['port'], str(rpc_param['port'])))
+ verify(jvalue['tag'] != value or jvalue['tag'] == tag_list[idx + jidx + 1], 1,
+ "tag value is {}, expected {} and not {}".format(jvalue['tag'], tag_list[idx + jidx + 1], value))
+
+ for x in nics:
+ if x["ifc_index"] == 'lo':
+ rpc.net_interface_delete_ip_address(x["ifc_index"], lo_ip[1])
+
+ print("verify_portal_groups_rpc_methods passed")
+
+
+def verify_initiator_groups_rpc_methods(rpc_py, rpc_param):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.iscsi_get_initiator_groups()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "iscsi_get_initiator_groups returned {}, expected empty".format(jsonvalues))
+ for idx, value in enumerate(rpc_param['netmask']):
+ # The initiator group tag must start at 1
+ tag = idx + 1
+ rpc.iscsi_create_initiator_group(tag, rpc_param['initiator_name'], value)
+ output = rpc.iscsi_get_initiator_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == tag, 1,
+ "iscsi_get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), tag))
+
+ tag_list = []
+ for idx, value in enumerate(jsonvalues):
+ verify(value['initiators'][0] == rpc_param['initiator_name'], 1,
+ "initiator value is {}, expected {}".format(value['initiators'][0], rpc_param['initiator_name']))
+ tag_list.append(value['tag'])
+ verify(value['tag'] == idx + 1, 1,
+ "tag value is {}, expected {}".format(value['tag'], idx + 1))
+ verify(value['netmasks'][0] == rpc_param['netmask'][idx], 1,
+ "netmasks value is {}, expected {}".format(value['netmasks'][0], rpc_param['netmask'][idx]))
+
+ for idx, value in enumerate(rpc_param['netmask']):
+ tag = idx + 1
+ rpc.iscsi_initiator_group_remove_initiators(tag, '-n', rpc_param['initiator_name'], '-m', value)
+
+ output = rpc.iscsi_get_initiator_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == tag, 1,
+ "iscsi_get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), tag))
+
+ for idx, value in enumerate(jsonvalues):
+ verify(value['tag'] == idx + 1, 1,
+ "tag value is {}, expected {}".format(value['tag'], idx + 1))
+ initiators = value.get('initiators')
+ verify(len(initiators) == 0, 1,
+ "length of initiator list is {}, expected 0".format(len(initiators)))
+ netmasks = value.get('netmasks')
+ verify(len(netmasks) == 0, 1,
+ "length of netmask list is {}, expected 0".format(len(netmasks)))
+
+ for idx, value in enumerate(rpc_param['netmask']):
+ tag = idx + 1
+ rpc.iscsi_initiator_group_add_initiators(tag, '-n', rpc_param['initiator_name'], '-m', value)
+ output = rpc.iscsi_get_initiator_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == tag, 1,
+ "iscsi_get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), tag))
+
+ tag_list = []
+ for idx, value in enumerate(jsonvalues):
+ verify(value['initiators'][0] == rpc_param['initiator_name'], 1,
+ "initiator value is {}, expected {}".format(value['initiators'][0], rpc_param['initiator_name']))
+ tag_list.append(value['tag'])
+ verify(value['tag'] == idx + 1, 1,
+ "tag value is {}, expected {}".format(value['tag'], idx + 1))
+ verify(value['netmasks'][0] == rpc_param['netmask'][idx], 1,
+ "netmasks value is {}, expected {}".format(value['netmasks'][0], rpc_param['netmask'][idx]))
+
+ for idx, value in enumerate(tag_list):
+ rpc.iscsi_delete_initiator_group(value)
+ output = rpc.iscsi_get_initiator_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == (len(tag_list) - (idx + 1)), 1,
+ "iscsi_get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), (len(tag_list) - (idx + 1))))
+ if not jsonvalues:
+ break
+ for jidx, jvalue in enumerate(jsonvalues):
+ verify(jvalue['initiators'][0] == rpc_param['initiator_name'], 1,
+ "initiator value is {}, expected {}".format(jvalue['initiators'][0], rpc_param['initiator_name']))
+ verify(jvalue['tag'] != value or jvalue['tag'] == tag_list[idx + jidx + 1], 1,
+ "tag value is {}, expected {} and not {}".format(jvalue['tag'], tag_list[idx + jidx + 1], value))
+ verify(jvalue['netmasks'][0] == rpc_param['netmask'][idx + jidx + 1], 1,
+ "netmasks value is {}, expected {}".format(jvalue['netmasks'][0], rpc_param['netmask'][idx + jidx + 1]))
+
+    print("verify_initiator_groups_rpc_methods passed.")
+
+
+def verify_target_nodes_rpc_methods(rpc_py, rpc_param):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.iscsi_get_options()
+ jsonvalues = json.loads(output)
+ nodebase = jsonvalues['node_base']
+ output = rpc.iscsi_get_target_nodes()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "iscsi_get_target_nodes returned {}, expected empty".format(jsonvalues))
+
+ rpc.bdev_malloc_create(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
+ rpc.iscsi_create_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port'])))
+ rpc.iscsi_create_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'])
+
+ lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0"
+ net_mapping = portal_tag + ":" + initiator_tag
+ rpc.iscsi_create_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping,
+ net_mapping, rpc_param['queue_depth'], '-d')
+ output = rpc.iscsi_get_target_nodes()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == 1, 1,
+ "iscsi_get_target_nodes returned {} nodes, expected 1".format(len(jsonvalues)))
+ bdev_name = jsonvalues[0]['luns'][0]['bdev_name']
+ verify(bdev_name == "Malloc" + str(rpc_param['lun_total']), 1,
+ "bdev_name value is {}, expected Malloc{}".format(jsonvalues[0]['luns'][0]['bdev_name'], str(rpc_param['lun_total'])))
+ name = jsonvalues[0]['name']
+ verify(name == nodebase + ":" + rpc_param['target_name'], 1,
+ "target name value is {}, expected {}".format(name, nodebase + ":" + rpc_param['target_name']))
+ verify(jsonvalues[0]['alias_name'] == rpc_param['alias_name'], 1,
+ "target alias_name value is {}, expected {}".format(jsonvalues[0]['alias_name'], rpc_param['alias_name']))
+ verify(jsonvalues[0]['luns'][0]['lun_id'] == 0, 1,
+ "lun id value is {}, expected 0".format(jsonvalues[0]['luns'][0]['lun_id']))
+ verify(jsonvalues[0]['pg_ig_maps'][0]['ig_tag'] == int(initiator_tag), 1,
+ "initiator group tag value is {}, expected {}".format(jsonvalues[0]['pg_ig_maps'][0]['ig_tag'], initiator_tag))
+ verify(jsonvalues[0]['queue_depth'] == rpc_param['queue_depth'], 1,
+ "queue depth value is {}, expected {}".format(jsonvalues[0]['queue_depth'], rpc_param['queue_depth']))
+ verify(jsonvalues[0]['pg_ig_maps'][0]['pg_tag'] == int(portal_tag), 1,
+ "portal group tag value is {}, expected {}".format(jsonvalues[0]['pg_ig_maps'][0]['pg_tag'], portal_tag))
+ verify(jsonvalues[0]['disable_chap'] == rpc_param['disable_chap'], 1,
+ "disable chap value is {}, expected {}".format(jsonvalues[0]['disable_chap'], rpc_param['disable_chap']))
+ verify(jsonvalues[0]['mutual_chap'] == rpc_param['mutual_chap'], 1,
+ "chap mutual value is {}, expected {}".format(jsonvalues[0]['mutual_chap'], rpc_param['mutual_chap']))
+ verify(jsonvalues[0]['require_chap'] == rpc_param['require_chap'], 1,
+ "chap required value is {}, expected {}".format(jsonvalues[0]['require_chap'], rpc_param['require_chap']))
+ verify(jsonvalues[0]['chap_group'] == rpc_param['chap_group'], 1,
+ "chap auth group value is {}, expected {}".format(jsonvalues[0]['chap_group'], rpc_param['chap_group']))
+ verify(jsonvalues[0]['header_digest'] == rpc_param['header_digest'], 1,
+ "header digest value is {}, expected {}".format(jsonvalues[0]['header_digest'], rpc_param['header_digest']))
+ verify(jsonvalues[0]['data_digest'] == rpc_param['data_digest'], 1,
+ "data digest value is {}, expected {}".format(jsonvalues[0]['data_digest'], rpc_param['data_digest']))
+ lun_id = '1'
+ rpc.iscsi_target_node_add_lun(name, bdev_name, "-i", lun_id)
+ output = rpc.iscsi_get_target_nodes()
+ jsonvalues = json.loads(output)
+ verify(jsonvalues[0]['luns'][1]['bdev_name'] == "Malloc" + str(rpc_param['lun_total']), 1,
+ "bdev_name value is {}, expected Malloc{}".format(jsonvalues[0]['luns'][0]['bdev_name'], str(rpc_param['lun_total'])))
+ verify(jsonvalues[0]['luns'][1]['lun_id'] == 1, 1,
+ "lun id value is {}, expected 1".format(jsonvalues[0]['luns'][1]['lun_id']))
+
+ rpc.iscsi_delete_target_node(name)
+ output = rpc.iscsi_get_target_nodes()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "iscsi_get_target_nodes returned {}, expected empty".format(jsonvalues))
+
+ rpc.iscsi_create_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping,
+ net_mapping, rpc_param['queue_depth'], '-d')
+
+ rpc.iscsi_delete_portal_group(portal_tag)
+ rpc.iscsi_delete_initiator_group(initiator_tag)
+ rpc.iscsi_delete_target_node(name)
+ output = rpc.iscsi_get_target_nodes()
+ jsonvalues = json.loads(output)
+ if not jsonvalues:
+ print("This issue will be fixed later.")
+
+ print("verify_target_nodes_rpc_methods passed.")
+
+
+def verify_net_get_interfaces(rpc_py):
+ rpc = spdk_rpc(rpc_py)
+ nics = json.loads(rpc.net_get_interfaces())
+ nics_names = set(x["name"] for x in nics)
+ # parse ip link show to verify the net_get_interfaces result
+ ip_show = ns_cmd + " ip link show"
+ ifcfg_nics = set(re.findall(r'\S+:\s(\S+?)(?:@\S+){0,1}:\s<.*', check_output(ip_show.split()).decode()))
+ verify(nics_names == ifcfg_nics, 1, "net_get_interfaces returned {}".format(nics))
+ print("verify_net_get_interfaces passed.")
+
+
+def help_get_interface_ip_list(rpc_py, nic_name):
+ rpc = spdk_rpc(rpc_py)
+ nics = json.loads(rpc.net_get_interfaces())
+ nic = list([x for x in nics if x["name"] == nic_name])
+ verify(len(nic) != 0, 1,
+ "Nic name: {} is not found in {}".format(nic_name, [x["name"] for x in nics]))
+ return nic[0]["ip_addr"]
+
+
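+# For each of the first two interfaces: add a random fake IP, confirm it shows up
+# on the interface and is pingable, then delete it and confirm the ping fails again.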
+def verify_net_interface_add_delete_ip_address(rpc_py):
+ rpc = spdk_rpc(rpc_py)
+ nics = json.loads(rpc.net_get_interfaces())
+ # add ip on up to first 2 nics
+ for x in nics[:2]:
+ faked_ip = "123.123.{}.{}".format(random.randint(1, 254), random.randint(1, 254))
+ ping_cmd = ns_cmd + " ping -c 1 -W 1 " + faked_ip
+ rpc.net_interface_add_ip_address(x["ifc_index"], faked_ip)
+ verify(faked_ip in help_get_interface_ip_list(rpc_py, x["name"]), 1,
+ "add ip {} to nic {} failed.".format(faked_ip, x["name"]))
+ try:
+ check_call(ping_cmd.split())
+ except BaseException:
+ verify(False, 1,
+                   "ping ip {} for {} failed (adding was successful)".format(
+                       faked_ip, x["name"]))
+ rpc.net_interface_delete_ip_address(x["ifc_index"], faked_ip)
+ verify(faked_ip not in help_get_interface_ip_list(rpc_py, x["name"]), 1,
+               "delete ip {} from nic {} failed (adding and ping were successful)".format(
+                   faked_ip, x["name"]))
+        # ping should fail and raise a CalledProcessError exception
+ try:
+ check_call(ping_cmd.split())
+ except CalledProcessError as _:
+ pass
+ except Exception as e:
+ verify(False, 1,
+ "Unexpected exception was caught {}(adding/ping/delete were successful)".format
+ (str(e)))
+ else:
+ verify(False, 1,
+                   "ip {} for {} could still be pinged after the ip was deleted (adding/ping/delete were successful)".format(
+                       faked_ip, x["name"]))
+ print("verify_net_interface_add_delete_ip_address passed.")
+
+
+if __name__ == "__main__":
+
+ rpc_py = sys.argv[1]
+
+ try:
+ verify_log_flag_rpc_methods(rpc_py, rpc_param)
+ verify_net_get_interfaces(rpc_py)
+        # Adding/deleting an IP address is not supported with VPP;
+        # it has a separate vppctl utility for that.
+ if test_type == 'posix':
+ verify_net_interface_add_delete_ip_address(rpc_py)
+ create_malloc_bdevs_rpc_methods(rpc_py, rpc_param)
+ verify_portal_groups_rpc_methods(rpc_py, rpc_param)
+ verify_initiator_groups_rpc_methods(rpc_py, rpc_param)
+ verify_target_nodes_rpc_methods(rpc_py, rpc_param)
+ verify_scsi_devices_rpc_methods(rpc_py)
+ verify_iscsi_connection_rpc_methods(rpc_py)
+ except RpcException as e:
+ print("{}. Exiting with status {}".format(e.message, e.retval))
+ raise e
+ except Exception as e:
+ raise e
+
+ sys.exit(0)
diff --git a/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.sh b/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.sh
new file mode 100755
index 000000000..ce54b4ab2
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = test type posix or vpp.
+# $2 = "iso" - triggers isolation mode (setting up required environment).
+iscsitestinit $2 $1
+
+if [ "$1" == "posix" ] || [ "$1" == "vpp" ]; then
+ TEST_TYPE=$1
+else
+ echo "No iSCSI test type specified"
+ exit 1
+fi
+
+MALLOC_BDEV_SIZE=64
+
+rpc_py=$rootdir/scripts/rpc.py
+rpc_config_py="$testdir/rpc_config.py"
+
+timing_enter start_iscsi_tgt
+
+"${ISCSI_APP[@]}" --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap 'killprocess $pid; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py framework_wait_init &
+rpc_wait_pid=$!
+$rpc_py iscsi_set_options -o 30 -a 16
+
+# RPC framework_wait_init should be blocked, so its process must still exist
+ps $rpc_wait_pid
+
+$rpc_py framework_start_init
+echo "iscsi_tgt is listening. Running tests..."
+
+# RPC framework_wait_init should have already returned, so its process must no longer exist
+! ps $rpc_wait_pid
+
+# RPC framework_wait_init will return immediately once the subsystems are initialized.
+$rpc_py framework_wait_init &
+rpc_wait_pid=$!
+sleep 1
+! ps $rpc_wait_pid
+
+timing_exit start_iscsi_tgt
+
+$rpc_config_py $rpc_py $TARGET_IP $INITIATOR_IP $ISCSI_PORT $NETMASK $TARGET_NAMESPACE $TEST_TYPE
+
+$rpc_py bdev_get_bdevs
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+killprocess $pid
+
+iscsitestfini $2 $1
diff --git a/src/spdk/test/iscsi_tgt/sock/sock.sh b/src/spdk/test/iscsi_tgt/sock/sock.sh
new file mode 100755
index 000000000..14615d3bc
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/sock/sock.sh
@@ -0,0 +1,142 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+function waitfortcp() {
+ local addr="$2"
+
+ if hash ip &> /dev/null; then
+ local have_ip_cmd=true
+ else
+ local have_ip_cmd=false
+ fi
+
+ if hash ss &> /dev/null; then
+ local have_ss_cmd=true
+ else
+ local have_ss_cmd=false
+ fi
+
+ echo "Waiting for process to start up and listen on address $addr..."
+ # turn off trace for this loop
+ xtrace_disable
+ local ret=0
+ local i
+ for ((i = 40; i != 0; i--)); do
+ # if the process is no longer running, then exit the script
+ # since it means the application crashed
+ if ! kill -s 0 $1; then
+ echo "ERROR: process (pid: $1) is no longer running"
+ ret=1
+ break
+ fi
+
+ if $have_ip_cmd; then
+ namespace=$(ip netns identify $1)
+ if [ -n "$namespace" ]; then
+ ns_cmd="ip netns exec $namespace"
+ fi
+ fi
+
+ if $have_ss_cmd; then
+ if $ns_cmd ss -ln | grep -E -q "\s+$addr\s+"; then
+ break
+ fi
+ elif [[ "$(uname -s)" == "Linux" ]]; then
+ # For Linux, if system doesn't have ss, just assume it has netstat
+ if $ns_cmd netstat -an | grep -iw LISTENING | grep -E -q "\s+$addr\$"; then
+ break
+ fi
+ fi
+ sleep 0.5
+ done
+
+ xtrace_restore
+ if ((i == 0)); then
+ echo "ERROR: timeout while waiting for process (pid: $1) to start listening on '$addr'"
+ ret=1
+ fi
+ return $ret
+}
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+if [ "$1" == "iso" ]; then
+ TEST_TYPE=$2
+else
+ TEST_TYPE=$1
+fi
+
+if [ -z "$TEST_TYPE" ]; then
+ TEST_TYPE="posix"
+fi
+
+if [ "$TEST_TYPE" != "posix" ] && [ "$TEST_TYPE" != "vpp" ]; then
+	echo "No correct sock implementation specified"
+ exit 1
+fi
+
+HELLO_SOCK_APP="${TARGET_NS_CMD[*]} $SPDK_EXAMPLE_DIR/hello_sock"
+if [ $SPDK_TEST_VPP -eq 1 ]; then
+ HELLO_SOCK_APP+=" -L sock_vpp"
+fi
+SOCAT_APP="socat"
+
+# ----------------
+# Test client path
+# ----------------
+timing_enter sock_client
+echo "Testing client path"
+
+# start echo server using socat
+$SOCAT_APP tcp-l:$ISCSI_PORT,fork,bind=$INITIATOR_IP exec:'/bin/cat' &
+server_pid=$!
+trap 'killprocess $server_pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+waitfortcp $server_pid $INITIATOR_IP:$ISCSI_PORT
+
+# send message using hello_sock client
+message="**MESSAGE:This is a test message from the client**"
+response=$(echo $message | $HELLO_SOCK_APP -H $INITIATOR_IP -P $ISCSI_PORT -N $TEST_TYPE)
+
+if ! echo "$response" | grep -q "$message"; then
+ exit 1
+fi
+
+trap - SIGINT SIGTERM EXIT
+# NOTE: socat exits with code 143 (SIGTERM) when killed, so ignore its exit status
+killprocess $server_pid || true
+
+timing_exit sock_client
+
+# ----------------
+# Test server path
+# ----------------
+
+timing_enter sock_server
+
+# start echo server using hello_sock echo server
+$HELLO_SOCK_APP -H $TARGET_IP -P $ISCSI_PORT -S -N $TEST_TYPE &
+server_pid=$!
+trap 'killprocess $server_pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $server_pid
+
+# send message to server using socat
+message="**MESSAGE:This is a test message to the server**"
+response=$(echo $message | $SOCAT_APP - tcp:$TARGET_IP:$ISCSI_PORT 2> /dev/null)
+
+if [ "$message" != "$response" ]; then
+ exit 1
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $server_pid
+
+iscsitestfini $1 $2
+timing_exit sock_server
diff --git a/src/spdk/test/iscsi_tgt/trace_record/trace_record.sh b/src/spdk/test/iscsi_tgt/trace_record/trace_record.sh
new file mode 100755
index 000000000..baa7f39d4
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/trace_record/trace_record.sh
@@ -0,0 +1,135 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+# $1 = "iso" - triggers isolation mode (setting up required environment).
+# $2 = test type posix or vpp. defaults to posix.
+iscsitestinit $1 $2
+
+TRACE_TMP_FOLDER=./tmp-trace
+TRACE_RECORD_OUTPUT=${TRACE_TMP_FOLDER}/record.trace
+TRACE_RECORD_NOTICE_LOG=${TRACE_TMP_FOLDER}/record.notice
+TRACE_TOOL_LOG=${TRACE_TMP_FOLDER}/trace.log
+
+delete_tmp_files() {
+ rm -rf $TRACE_TMP_FOLDER
+}
+
+if [ -z "$TARGET_IP" ]; then
+ echo "TARGET_IP not defined in environment"
+ exit 1
+fi
+
+if [ -z "$INITIATOR_IP" ]; then
+ echo "INITIATOR_IP not defined in environment"
+ exit 1
+fi
+
+NUM_TRACE_ENTRIES=4096
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=4096
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+echo "start iscsi_tgt with trace enabled"
+"${ISCSI_APP[@]}" -m 0xf --num-trace-entries $NUM_TRACE_ENTRIES --tpoint-group-mask 0xf &
+iscsi_pid=$!
+echo "Process pid: $iscsi_pid"
+
+trap 'killprocess $iscsi_pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $iscsi_pid
+
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+mkdir -p ${TRACE_TMP_FOLDER}
+./build/bin/spdk_trace_record -s iscsi -p ${iscsi_pid} -f ${TRACE_RECORD_OUTPUT} -q 1> ${TRACE_RECORD_NOTICE_LOG} &
+record_pid=$!
+echo "Trace record pid: $record_pid"
+
+RPCS=
+RPCS+="iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT\n"
+RPCS+="iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK\n"
+
+echo "Create bdevs and target nodes"
+CONNECTION_NUMBER=15
+for i in $(seq 0 $CONNECTION_NUMBER); do
+ RPCS+="bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i\n"
+ RPCS+="iscsi_create_target_node Target$i Target${i}_alias "Malloc$i:0" $PORTAL_TAG:$INITIATOR_TAG 256 -d\n"
+done
+echo -e $RPCS | $rpc_py
+
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+waitforiscsidevices $((CONNECTION_NUMBER + 1))
+
+trap 'iscsicleanup; killprocess $iscsi_pid; killprocess $record_pid; delete_tmp_files; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+echo "Running FIO"
+$fio_py -p iscsi -i 131072 -d 32 -t randrw -r 1
+
+iscsicleanup
+
+RPCS=
+# Delete Malloc blockdevs and targets
+for i in $(seq 0 $CONNECTION_NUMBER); do
+ RPCS+="iscsi_delete_target_node iqn.2016-06.io.spdk:Target$i\n"
+ RPCS+="bdev_malloc_delete Malloc$i\n"
+done
+echo -e $RPCS | $rpc_py
+
+trap 'delete_tmp_files; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
+
+killprocess $iscsi_pid
+killprocess $record_pid
+./build/bin/spdk_trace -f ${TRACE_RECORD_OUTPUT} > ${TRACE_TOOL_LOG}
+
+# Verify the trace record output against the trace tool output.
+# Trace entries line in the record notice log looks like: "Port 4096 trace entries for lcore (0) in 441871 msec"
+record_num="$(grep "trace entries for lcore" ${TRACE_RECORD_NOTICE_LOG} | cut -d ' ' -f 2)"
+
+# Trace size line in the trace tool log looks like: "Trace Size of lcore (0): 4136"
+trace_tool_num="$(grep "Trace Size of lcore" ${TRACE_TOOL_LOG} | cut -d ' ' -f 6)"
+
+delete_tmp_files
+
+echo "entries numbers from trace record are:" $record_num
+echo "entries numbers from trace tool are:" $trace_tool_num
+
+arr_record_num=($record_num)
+arr_trace_tool_num=($trace_tool_num)
+len_arr_record_num=${#arr_record_num[@]}
+len_arr_trace_tool_num=${#arr_trace_tool_num[@]}
+
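+# Both tools must report counts for the same number of lcores; each per-lcore entry
+# count must match between them and must exceed NUM_TRACE_ENTRIES, i.e. more than
+# one ring's worth of entries was captured.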
+#lcore num check
+if [ $len_arr_record_num -ne $len_arr_trace_tool_num ]; then
+ echo "trace record test on iscsi: failure on lcore number check"
+ set -e
+ exit 1
+fi
+#trace entries num check
+for i in $(seq 0 $((len_arr_record_num - 1))); do
+ if [ ${arr_record_num[$i]} -le ${NUM_TRACE_ENTRIES} ]; then
+		echo "trace record test on iscsi: failure on insufficient entries number check"
+ set -e
+ exit 1
+ fi
+ if [ ${arr_record_num[$i]} -ne ${arr_trace_tool_num[$i]} ]; then
+ echo "trace record test on iscsi: failure on entries number check"
+ set -e
+ exit 1
+ fi
+done
+
+trap - SIGINT SIGTERM EXIT
+iscsitestfini $1 $2
diff --git a/src/spdk/test/json_config/alias_rpc/alias_rpc.sh b/src/spdk/test/json_config/alias_rpc/alias_rpc.sh
new file mode 100755
index 000000000..25e07fae4
--- /dev/null
+++ b/src/spdk/test/json_config/alias_rpc/alias_rpc.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+
+trap 'killprocess $spdk_tgt_pid; exit 1' ERR
+
+$SPDK_BIN_DIR/spdk_tgt &
+spdk_tgt_pid=$!
+waitforlisten $spdk_tgt_pid
+
+# Test deprecated rpcs in json
+$rootdir/scripts/rpc.py load_config -i < $testdir/conf.json
+
+# Test deprecated rpcs in rpc.py
+$rootdir/scripts/rpc.py delete_malloc_bdev "Malloc0"
+$rootdir/scripts/rpc.py delete_malloc_bdev "Malloc1"
+
+killprocess $spdk_tgt_pid
diff --git a/src/spdk/test/json_config/alias_rpc/conf.json b/src/spdk/test/json_config/alias_rpc/conf.json
new file mode 100644
index 000000000..ba3e6b254
--- /dev/null
+++ b/src/spdk/test/json_config/alias_rpc/conf.json
@@ -0,0 +1,44 @@
+{
+ "subsystems": [
+ {
+ "subsystem": "accel",
+ "config": []
+ },
+ {
+ "subsystem": "interface",
+ "config": null
+ },
+ {
+ "subsystem": "net_framework",
+ "config": null
+ },
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "params": {
+ "block_size": 4096,
+ "num_blocks": 32
+ },
+ "method": "construct_malloc_bdev"
+ },
+ {
+ "params": {
+ "name": "Malloc1",
+ "block_size": 4096,
+ "num_blocks": 32
+ },
+ "method": "construct_malloc_bdev"
+ }
+ ]
+ },
+ {
+ "subsystem": "nbd",
+ "config": []
+ },
+ {
+ "subsystem": "scsi",
+ "config": null
+ }
+ ]
+}
diff --git a/src/spdk/test/json_config/clear_config.py b/src/spdk/test/json_config/clear_config.py
new file mode 100755
index 000000000..bac1beebb
--- /dev/null
+++ b/src/spdk/test/json_config/clear_config.py
@@ -0,0 +1,215 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import argparse
+import logging
+sys.path.append(os.path.join(os.path.dirname(__file__), "../../scripts"))
+import rpc # noqa
+from rpc.client import print_dict, JSONRPCException # noqa
+
+
+def get_bdev_name_key(bdev):
+ bdev_name_key = 'name'
+ if 'method' in bdev and bdev['method'] == 'bdev_split_create':
+ bdev_name_key = "base_bdev"
+ return bdev_name_key
+
+
+def get_bdev_name(bdev):
+ bdev_name = None
+ if 'params' in bdev:
+ if 'name' in bdev['params']:
+ bdev_name = bdev['params']['name']
+ elif 'base_name' in bdev['params']:
+ bdev_name = bdev['params']['base_name']
+ elif 'base_bdev' in bdev['params']:
+ bdev_name = bdev['params']['base_bdev']
+ if 'method' in bdev and bdev['method'] == 'bdev_error_create':
+ bdev_name = "EE_%s" % bdev_name
+ return bdev_name
+
+
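+# Map each bdev construct RPC to its matching delete RPC; bdevs created by a
+# method that is not in this map are simply left in place.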
+def get_bdev_delete_method(bdev):
+ delete_method_map = {'bdev_malloc_create': "bdev_malloc_delete",
+ 'bdev_null_create': "bdev_null_delete",
+ 'bdev_rbd_create': "bdev_rbd_delete",
+ 'bdev_pmem_create': "bdev_pmem_delete",
+ 'bdev_aio_create': "bdev_aio_delete",
+ 'bdev_error_create': "bdev_error_delete",
+ 'construct_split_vbdev': "destruct_split_vbdev",
+ 'bdev_virtio_attach_controller': "remove_virtio_bdev",
+ 'bdev_crypto_create': "bdev_crypto_delete",
+ 'bdev_delay_create': "bdev_delay_delete",
+ 'bdev_passthru_create': "bdev_passthru_delete",
+ 'bdev_compress_create': 'bdev_compress_delete',
+ }
+ destroy_method = None
+ if 'method' in bdev:
+ construct_method = bdev['method']
+ if construct_method in list(delete_method_map.keys()):
+ destroy_method = delete_method_map[construct_method]
+
+ return destroy_method
+
+
+def clear_bdev_subsystem(args, bdev_config):
+ rpc_bdevs = args.client.call("bdev_get_bdevs")
+ for bdev in bdev_config:
+ bdev_name_key = get_bdev_name_key(bdev)
+ bdev_name = get_bdev_name(bdev)
+ destroy_method = get_bdev_delete_method(bdev)
+ if destroy_method:
+ args.client.call(destroy_method, {bdev_name_key: bdev_name})
+
+ nvme_controllers = args.client.call("bdev_nvme_get_controllers")
+ for ctrlr in nvme_controllers:
+ args.client.call('bdev_nvme_detach_controller', {'name': ctrlr['name']})
+
+ ''' Disable and reset hotplug '''
+ rpc.bdev.bdev_nvme_set_hotplug(args.client, False)
+
+
+def get_nvmf_destroy_method(nvmf):
+ delete_method_map = {'nvmf_create_subsystem': "nvmf_delete_subsystem"}
+ try:
+ return delete_method_map[nvmf['method']]
+ except KeyError:
+ return None
+
+
+def clear_nvmf_subsystem(args, nvmf_config):
+ for nvmf in nvmf_config:
+ destroy_method = get_nvmf_destroy_method(nvmf)
+ if destroy_method:
+ args.client.call(destroy_method, {'nqn': nvmf['params']['nqn']})
+
+
+def get_iscsi_destroy_method(iscsi):
+ delete_method_map = {'iscsi_create_portal_group': "iscsi_delete_portal_group",
+ 'iscsi_create_initiator_group': "iscsi_delete_initiator_group",
+ 'iscsi_create_target_node': "iscsi_delete_target_node",
+ 'iscsi_set_options': None
+ }
+ return delete_method_map[iscsi['method']]
+
+
+def get_iscsi_name(iscsi):
+ if 'name' in iscsi['params']:
+ return iscsi['params']['name']
+ else:
+ return iscsi['params']['tag']
+
+
+def get_iscsi_name_key(iscsi):
+ if iscsi['method'] == 'iscsi_create_target_node':
+ return "name"
+ else:
+ return 'tag'
+
+
+def clear_iscsi_subsystem(args, iscsi_config):
+ for iscsi in iscsi_config:
+ destroy_method = get_iscsi_destroy_method(iscsi)
+ if destroy_method:
+ args.client.call(destroy_method, {get_iscsi_name_key(iscsi): get_iscsi_name(iscsi)})
+
+
+def get_nbd_destroy_method(nbd):
+ delete_method_map = {'nbd_start_disk': "nbd_stop_disk"
+ }
+ return delete_method_map[nbd['method']]
+
+
+def clear_nbd_subsystem(args, nbd_config):
+ for nbd in nbd_config:
+ destroy_method = get_nbd_destroy_method(nbd)
+ if destroy_method:
+ args.client.call(destroy_method, {'nbd_device': nbd['params']['nbd_device']})
+
+
+def clear_net_framework_subsystem(args, net_framework_config):
+ pass
+
+
+def clear_accel_subsystem(args, accel_config):
+ pass
+
+
+def clear_interface_subsystem(args, interface_config):
+ pass
+
+
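+# Walk the vhost config in reverse so SCSI targets are removed from their
+# controllers before the controllers themselves are deleted.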
+def clear_vhost_subsystem(args, vhost_config):
+ for vhost in reversed(vhost_config):
+ if 'method' in vhost:
+ method = vhost['method']
+ if method in ['vhost_scsi_controller_add_target']:
+ args.client.call("vhost_scsi_controller_remove_target",
+ {"ctrlr": vhost['params']['ctrlr'],
+ "scsi_target_num": vhost['params']['scsi_target_num']})
+ elif method in ['vhost_create_scsi_controller', 'vhost_create_blk_controller',
+ 'vhost_create_nvme_controller']:
+ args.client.call("vhost_delete_controller", {'ctrlr': vhost['params']['ctrlr']})
+
+
+def clear_vmd_subsystem(args, vmd_config):
+ pass
+
+
+def clear_sock_subsystem(args, sock_config):
+ pass
+
+
+def call_test_cmd(func):
+ def rpc_test_cmd(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except JSONRPCException as ex:
+ print((ex.message))
+ exit(1)
+ return rpc_test_cmd
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Clear config command')
+ parser.add_argument('-s', dest='server_addr', default='/var/tmp/spdk.sock')
+ parser.add_argument('-p', dest='port', default=5260, type=int)
+ parser.add_argument('-t', dest='timeout', default=60.0, type=float)
+ parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
+ help='Set verbose mode to INFO', default="ERROR")
+ parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
+ help="""Set verbose level. """)
+ subparsers = parser.add_subparsers(help='RPC methods')
+
+ @call_test_cmd
+ def clear_config(args):
+ for subsystem_item in reversed(args.client.call('framework_get_subsystems')):
+ args.subsystem = subsystem_item['subsystem']
+ clear_subsystem(args)
+
+ p = subparsers.add_parser('clear_config', help="""Clear configuration of all SPDK subsystems and targets using JSON RPC""")
+ p.set_defaults(func=clear_config)
+
+ @call_test_cmd
+ def clear_subsystem(args):
+ config = args.client.call('framework_get_config', {"name": args.subsystem})
+ if config is None:
+ return
+ if args.verbose:
+ print("Calling clear_%s_subsystem" % args.subsystem)
+ globals()["clear_%s_subsystem" % args.subsystem](args, config)
+
+ p = subparsers.add_parser('clear_subsystem', help="""Clear configuration of SPDK subsystem using JSON RPC""")
+ p.add_argument('--subsystem', help="""Subsystem name""")
+ p.set_defaults(func=clear_subsystem)
+
+ args = parser.parse_args()
+
+ with rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper())) as client:
+ try:
+ args.client = client
+ args.func(args)
+ except JSONRPCException as ex:
+ print((ex.message))
+ exit(1)
diff --git a/src/spdk/test/json_config/config_filter.py b/src/spdk/test/json_config/config_filter.py
new file mode 100755
index 000000000..cde2e24f9
--- /dev/null
+++ b/src/spdk/test/json_config/config_filter.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+
+import sys
+import json
+import argparse
+from collections import OrderedDict
+
+
+def sort_json_object(o):
+ if isinstance(o, dict):
+ sorted_o = OrderedDict()
+ """ Order of keys in JSON object is irrelevant but we need to pick one
+        to be able to compare JSONs. """
+ for key in sorted(o.keys()):
+ sorted_o[key] = sort_json_object(o[key])
+ return sorted_o
+ if isinstance(o, list):
+        """ Keep the list in the same order but sort each item """
+ return [sort_json_object(item) for item in o]
+ else:
+ return o
+
+
+def filter_methods(do_remove_global_rpcs):
+ global_rpcs = [
+ 'idxd_scan_accel_engine',
+ 'iscsi_set_options',
+ 'nvmf_set_config',
+ 'nvmf_set_max_subsystems',
+ 'nvmf_create_transport',
+ 'bdev_set_options',
+ 'bdev_nvme_set_options',
+ 'bdev_nvme_set_hotplug',
+ 'sock_impl_set_options',
+ ]
+
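+    # global_rpcs lists the pre-init RPCs (issued before framework_start_init).
+    # Depending on the flag we either strip them, keeping only the runtime
+    # configuration, or keep only them.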
+ data = json.loads(sys.stdin.read())
+ out = {'subsystems': []}
+ for s in data['subsystems']:
+ if s['config']:
+ s_config = []
+ for config in s['config']:
+ m_name = config['method']
+ is_global_rpc = m_name in global_rpcs
+ if do_remove_global_rpcs != is_global_rpc:
+ s_config.append(config)
+ else:
+ s_config = None
+ out['subsystems'].append({
+ 'subsystem': s['subsystem'],
+ 'config': s_config,
+ })
+
+ print(json.dumps(out, indent=2))
+
+
+def check_empty():
+ data = json.loads(sys.stdin.read())
+ if not data:
+        raise EOFError("Can't read config!")
+
+ for s in data['subsystems']:
+ if s['config']:
+ print("Config not empty")
+ print(s['config'])
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument('-method', dest='method', default=None,
+ help="""One of the methods:
+check_empty
+ check if provided configuration is logically empty
+delete_global_parameters
+ remove pre-init configuration (pre framework_start_init RPC methods)
+delete_configs
+ remove post-init configuration (post framework_start_init RPC methods)
+sort
+ remove nothing - just sort JSON objects (and subobjects but not arrays)
+ in lexicographical order. This can be used to do plain text diff.""")
+
+ args = parser.parse_args()
+ if args.method == "delete_global_parameters":
+ filter_methods(True)
+ elif args.method == "delete_configs":
+ filter_methods(False)
+ elif args.method == "check_empty":
+ check_empty()
+ elif args.method == "sort":
+ """ Wrap input into JSON object so any input is possible here
+ like output from bdev_get_bdevs RPC method"""
+ o = json.loads('{ "the_object": ' + sys.stdin.read() + ' }')
+ print(json.dumps(sort_json_object(o)['the_object'], indent=2))
+ else:
+ raise ValueError("Invalid method '{}'\n\n{}".format(args.method, parser.format_help()))
diff --git a/src/spdk/test/json_config/json_config.sh b/src/spdk/test/json_config/json_config.sh
new file mode 100755
index 000000000..03d6bd5bd
--- /dev/null
+++ b/src/spdk/test/json_config/json_config.sh
@@ -0,0 +1,475 @@
+#!/usr/bin/env bash
+
+rootdir=$(readlink -f $(dirname $0)/../..)
+source "$rootdir/test/common/autotest_common.sh"
+source "$rootdir/test/nvmf/common.sh"
+
+if [[ $SPDK_TEST_ISCSI -eq 1 ]]; then
+ source "$rootdir/test/iscsi_tgt/common.sh"
+fi
+
+if [[ $SPDK_TEST_VHOST -ne 1 && $SPDK_TEST_VHOST_INIT -eq 1 ]]; then
+ SPDK_TEST_VHOST=1
+ echo "WARNING: Virtio initiator JSON_config test requires vhost target."
+ echo " Setting SPDK_TEST_VHOST=1 for duration of current script."
+fi
+
+if ((SPDK_TEST_BLOCKDEV + \
+ SPDK_TEST_ISCSI + \
+ SPDK_TEST_NVMF + \
+ SPDK_TEST_VHOST + \
+ SPDK_TEST_VHOST_INIT + \
+ SPDK_TEST_PMDK + \
+ SPDK_TEST_RBD == 0)); then
+ echo "WARNING: No tests are enabled so not running JSON configuration tests"
+ exit 0
+fi
+
+declare -A app_pid=([target]="" [initiator]="")
+declare -A app_socket=([target]='/var/tmp/spdk_tgt.sock' [initiator]='/var/tmp/spdk_initiator.sock')
+declare -A app_params=([target]='-m 0x1 -s 1024' [initiator]='-m 0x2 -g -u -s 1024')
+declare -A configs_path=([target]="$rootdir/spdk_tgt_config.json" [initiator]="$rootdir/spdk_initiator_config.json")
+
+function tgt_rpc() {
+ $rootdir/scripts/rpc.py -s "${app_socket[target]}" "$@"
+}
+
+function initiator_rpc() {
+ $rootdir/scripts/rpc.py -s "${app_socket[initiator]}" "$@"
+}
+
+RE_UUID="[[:alnum:]-]+"
+last_event_id=0
+
+function tgt_check_notification_types() {
+ timing_enter "${FUNCNAME[0]}"
+
+ local ret=0
+ local enabled_types=("bdev_register" "bdev_unregister")
+
+ local get_types=($(tgt_rpc notify_get_types | jq -r '.[]'))
+ if [[ ${enabled_types[*]} != "${get_types[*]}" ]]; then
+ echo "ERROR: expected types: ${enabled_types[*]}, but got: ${get_types[*]}"
+ ret=1
+ fi
+
+ timing_exit "${FUNCNAME[0]}"
+ return $ret
+}
+
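+# Each argument is an expected "type:ctx" pattern (regex). Notifications are
+# fetched starting from $last_event_id and matched against the arguments in order.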
+function tgt_check_notifications() {
+ local event_line event ev_type ev_ctx
+ local rc=""
+
+ while read -r event_line; do
+ # remove ID
+ event="${event_line%:*}"
+
+ ev_type=${event%:*}
+ ev_ctx=${event#*:}
+
+ ex_ev_type=${1%%:*}
+ ex_ev_ctx=${1#*:}
+
+ last_event_id=${event_line##*:}
+
+ # set rc=false in case of failure so all errors can be printed
+ if (($# == 0)); then
+ echo "ERROR: got extra event: $event_line"
+ rc=false
+ continue
+ elif ! echo "$ev_type" | grep -E -q "^${ex_ev_type}\$" || ! echo "$ev_ctx" | grep -E -q "^${ex_ev_ctx}\$"; then
+ echo "ERROR: expected event '$1' but got '$event' (whole event line: $event_line)"
+ rc=false
+ fi
+
+ shift
+ done < <(tgt_rpc notify_get_notifications -i ${last_event_id} | jq -r '.[] | "\(.type):\(.ctx):\(.id)"')
+
+ $rc
+
+ if (($# != 0)); then
+ echo "ERROR: missing events:"
+ echo "$@"
+ return 1
+ fi
+}
+
+# $1 - target / initiator
+# $2..$n app parameters
+function json_config_test_start_app() {
+ local app=$1
+ shift
+
+ [[ -n "${#app_socket[$app]}" ]] # Check app type
+ [[ -z "${app_pid[$app]}" ]] # Assert if app is not running
+
+ local app_extra_params=""
+ if [[ $SPDK_TEST_VHOST -eq 1 || $SPDK_TEST_VHOST_INIT -eq 1 ]]; then
+ # If PWD is nfs/sshfs we can't create UNIX sockets there. Always use safe location instead.
+ app_extra_params='-S /var/tmp'
+ fi
+
+ $SPDK_BIN_DIR/spdk_tgt ${app_params[$app]} ${app_extra_params} -r ${app_socket[$app]} "$@" &
+ app_pid[$app]=$!
+
+ echo "Waiting for $app to run..."
+ waitforlisten ${app_pid[$app]} ${app_socket[$app]}
+ echo ""
+}
+
+# $1 - target / initiator
+function json_config_test_shutdown_app() {
+ local app=$1
+
+ # Check app type && assert app was started
+ [[ -n "${#app_socket[$app]}" ]]
+ [[ -n "${app_pid[$app]}" ]]
+
+ # spdk_kill_instance RPC will trigger ASAN
+ kill -SIGINT ${app_pid[$app]}
+
+ for ((i = 0; i < 30; i++)); do
+ if ! kill -0 ${app_pid[$app]} 2> /dev/null; then
+ app_pid[$app]=
+ break
+ fi
+ sleep 0.5
+ done
+
+ if [[ -n "${app_pid[$app]}" ]]; then
+ echo "SPDK $app shutdown timeout"
+ return 1
+ fi
+
+ echo "SPDK $app shutdown done"
+}
+
+function create_bdev_subsystem_config() {
+ timing_enter "${FUNCNAME[0]}"
+
+ local expected_notifications=()
+
+ if [[ $SPDK_TEST_BLOCKDEV -eq 1 ]]; then
+ local lvol_store_base_bdev=Nvme0n1
+ if ! tgt_rpc get_bdevs --name ${lvol_store_base_bdev} > /dev/null; then
+ if [[ $(uname -s) = Linux ]]; then
+ lvol_store_base_bdev=aio_disk
+ echo "WARNING: No NVMe drive found. Using '$lvol_store_base_bdev' instead."
+ else
+ echo "ERROR: No NVMe drive found and bdev_aio is not supported on $(uname -s)."
+ timing_exit "${FUNCNAME[0]}"
+ return 1
+ fi
+ fi
+
+ tgt_rpc bdev_split_create $lvol_store_base_bdev 2
+ tgt_rpc bdev_split_create Malloc0 3
+ tgt_rpc bdev_malloc_create 8 4096 --name Malloc3
+ tgt_rpc bdev_passthru_create -b Malloc3 -p PTBdevFromMalloc3
+
+ tgt_rpc bdev_null_create Null0 32 512
+
+ tgt_rpc bdev_malloc_create 32 512 --name Malloc0
+ tgt_rpc bdev_malloc_create 16 4096 --name Malloc1
+
+ expected_notifications+=(
+ bdev_register:${lvol_store_base_bdev}
+ bdev_register:${lvol_store_base_bdev}p0
+ bdev_register:${lvol_store_base_bdev}p1
+ bdev_register:Malloc3
+ bdev_register:PTBdevFromMalloc3
+ bdev_register:Null0
+ bdev_register:Malloc0p0
+ bdev_register:Malloc0p1
+ bdev_register:Malloc0p2
+ bdev_register:Malloc0
+ bdev_register:Malloc1
+ )
+
+ if [[ $(uname -s) = Linux ]]; then
+ # This AIO bdev must be large enough to be used as LVOL store
+ dd if=/dev/zero of="$SPDK_TEST_STORAGE/sample_aio" bs=1024 count=102400
+ tgt_rpc bdev_aio_create "$SPDK_TEST_STORAGE/sample_aio" aio_disk 1024
+ expected_notifications+=(bdev_register:aio_disk)
+ fi
+
+ # For LVOLs use split to check for proper order of initialization.
+ # If the LVOL configuration gets reordered (e.g. moved before splits or AIO/NVMe)
+ # loading the JSON config from file should fail.
+ tgt_rpc bdev_lvol_create_lvstore -c 1048576 ${lvol_store_base_bdev}p0 lvs_test
+ tgt_rpc bdev_lvol_create -l lvs_test lvol0 32
+ tgt_rpc bdev_lvol_create -l lvs_test -t lvol1 32
+ tgt_rpc bdev_lvol_snapshot lvs_test/lvol0 snapshot0
+ tgt_rpc bdev_lvol_clone lvs_test/snapshot0 clone0
+
+ expected_notifications+=(
+ "bdev_register:$RE_UUID"
+ "bdev_register:$RE_UUID"
+ "bdev_register:$RE_UUID"
+ "bdev_register:$RE_UUID"
+ )
+ fi
+
+ if [[ $SPDK_TEST_CRYPTO -eq 1 ]]; then
+ tgt_rpc bdev_malloc_create 8 1024 --name MallocForCryptoBdev
+ if [[ $(lspci -d:37c8 | wc -l) -eq 0 ]]; then
+ local crypto_driver=crypto_aesni_mb
+ else
+ local crypto_driver=crypto_qat
+ fi
+
+ tgt_rpc bdev_crypto_create MallocForCryptoBdev CryptoMallocBdev $crypto_driver 0123456789123456
+ expected_notifications+=(
+ bdev_register:MallocForCryptoBdev
+ bdev_register:CryptoMallocBdev
+ )
+ fi
+
+ if [[ $SPDK_TEST_PMDK -eq 1 ]]; then
+ pmem_pool_file=$(mktemp /tmp/pool_file1.XXXXX)
+ rm -f $pmem_pool_file
+ tgt_rpc create_pmem_pool $pmem_pool_file 128 4096
+ tgt_rpc bdev_pmem_create -n pmem1 $pmem_pool_file
+ expected_notifications+=(bdev_register:pmem1)
+ fi
+
+ if [[ $SPDK_TEST_RBD -eq 1 ]]; then
+ rbd_setup 127.0.0.1
+ tgt_rpc bdev_rbd_create $RBD_POOL $RBD_NAME 4096
+ expected_notifications+=(bdev_register:Ceph0)
+ fi
+
+ tgt_check_notifications "${expected_notifications[@]}"
+
+ timing_exit "${FUNCNAME[0]}"
+}
+
+function cleanup_bdev_subsystem_config() {
+ timing_enter "${FUNCNAME[0]}"
+
+ if [[ $SPDK_TEST_BLOCKDEV -eq 1 ]]; then
+ tgt_rpc bdev_lvol_delete lvs_test/clone0
+ tgt_rpc bdev_lvol_delete lvs_test/lvol0
+ tgt_rpc bdev_lvol_delete lvs_test/snapshot0
+ tgt_rpc bdev_lvol_delete_lvstore -l lvs_test
+ fi
+
+ if [[ $(uname -s) = Linux ]]; then
+ rm -f "$SPDK_TEST_STORAGE/sample_aio"
+ fi
+
+ if [[ $SPDK_TEST_PMDK -eq 1 && -n "$pmem_pool_file" && -f "$pmem_pool_file" ]]; then
+ tgt_rpc bdev_pmem_delete pmem1
+ tgt_rpc bdev_pmem_delete_pool $pmem_pool_file
+ rm -f $pmem_pool_file
+ fi
+
+ if [[ $SPDK_TEST_RBD -eq 1 ]]; then
+ rbd_cleanup
+ fi
+
+ timing_exit "${FUNCNAME[0]}"
+}
+
+function create_vhost_subsystem_config() {
+ timing_enter "${FUNCNAME[0]}"
+
+ tgt_rpc bdev_malloc_create 64 1024 --name MallocForVhost0
+ tgt_rpc bdev_split_create MallocForVhost0 8
+
+ tgt_rpc vhost_create_scsi_controller VhostScsiCtrlr0
+ tgt_rpc vhost_scsi_controller_add_target VhostScsiCtrlr0 0 MallocForVhost0p3
+ tgt_rpc vhost_scsi_controller_add_target VhostScsiCtrlr0 -1 MallocForVhost0p4
+ tgt_rpc vhost_controller_set_coalescing VhostScsiCtrlr0 1 100
+
+ tgt_rpc vhost_create_blk_controller VhostBlkCtrlr0 MallocForVhost0p5
+
+ # FIXME: enable after vhost-nvme is properly implemented against the latest rte_vhost (DPDK 19.05+)
+ # tgt_rpc vhost_create_nvme_controller VhostNvmeCtrlr0 16
+ # tgt_rpc vhost_nvme_controller_add_ns VhostNvmeCtrlr0 MallocForVhost0p6
+
+ timing_exit "${FUNCNAME[0]}"
+}
+
+function create_iscsi_subsystem_config() {
+ timing_enter "${FUNCNAME[0]}"
+ tgt_rpc bdev_malloc_create 64 1024 --name MallocForIscsi0
+ tgt_rpc iscsi_create_portal_group $PORTAL_TAG 127.0.0.1:$ISCSI_PORT
+ tgt_rpc iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+ tgt_rpc iscsi_create_target_node Target3 Target3_alias 'MallocForIscsi0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+ timing_exit "${FUNCNAME[0]}"
+}
+
+function create_nvmf_subsystem_config() {
+ timing_enter "${FUNCNAME[0]}"
+
+ RDMA_IP_LIST=$(get_available_rdma_ips)
+ NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+ if [[ -z $NVMF_FIRST_TARGET_IP ]]; then
+ echo "Error: no NIC for nvmf test"
+ return 1
+ fi
+
+ tgt_rpc bdev_malloc_create 8 512 --name MallocForNvmf0
+ tgt_rpc bdev_malloc_create 4 1024 --name MallocForNvmf1
+
+ tgt_rpc nvmf_create_transport -t RDMA -u 8192 -c 0
+ tgt_rpc nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+ tgt_rpc nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 MallocForNvmf0
+ tgt_rpc nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 MallocForNvmf1
+ tgt_rpc nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t RDMA -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
+
+ timing_exit "${FUNCNAME[0]}"
+}
+
+function create_virtio_initiator_config() {
+ timing_enter "${FUNCNAME[0]}"
+ initiator_rpc bdev_virtio_attach_controller -t user -a /var/tmp/VhostScsiCtrlr0 -d scsi VirtioScsiCtrlr0
+ initiator_rpc bdev_virtio_attach_controller -t user -a /var/tmp/VhostBlkCtrlr0 -d blk VirtioBlk0
+ # TODO: initiator_rpc bdev_virtio_attach_controller -t user -a /var/tmp/VhostNvmeCtrlr0 -d nvme VirtioNvme0
+ timing_exit "${FUNCNAME[0]}"
+}
+
+function json_config_test_init() {
+ timing_enter "${FUNCNAME[0]}"
+ timing_enter json_config_setup_target
+
+ json_config_test_start_app target --wait-for-rpc
+
+ #TODO: global subsystem params
+
+ # Load the NVMe configuration. load_config will issue framework_start_init automatically.
+ (
+ echo '{"subsystems": ['
+ $rootdir/scripts/gen_nvme.sh --json | jq -r "del(.config[] | select(.params.name!=\"Nvme0\"))"
+ echo ']}'
+ ) | tgt_rpc load_config
+
+ tgt_check_notification_types
+
+ if [[ $SPDK_TEST_BLOCKDEV -eq 1 ]]; then
+ create_bdev_subsystem_config
+ fi
+
+ if [[ $SPDK_TEST_VHOST -eq 1 ]]; then
+ create_vhost_subsystem_config
+ fi
+
+ if [[ $SPDK_TEST_ISCSI -eq 1 ]]; then
+ create_iscsi_subsystem_config
+ fi
+
+ if [[ $SPDK_TEST_NVMF -eq 1 ]]; then
+ create_nvmf_subsystem_config
+ fi
+ timing_exit json_config_setup_target
+
+ if [[ $SPDK_TEST_VHOST_INIT -eq 1 ]]; then
+ json_config_test_start_app initiator
+ create_virtio_initiator_config
+ fi
+
+ tgt_rpc bdev_malloc_create 8 512 --name MallocBdevForConfigChangeCheck
+
+ timing_exit "${FUNCNAME[0]}"
+}
+
+function json_config_test_fini() {
+ timing_enter "${FUNCNAME[0]}"
+ local ret=0
+
+ if [[ -n "${app_pid[initiator]}" ]]; then
+ killprocess ${app_pid[initiator]}
+ fi
+
+ if [[ -n "${app_pid[target]}" ]]; then
+
+ # Remove any artifacts we created (files, lvols, etc.)
+ cleanup_bdev_subsystem_config
+
+ # SPDK_TEST_NVMF: Should we clear something?
+ killprocess ${app_pid[target]}
+ fi
+
+ rm -f "${configs_path[@]}"
+ timing_exit "${FUNCNAME[0]}"
+ return $ret
+}
+
+function json_config_clear() {
+ [[ -n "${#app_socket[$1]}" ]] # Check app type
+ $rootdir/test/json_config/clear_config.py -s ${app_socket[$1]} clear_config
+
+ # Check if config is clean.
+ # Global params can't be cleared, so we need to filter them out.
+ local config_filter="$rootdir/test/json_config/config_filter.py"
+
+ # RPCs used to clean up the configuration (e.g. to delete split and nvme bdevs)
+ # complete immediately and don't wait for the unregister callback.
+ # As a result the configuration may not be fully cleaned up at this point,
+ # so we need to wait a while. (See github issue #789)
+ count=100
+ while [ $count -gt 0 ]; do
+ $rootdir/scripts/rpc.py -s "${app_socket[$1]}" save_config | $config_filter -method delete_global_parameters | $config_filter -method check_empty && break
+ count=$((count - 1))
+ sleep 0.1
+ done
+
+ if [ $count -eq 0 ]; then
+ return 1
+ fi
+}
+
+on_error_exit() {
+ set -x
+ set +e
+ print_backtrace
+ trap - ERR
+ echo "Error on $1 - $2"
+ json_config_test_fini
+ exit 1
+}
+
+trap 'on_error_exit "${FUNCNAME}" "${LINENO}"' ERR
+echo "INFO: JSON configuration test init"
+json_config_test_init
+
+tgt_rpc save_config > ${configs_path[target]}
+
+echo "INFO: shutting down applications..."
+if [[ $SPDK_TEST_VHOST_INIT -eq 1 ]]; then
+ initiator_rpc save_config > ${configs_path[initiator]}
+ json_config_clear initiator
+ json_config_test_shutdown_app initiator
+fi
+
+json_config_clear target
+json_config_test_shutdown_app target
+
+echo "INFO: relaunching applications..."
+json_config_test_start_app target --json ${configs_path[target]}
+if [[ $SPDK_TEST_VHOST_INIT -eq 1 ]]; then
+ json_config_test_start_app initiator --json ${configs_path[initiator]}
+fi
+
+echo "INFO: Checking if target configuration is the same..."
+$rootdir/test/json_config/json_diff.sh <(tgt_rpc save_config) "${configs_path[target]}"
+if [[ $SPDK_TEST_VHOST_INIT -eq 1 ]]; then
+ echo "INFO: Checking if virtio initiator configuration is the same..."
+ $rootdir/test/json_config/json_diff.sh <(initiator_rpc save_config) "${configs_path[initiator]}"
+fi
+
+echo "INFO: changing configuration and checking if this can be detected..."
+# Self test to check if configuration diff can be detected.
+tgt_rpc bdev_malloc_delete MallocBdevForConfigChangeCheck
+if $rootdir/test/json_config/json_diff.sh <(tgt_rpc save_config) "${configs_path[target]}" > /dev/null; then
+ echo "ERROR: intentional configuration difference not detected!"
+ false
+else
+ echo "INFO: configuration change detected."
+fi
+
+json_config_test_fini
+
+echo "INFO: Success"
diff --git a/src/spdk/test/json_config/json_diff.sh b/src/spdk/test/json_config/json_diff.sh
new file mode 100755
index 000000000..e6b9e223d
--- /dev/null
+++ b/src/spdk/test/json_config/json_diff.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+set -x
+
+if [ $# -ne 2 ]; then
+ echo "This script need exactly two arguments"
+ exit 1
+fi
+
+rootdir=$(readlink -f $(dirname $0)/../..)
+
+# Compare two JSON files.
+#
+# NOTE: The order of objects in JSON can change just by doing loads -> dumps, so all JSON objects (but not arrays)
+# are sorted by the config_filter.py script. The sorted output is then used for the comparison.
+#
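+# Illustrative example (made-up values): '{"b": 1, "a": 2}' and '{"a": 2, "b": 1}'
+# sort to identical text, so diff treats them as equal and only real content
+# differences (e.g. a changed parameter value) are reported.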
+
+tmp_file_1=$(mktemp /tmp/$(basename ${1}).XXX)
+tmp_file_2=$(mktemp /tmp/$(basename ${2}).XXX)
+ret=0
+
+$rootdir/test/json_config/config_filter.py -method "sort" < $1 > $tmp_file_1
+$rootdir/test/json_config/config_filter.py -method "sort" < $2 > $tmp_file_2
+
+if ! diff -u $tmp_file_1 $tmp_file_2; then
+ ret=1
+
+ echo "=== Start of file: $tmp_file_1 ==="
+ cat $tmp_file_1
+ echo "=== End of file: $tmp_file_1 ==="
+ echo ""
+ echo "=== Start of file: $tmp_file_2 ==="
+ cat $tmp_file_2
+ echo "=== End of file: $tmp_file_2 ==="
+ echo ""
+else
+ echo "INFO: JSON config files are the same"
+fi
+
+rm $tmp_file_1 $tmp_file_2
+exit $ret
diff --git a/src/spdk/test/lvol/basic.sh b/src/spdk/test/lvol/basic.sh
new file mode 100755
index 000000000..2e25855f9
--- /dev/null
+++ b/src/spdk/test/lvol/basic.sh
@@ -0,0 +1,568 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/lvol/common.sh
+source "$rootdir/test/bdev/nbd_common.sh"
+
+# create empty lvol store and verify its parameters
+function test_construct_lvs() {
+ # create a malloc bdev
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+
+ # create a valid lvs
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+
+ # try to destroy a nonexistent lvs, this should obviously fail
+ dummy_uuid="00000000-0000-0000-0000-000000000000"
+ NOT rpc_cmd bdev_lvol_delete_lvstore -u "$dummy_uuid"
+ # our lvs should not be impacted
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid"
+
+ # verify it's there
+ [ "$(jq -r '.[0].uuid' <<< "$lvs")" = "$lvs_uuid" ]
+ [ "$(jq -r '.[0].name' <<< "$lvs")" = "lvs_test" ]
+ [ "$(jq -r '.[0].base_bdev' <<< "$lvs")" = "$malloc_name" ]
+
+ # verify some of its parameters
+ cluster_size=$(jq -r '.[0].cluster_size' <<< "$lvs")
+ [ "$cluster_size" = "$LVS_DEFAULT_CLUSTER_SIZE" ]
+ total_clusters=$(jq -r '.[0].total_data_clusters' <<< "$lvs")
+ [ "$(jq -r '.[0].free_clusters' <<< "$lvs")" = "$total_clusters" ]
+ [ "$((total_clusters * cluster_size))" = "$LVS_DEFAULT_CAPACITY" ]
+
+ # remove the lvs and verify it's gone
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ NOT rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid"
+ # make sure we can't delete the same lvs again
+ NOT rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# call bdev_lvol_create_lvstore with base bdev name which does not
+# exist in configuration
+function test_construct_lvs_nonexistent_bdev() {
+ # make sure we can't create lvol store on nonexistent bdev
+ rpc_cmd bdev_lvol_create_lvstore NotMalloc lvs_test && false
+ return 0
+}
+
+# try to create two lvol stores on the same bdev
+function test_construct_two_lvs_on_the_same_bdev() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # try to create another lvs on the same malloc bdev
+ rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test2 && false
+
+ # clean up
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ rpc_cmd bdev_get_bdevs -b "$malloc_name" && false
+ check_leftover_devices
+}
+
+# try to create two lvs with conflicting aliases
+function test_construct_lvs_conflict_alias() {
+ # create first bdev and lvs
+ malloc1_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs1_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc1_name" lvs_test)
+
+ # create second bdev and lvs with the same name as previously
+ malloc2_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs_test && false
+
+ # clean up
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs1_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs1_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc1_name"
+ rpc_cmd bdev_malloc_delete "$malloc2_name"
+ check_leftover_devices
+}
+
+# call bdev_lvol_create_lvstore with cluster size equal to malloc bdev size + 1B
+# call bdev_lvol_create_lvstore with cluster size smaller than the minimal value of 8192
+function test_construct_lvs_different_cluster_size() {
+ # create the first lvs
+ malloc1_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs1_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc1_name" lvs_test)
+
+ # make sure we've got 1 lvs
+ lvol_stores=$(rpc_cmd bdev_lvol_get_lvstores)
+ [ "$(jq length <<< "$lvol_stores")" == "1" ]
+
+ # use the second malloc for some more lvs creation negative tests
+ malloc2_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ # cluster size bigger than the malloc bdev's capacity
+ rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $((MALLOC_SIZE + 1)) && false
+ # cluster size equal to the malloc bdev's capacity (no space left for metadata)
+ rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $MALLOC_SIZE && false
+ # cluster size slightly smaller than the malloc bdev's capacity, still no space left for metadata
+ rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $((MALLOC_SIZE - 1)) && false
+ # cluster size smaller than the minimum (8192)
+ rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c 8191 && false
+
+ # no additional lvol stores should have been created
+ lvol_stores=$(rpc_cmd bdev_lvol_get_lvstores)
+ [ "$(jq length <<< "$lvol_stores")" == "1" ]
+
+ # this one should be fine
+ lvs2_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c 8192)
+ # we should have one more lvs
+ lvol_stores=$(rpc_cmd bdev_lvol_get_lvstores)
+ [ "$(jq length <<< "$lvol_stores")" == "2" ]
+
+ # clean up
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs1_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs1_uuid" && false
+
+ # delete the second lvs (using its name only)
+ rpc_cmd bdev_lvol_delete_lvstore -l lvs2_test
+ rpc_cmd bdev_lvol_get_lvstores -l lvs2_test && false
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs2_uuid" && false
+
+ rpc_cmd bdev_malloc_delete "$malloc1_name"
+ rpc_cmd bdev_malloc_delete "$malloc2_name"
+ check_leftover_devices
+}
+
+# test different methods of clearing the disk on lvolstore creation
+function test_construct_lvs_clear_methods() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+
+ # first try to provide an invalid clear method
+ rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs2_test --clear-method invalid123 && false
+
+ # no lvs should be created
+ lvol_stores=$(rpc_cmd bdev_lvol_get_lvstores)
+ [ "$(jq length <<< "$lvol_stores")" == "0" ]
+
+ methods="none unmap write_zeroes"
+ for clear_method in $methods; do
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test --clear-method $clear_method)
+
+ # create an lvol on top
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$LVS_DEFAULT_CAPACITY_MB")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].name' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
+
+ # clean up
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ done
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Test for clear_method equal to none
+function test_construct_lvol_fio_clear_method_none() {
+ local nbd_name=/dev/nbd0
+ local clear_method=none
+
+ local lvstore_name=lvs_test lvstore_uuid
+ local lvol_name=lvol_test lvol_uuid
+ local malloc_dev
+
+ malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS")
+ lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_dev" "$lvstore_name")
+
+ get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
+
+ lvol_uuid=$(rpc_cmd bdev_lvol_create \
+ -c "$clear_method" \
+ -u "$lvstore_uuid" \
+ "$lvol_name" \
+ $((jq_out["cluster_size"] / 1024 ** 2)))
+
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" "$nbd_name"
+ run_fio_test "$nbd_name" 0 "${jq_out["cluster_size"]}" write 0xdd
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
+
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvstore_uuid"
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$malloc_dev" "$nbd_name"
+
+ local metadata_pages
+ local last_metadata_lba
+ local offset_metadata_end
+ local last_cluster_of_metadata
+ local offset
+ local size_metadata_end
+
+ metadata_pages=$(calc "1 + ${jq_out["total_data_clusters"]} + ceil(5 + ceil(${jq_out["total_data_clusters"]} / 8) / 4096) * 3")
+
+ last_metadata_lba=$((metadata_pages * 4096 / MALLOC_BS))
+ offset_metadata_end=$((last_metadata_lba * MALLOC_BS))
+ last_cluster_of_metadata=$(calc "ceil($metadata_pages / ${jq_out["cluster_size"]} / 4096)")
+ last_cluster_of_metadata=$((last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata))
+ offset=$((last_cluster_of_metadata * jq_out["cluster_size"]))
+ size_metadata_end=$((offset - offset_metadata_end))
+
+ # Check that the data in the area between the end of metadata and the first cluster of the lvol bdev remained unchanged.
+ run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0x00
+ # Check that the data written to the first cluster of the lvol bdev is still there (clear_method=none leaves it intact).
+ run_fio_test "$nbd_name" "$offset" "${jq_out["cluster_size"]}" "read" 0xdd
+
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
+ rpc_cmd bdev_malloc_delete "$malloc_dev"
+
+ check_leftover_devices
+}
+
+# Test for clear_method equal to unmap
+function test_construct_lvol_fio_clear_method_unmap() {
+ local nbd_name=/dev/nbd0
+ local clear_method=unmap
+
+ local lvstore_name=lvs_test lvstore_uuid
+ local lvol_name=lvol_test lvol_uuid
+ local malloc_dev
+
+ malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS")
+
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$malloc_dev" "$nbd_name"
+ run_fio_test "$nbd_name" 0 $((256 * 1024 ** 2)) write 0xdd
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
+
+ lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore --clear-method none "$malloc_dev" "$lvstore_name")
+ get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
+
+ lvol_uuid=$(rpc_cmd bdev_lvol_create \
+ -c "$clear_method" \
+ -u "$lvstore_uuid" \
+ "$lvol_name" \
+ $((jq_out["cluster_size"] / 1024 ** 2)))
+
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" "$nbd_name"
+ run_fio_test "$nbd_name" 0 "${jq_out["cluster_size"]}" read 0xdd
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
+
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvstore_uuid"
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$malloc_dev" "$nbd_name"
+
+ local metadata_pages
+ local last_metadata_lba
+ local offset_metadata_end
+ local last_cluster_of_metadata
+ local offset
+ local size_metadata_end
+
+ metadata_pages=$(calc "1 + ${jq_out["total_data_clusters"]} + ceil(5 + ceil(${jq_out["total_data_clusters"]} / 8) / 4096) * 3")
+
+ last_metadata_lba=$((metadata_pages * 4096 / MALLOC_BS))
+ offset_metadata_end=$((last_metadata_lba * MALLOC_BS))
+ last_cluster_of_metadata=$(calc "ceil($metadata_pages / ${jq_out["cluster_size"]} / 4096)")
+ last_cluster_of_metadata=$((last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata))
+ offset=$((last_cluster_of_metadata * jq_out["cluster_size"]))
+ size_metadata_end=$((offset - offset_metadata_end))
+
+ # Check that the data in the area between the end of metadata and the first cluster of the lvol bdev remained unchanged.
+ run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0xdd
+ # Check if data on lvol bdev was zeroed. Malloc bdev should zero any data that is unmapped.
+ run_fio_test "$nbd_name" "$offset" "${jq_out["cluster_size"]}" "read" 0x00
+
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
+ rpc_cmd bdev_malloc_delete "$malloc_dev"
+
+ check_leftover_devices
+}
+
+# create lvs + lvol on top, verify lvol's parameters
+function test_construct_lvol() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # create an lvol on top
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$LVS_DEFAULT_CAPACITY_MB")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ [ "$(jq -r '.[0].name' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
+
+ # clean up and create another lvol, this time use lvs alias instead of uuid
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test lvol_test "$LVS_DEFAULT_CAPACITY_MB")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ [ "$(jq -r '.[0].name' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
+
+ # clean up
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# create lvs + multiple lvols, verify their params
+function test_construct_multi_lvols() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # create 4 lvols
+ lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB / 4))
+ # round down lvol size to the nearest cluster size boundary
+ lvol_size_mb=$((lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+ for i in $(seq 1 4); do
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" "lvol_test${i}" "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ [ "$(jq -r '.[0].name' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test${i}" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+ done
+
+ lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
+ [ "$(jq length <<< "$lvols")" == "4" ]
+
+ # remove all lvols
+ for i in $(seq 0 3); do
+ lvol_uuid=$(jq -r ".[$i].name" <<< "$lvols")
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ done
+ lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
+ [ "$(jq length <<< "$lvols")" == "0" ]
+
+ # create the same 4 lvols again and perform the same checks
+ for i in $(seq 1 4); do
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" "lvol_test${i}" "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ [ "$(jq -r '.[0].name' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test${i}" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+ done
+
+ lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
+ [ "$(jq length <<< "$lvols")" == "4" ]
+
+ # clean up
+ for i in $(seq 0 3); do
+ lvol_uuid=$(jq -r ".[$i].name" <<< "$lvols")
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ done
+ lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
+ [ "$(jq length <<< "$lvols")" == "0" ]
+
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# create 2 lvolstores, each with a single lvol on top.
+# use a single alias for both lvols, there should be no conflict
+# since they're in different lvolstores
+function test_construct_lvols_conflict_alias() {
+ # create an lvol store 1
+ malloc1_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs1_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc1_name" lvs_test1)
+
+ # create an lvol on lvs1
+ lvol1_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test1 lvol_test "$LVS_DEFAULT_CAPACITY_MB")
+ lvol1=$(rpc_cmd bdev_get_bdevs -b "$lvol1_uuid")
+
+ # use a different size for the second malloc bdev to keep the two distinguishable
+ malloc2_size_mb=$((MALLOC_SIZE_MB / 2))
+
+ # create an lvol store 2
+ malloc2_name=$(rpc_cmd bdev_malloc_create $malloc2_size_mb $MALLOC_BS)
+ lvs2_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs_test2)
+
+ lvol2_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
+
+ # create an lvol on lvs2
+ lvol2_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test2 lvol_test "$lvol2_size_mb")
+ lvol2=$(rpc_cmd bdev_get_bdevs -b "$lvol2_uuid")
+
+ [ "$(jq -r '.[0].name' <<< "$lvol1")" = "$lvol1_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol1")" = "$lvol1_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol1")" = "lvs_test1/lvol_test" ]
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol1")" = "$lvs1_uuid" ]
+
+ [ "$(jq -r '.[0].name' <<< "$lvol2")" = "$lvol2_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol2")" = "$lvol2_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol2")" = "lvs_test2/lvol_test" ]
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol2")" = "$lvs2_uuid" ]
+
+ # clean up
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs1_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs1_uuid" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs2_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs2_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc1_name"
+ rpc_cmd bdev_get_bdevs -b "$malloc1_name" && false
+ rpc_cmd bdev_malloc_delete "$malloc2_name"
+ check_leftover_devices
+}
+
+# try to create an lvol on a nonexistent lvs uuid
+function test_construct_lvol_inexistent_lvs() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # try to create an lvol on a nonexistent lvs
+ dummy_uuid="00000000-0000-0000-0000-000000000000"
+ rpc_cmd bdev_lvol_create -u "$dummy_uuid" lvol_test "$LVS_DEFAULT_CAPACITY_MB" && false
+
+ lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
+ [ "$(jq length <<< "$lvols")" == "0" ]
+
+ # clean up
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# try to create lvol on full lvs
+function test_construct_lvol_full_lvs() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # create valid lvol
+ lvol1_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test lvol_test1 "$LVS_DEFAULT_CAPACITY_MB")
+ lvol1=$(rpc_cmd bdev_get_bdevs -b "$lvol1_uuid")
+
+ # try to create an lvol on lvs without enough free clusters
+ rpc_cmd bdev_lvol_create -l lvs_test lvol_test2 1 && false
+
+ # clean up
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# try to create two lvols with conflicting aliases
+function test_construct_lvol_alias_conflict() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # create valid lvol
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
+ lvol1_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test lvol_test "$lvol_size_mb")
+ lvol1=$(rpc_cmd bdev_get_bdevs -b "$lvol1_uuid")
+
+ # try to create another lvol with a name that's already taken
+ rpc_cmd bdev_lvol_create -l lvs_test lvol_test "$lvol_size_mb" && false
+
+ # clean up
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ rpc_cmd bdev_get_bdevs -b "$malloc_name" && false
+ check_leftover_devices
+}
+
+# create an lvs+lvol, create another lvs on lvol and then a nested lvol
+function test_construct_nested_lvol() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # create an lvol on top
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$LVS_DEFAULT_CAPACITY_MB")
+ # create a nested lvs
+ nested_lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$lvol_uuid" nested_lvs)
+
+ nested_lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB - LVS_DEFAULT_CLUSTER_SIZE_MB))
+ nested_lvol_size=$((nested_lvol_size_mb * 1024 * 1024))
+
+ # create a nested lvol
+ nested_lvol1_uuid=$(rpc_cmd bdev_lvol_create -u "$nested_lvs_uuid" nested_lvol1 "$nested_lvol_size_mb")
+ nested_lvol1=$(rpc_cmd bdev_get_bdevs -b "$nested_lvol1_uuid")
+
+ [ "$(jq -r '.[0].name' <<< "$nested_lvol1")" = "$nested_lvol1_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$nested_lvol1")" = "$nested_lvol1_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$nested_lvol1")" = "nested_lvs/nested_lvol1" ]
+ [ "$(jq -r '.[0].block_size' <<< "$nested_lvol1")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$nested_lvol1")" = "$((nested_lvol_size / MALLOC_BS))" ]
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$nested_lvol1")" = "$nested_lvs_uuid" ]
+
+ # try to create another nested lvol on a lvs that's already full
+ rpc_cmd bdev_lvol_create -u "$nested_lvs_uuid" nested_lvol2 "$nested_lvol_size_mb" && false
+
+ # clean up
+ rpc_cmd bdev_lvol_delete "$nested_lvol1_uuid"
+ rpc_cmd bdev_get_bdevs -b "$nested_lvol1_uuid" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$nested_lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$nested_lvs_uuid" && false
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Send SIGTERM after creating lvol store
+function test_sigterm() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Send SIGTERM signal to the application
+ killprocess $spdk_pid
+}
+
+$SPDK_BIN_DIR/spdk_tgt &
+spdk_pid=$!
+trap 'killprocess "$spdk_pid"; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $spdk_pid
+
+run_test "test_construct_lvs" test_construct_lvs
+run_test "test_construct_lvs_nonexistent_bdev" test_construct_lvs_nonexistent_bdev
+run_test "test_construct_two_lvs_on_the_same_bdev" test_construct_two_lvs_on_the_same_bdev
+run_test "test_construct_lvs_conflict_alias" test_construct_lvs_conflict_alias
+run_test "test_construct_lvs_different_cluster_size" test_construct_lvs_different_cluster_size
+run_test "test_construct_lvs_clear_methods" test_construct_lvs_clear_methods
+run_test "test_construct_lvol_fio_clear_method_none" test_construct_lvol_fio_clear_method_none
+run_test "test_construct_lvol_fio_clear_method_unmap" test_construct_lvol_fio_clear_method_unmap
+run_test "test_construct_lvol" test_construct_lvol
+run_test "test_construct_multi_lvols" test_construct_multi_lvols
+run_test "test_construct_lvols_conflict_alias" test_construct_lvols_conflict_alias
+run_test "test_construct_lvol_inexistent_lvs" test_construct_lvol_inexistent_lvs
+run_test "test_construct_lvol_full_lvs" test_construct_lvol_full_lvs
+run_test "test_construct_lvol_alias_conflict" test_construct_lvol_alias_conflict
+run_test "test_construct_nested_lvol" test_construct_nested_lvol
+run_test "test_sigterm" test_sigterm
+
+trap - SIGINT SIGTERM EXIT
+if ps -p $spdk_pid; then
+ killprocess $spdk_pid
+fi
diff --git a/src/spdk/test/lvol/common.sh b/src/spdk/test/lvol/common.sh
new file mode 100644
index 000000000..b0fd119b3
--- /dev/null
+++ b/src/spdk/test/lvol/common.sh
@@ -0,0 +1,53 @@
+MALLOC_SIZE_MB=128
+# size of the malloc bdev in bytes (used by the cluster-size negative tests in basic.sh)
+MALLOC_SIZE=$((MALLOC_SIZE_MB * 1024 * 1024))
+MALLOC_BS=512
+AIO_SIZE_MB=400
+AIO_BS=4096
+LVS_DEFAULT_CLUSTER_SIZE_MB=4
+LVS_DEFAULT_CLUSTER_SIZE=$((LVS_DEFAULT_CLUSTER_SIZE_MB * 1024 * 1024))
+# reserve some MBs for lvolstore metadata
+LVS_DEFAULT_CAPACITY_MB=$((MALLOC_SIZE_MB - LVS_DEFAULT_CLUSTER_SIZE_MB))
+LVS_DEFAULT_CAPACITY=$((LVS_DEFAULT_CAPACITY_MB * 1024 * 1024))
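+# With the defaults above: LVS_DEFAULT_CAPACITY_MB = 128 - 4 = 124 MiB,
+# i.e. LVS_DEFAULT_CAPACITY = 124 * 1024 * 1024 = 130023424 bytes.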
+
+function get_bdev_jq() {
+ rpc_cmd_simple_data_json bdev "$@"
+}
+
+function get_lvs_jq() {
+ rpc_cmd_simple_data_json lvs "$@"
+}
+
+function check_leftover_devices() {
+ leftover_bdevs=$(rpc_cmd bdev_get_bdevs)
+ [ "$(jq length <<< "$leftover_bdevs")" == "0" ]
+ leftover_lvs=$(rpc_cmd bdev_lvol_get_lvstores)
+ [ "$(jq length <<< "$leftover_lvs")" == "0" ]
+}
+
+function round_down() {
+ local CLUSTER_SIZE_MB=$LVS_DEFAULT_CLUSTER_SIZE_MB
+ if [ -n "$2" ]; then
+ CLUSTER_SIZE_MB=$2
+ fi
+ echo $(($1 / CLUSTER_SIZE_MB * CLUSTER_SIZE_MB))
+}
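+# e.g. "round_down 63" -> 60 with the default 4 MiB cluster size,
+# and "round_down 63 8" -> 56.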
+
+function run_fio_test() {
+ local file=$1
+ local offset=$2
+ local size=$3
+ local rw=$4
+ local pattern=$5
+ local extra_params=$6
+
+ local pattern_template="" fio_template=""
+ if [[ -n "$pattern" ]]; then
+ pattern_template="--do_verify=1 --verify=pattern --verify_pattern=$pattern --verify_state_save=0"
+ fi
+
+ fio_template="fio --name=fio_test --filename=$file --offset=$offset --size=$size --rw=$rw --direct=1 $extra_params $pattern_template"
+ $fio_template
+}
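+# Illustrative call (hypothetical device and size):
+#   run_fio_test /dev/nbd0 0 4194304 write 0xcc
+# writes a 0xcc pattern to the first 4 MiB of /dev/nbd0 and verifies it,
+# since passing a pattern enables fio's pattern verification.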
+
+function calc() {
+ bc -l <<< "define ceil(x) { scale=0; return(x + (x % 1 > 0))/1 } $1"
+}
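+# calc evaluates its argument with bc -l plus a ceil() helper,
+# e.g. calc "ceil(2.1)" prints 3 and calc "1 + 2" prints 3.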
diff --git a/src/spdk/test/lvol/hotremove.sh b/src/spdk/test/lvol/hotremove.sh
new file mode 100755
index 000000000..8306b301c
--- /dev/null
+++ b/src/spdk/test/lvol/hotremove.sh
@@ -0,0 +1,216 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/lvol/common.sh
+
+# create an lvol on lvs, then remove the lvs
+function test_hotremove_lvol_store() {
+ # create lvs + lvol on top
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$LVS_DEFAULT_CAPACITY_MB")
+
+ # remove lvs (with one lvol open)
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ lvolstores=$(rpc_cmd bdev_lvol_get_lvstores)
+ [ "$(jq length <<< "$lvolstores")" == "0" ]
+
+ # make sure we can't destroy the lvs again
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid" && false
+
+ # make sure the lvol is also gone
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
+ [ "$(jq length <<< "$lvols")" == "0" ]
+
+ # clean up
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# destroy lvs with 4 lvols on top
+function test_hotremove_lvol_store_multiple_lvols() {
+ # create lvs
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # calculate lvol size
+ lvol_size_mb=$(round_down $(((MALLOC_SIZE_MB - LVS_DEFAULT_CLUSTER_SIZE_MB) / 4)))
+
+ # create 4 lvols
+ for i in $(seq 1 4); do
+ rpc_cmd bdev_lvol_create -u "$lvs_uuid" "lvol_test${i}" "$lvol_size_mb"
+ done
+
+ lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
+ [ "$(jq length <<< "$lvols")" == "4" ]
+
+ # remove lvs (with 4 lvols open)
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+
+ # make sure all lvols are gone
+ lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
+ [ "$(jq length <<< "$lvols")" == "0" ]
+
+ # clean up
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# create an lvs on malloc, then remove just the malloc
+function test_hotremove_lvol_store_base() {
+ # create an lvs on a malloc bdev
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # clean up
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ # make sure the lvs is gone
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ # make sure we can't delete the lvs again
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid" && false
+ check_leftover_devices
+}
+
+# create an lvs on malloc, then an lvol, then remove just the malloc
+function test_hotremove_lvol_store_base_with_lvols() {
+ # create lvs + lvol on top
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$LVS_DEFAULT_CAPACITY_MB")
+
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid"
+
+ # clean up
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ # make sure the lvol is gone
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ # make sure the lvs is gone as well
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+
+ # make sure we can't delete the lvs again
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid" && false
+ check_leftover_devices
+}
+
+function test_bdev_lvol_delete_lvstore_with_clones() {
+ local snapshot_name1=snapshot1 snapshot_uuid1
+ local snapshot_name2=snapshot2 snapshot_uuid2
+ local clone_name=clone clone_uuid
+ local lbd_name=lbd_test
+
+ local bdev_uuid
+ local lvstore_name=lvs_name lvstore_uuid
+ local malloc_dev
+
+ malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS")
+ lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_dev" "$lvstore_name")
+
+ get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
+ [[ ${jq_out["uuid"]} == "$lvstore_uuid" ]]
+ [[ ${jq_out["name"]} == "$lvstore_name" ]]
+ [[ ${jq_out["base_bdev"]} == "$malloc_dev" ]]
+
+ size=$((jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024 ** 2))
+
+ bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$size")
+
+ get_bdev_jq bdev_get_bdevs -b "$bdev_uuid"
+
+ snapshot_uuid1=$(rpc_cmd bdev_lvol_snapshot "${jq_out["name"]}" "$snapshot_name1")
+
+ get_bdev_jq bdev_get_bdevs -b "$lvstore_name/$snapshot_name1"
+ [[ ${jq_out["name"]} == "$snapshot_uuid1" ]]
+ [[ ${jq_out["product_name"]} == "Logical Volume" ]]
+ [[ ${jq_out["aliases[0]"]} == "$lvstore_name/$snapshot_name1" ]]
+
+ clone_uuid=$(rpc_cmd bdev_lvol_clone "$lvstore_name/$snapshot_name1" "$clone_name")
+
+ get_bdev_jq bdev_get_bdevs -b "$lvstore_name/$clone_name"
+ [[ ${jq_out["name"]} == "$clone_uuid" ]]
+ [[ ${jq_out["product_name"]} == "Logical Volume" ]]
+ [[ ${jq_out["aliases[0]"]} == "$lvstore_name/$clone_name" ]]
+
+ snapshot_uuid2=$(rpc_cmd bdev_lvol_snapshot "${jq_out["name"]}" "$snapshot_name2")
+
+ get_bdev_jq bdev_get_bdevs -b "$lvstore_name/$snapshot_name2"
+ [[ ${jq_out["name"]} == "$snapshot_uuid2" ]]
+ [[ ${jq_out["product_name"]} == "Logical Volume" ]]
+ [[ ${jq_out["aliases[0]"]} == "$lvstore_name/$snapshot_name2" ]]
+
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid1" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvstore_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_dev"
+
+ check_leftover_devices
+}
+
+# Test unregistering lvol bdevs. Removing the malloc bdev underneath an lvol
+# store triggers unregistration of all lvol bdevs. Verify this with clones present.
+function test_unregister_lvol_bdev() {
+ local snapshot_name1=snapshot1 snapshot_uuid1
+ local snapshot_name2=snapshot2 snapshot_uuid2
+ local clone_name=clone clone_uuid
+ local lbd_name=lbd_test
+
+ local bdev_uuid
+ local lvstore_name=lvs_name lvstore_uuid
+ local malloc_dev
+
+ malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS")
+ lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_dev" "$lvstore_name")
+
+ get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
+ [[ ${jq_out["uuid"]} == "$lvstore_uuid" ]]
+ [[ ${jq_out["name"]} == "$lvstore_name" ]]
+ [[ ${jq_out["base_bdev"]} == "$malloc_dev" ]]
+
+ size=$((jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024 ** 2))
+
+ bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$size")
+
+ get_bdev_jq bdev_get_bdevs -b "$bdev_uuid"
+
+ snapshot_uuid1=$(rpc_cmd bdev_lvol_snapshot "${jq_out["name"]}" "$snapshot_name1")
+
+ get_bdev_jq bdev_get_bdevs -b "$lvstore_name/$snapshot_name1"
+ [[ ${jq_out["name"]} == "$snapshot_uuid1" ]]
+ [[ ${jq_out["product_name"]} == "Logical Volume" ]]
+ [[ ${jq_out["aliases[0]"]} == "$lvstore_name/$snapshot_name1" ]]
+
+ clone_uuid=$(rpc_cmd bdev_lvol_clone "$lvstore_name/$snapshot_name1" "$clone_name")
+
+ get_bdev_jq bdev_get_bdevs -b "$lvstore_name/$clone_name"
+ [[ ${jq_out["name"]} == "$clone_uuid" ]]
+ [[ ${jq_out["product_name"]} == "Logical Volume" ]]
+ [[ ${jq_out["aliases[0]"]} == "$lvstore_name/$clone_name" ]]
+
+ snapshot_uuid2=$(rpc_cmd bdev_lvol_snapshot "${jq_out["name"]}" "$snapshot_name2")
+
+ get_bdev_jq bdev_get_bdevs -b "$lvstore_name/$snapshot_name2"
+ [[ ${jq_out["name"]} == "$snapshot_uuid2" ]]
+ [[ ${jq_out["product_name"]} == "Logical Volume" ]]
+ [[ ${jq_out["aliases[0]"]} == "$lvstore_name/$snapshot_name2" ]]
+
+ rpc_cmd bdev_malloc_delete "$malloc_dev"
+ check_leftover_devices
+}
+
+$SPDK_BIN_DIR/spdk_tgt &
+spdk_pid=$!
+trap 'killprocess "$spdk_pid"; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $spdk_pid
+
+run_test "test_hotremove_lvol_store" test_hotremove_lvol_store
+run_test "test_hotremove_lvol_store_multiple_lvols" test_hotremove_lvol_store_multiple_lvols
+run_test "test_hotremove_lvol_store_base" test_hotremove_lvol_store_base
+run_test "test_hotremove_lvol_store_base_with_lvols" test_hotremove_lvol_store_base_with_lvols
+run_test "test_bdev_lvol_delete_lvstore_with_clones" test_bdev_lvol_delete_lvstore_with_clones
+run_test "test_unregister_lvol_bdev" test_unregister_lvol_bdev
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_pid
diff --git a/src/spdk/test/lvol/lvol2.sh b/src/spdk/test/lvol/lvol2.sh
new file mode 100755
index 000000000..3c33ca64b
--- /dev/null
+++ b/src/spdk/test/lvol/lvol2.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+timing_enter lvol
+
+timing_enter basic
+run_test "lvol_basic" test/lvol/basic.sh
+run_test "lvol_resize" test/lvol/resize.sh
+run_test "lvol_hotremove" test/lvol/hotremove.sh
+run_test "lvol_tasting" test/lvol/tasting.sh
+run_test "lvol_snapshot_clone" test/lvol/snapshot_clone.sh
+run_test "lvol_rename" test/lvol/rename.sh
+run_test "lvol_provisioning" test/lvol/thin_provisioning.sh
+timing_exit basic
+
+timing_exit lvol
diff --git a/src/spdk/test/lvol/rename.sh b/src/spdk/test/lvol/rename.sh
new file mode 100755
index 000000000..607073c51
--- /dev/null
+++ b/src/spdk/test/lvol/rename.sh
@@ -0,0 +1,219 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/lvol/common.sh
+
+# Positive test for lvol store and lvol bdev rename.
+function test_rename_positive() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+ bdev_name=("lvol_test"{0..3})
+ bdev_aliases=("lvs_test/lvol_test"{0..3})
+
+ # Calculate the lvol bdev size
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ # Create 4 lvol bdevs on top of previously created lvol store
+ bdev_uuids=()
+ for i in "${!bdev_name[@]}"; do
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" "${bdev_name[i]}" "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b $lvol_uuid)
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+ [ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases[i]}'"]')" ]
+ bdev_uuids+=("$lvol_uuid")
+ done
+
+ # Rename lvol store and check if lvol store name and
+ # lvol bdev aliases were updated properly
+ new_lvs_name="lvs_new"
+ bdev_aliases=("$new_lvs_name/lvol_test"{0..3})
+
+ rpc_cmd bdev_lvol_rename_lvstore lvs_test "$new_lvs_name"
+
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+
+ # verify it's there
+ [ "$(jq -r '.[0].uuid' <<< "$lvs")" = "$lvs_uuid" ]
+ [ "$(jq -r '.[0].name' <<< "$lvs")" = "$new_lvs_name" ]
+ [ "$(jq -r '.[0].base_bdev' <<< "$lvs")" = "$malloc_name" ]
+
+ # verify some of its parameters
+ cluster_size=$(jq -r '.[0].cluster_size' <<< "$lvs")
+ [ "$cluster_size" = "$LVS_DEFAULT_CLUSTER_SIZE" ]
+ total_clusters=$(jq -r '.[0].total_data_clusters' <<< "$lvs")
+ [ "$((total_clusters * cluster_size))" = "$LVS_DEFAULT_CAPACITY" ]
+
+ for i in "${!bdev_uuids[@]}"; do
+ lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids[i]}")
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+ [ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases[i]}'"]')" ]
+ done
+
+ # Now rename the bdevs using their aliases as "old_name"
+ # Verify that all bdev names were successfully updated
+ bdev_names=("lbd_new"{0..3})
+ new_bdev_aliases=()
+ for bdev_name in "${bdev_names[@]}"; do
+ new_bdev_aliases+=("$new_lvs_name/$bdev_name")
+ done
+ for i in "${!bdev_names[@]}"; do
+ rpc_cmd bdev_lvol_rename "${bdev_aliases[i]}" "${bdev_names[i]}"
+ lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids[i]}")
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+ [ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${new_bdev_aliases[i]}'"]')" ]
+ done
+
+ # Clean up
+ for bdev in "${new_bdev_aliases[@]}"; do
+ rpc_cmd bdev_lvol_delete "$bdev"
+ done
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Negative test case for lvol store rename.
+# Check that an error is returned when trying to rename a nonexistent lvol store.
+# Check that an error is returned when trying to rename to a name which is already
+# used by another lvol store.
+function test_rename_lvs_negative() {
+ # Call bdev_lvol_rename_lvstore with a name pointing to a nonexistent lvol store
+ rpc_cmd bdev_lvol_rename_lvstore NOTEXIST WHATEVER && false
+
+ # Construct two malloc bdevs
+ malloc_name1=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ malloc_name2=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+
+ # Create lvol store on each malloc bdev
+ lvs_uuid1=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name1" lvs_test1)
+ lvs_uuid2=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name2" lvs_test2)
+
+ # Create lists with lvol bdev names and aliases for later use
+ bdev_names_1=("lvol_test_1_"{0..3})
+ bdev_names_2=("lvol_test_2_"{0..3})
+ bdev_aliases_1=("lvs_test1/lvol_test_1_"{0..3})
+ bdev_aliases_2=("lvs_test2/lvol_test_2_"{0..3})
+
+ # Calculate the lvol bdev size
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ # Create 4 lvol bdevs on top of each lvol store
+ bdev_uuids_1=()
+ bdev_uuids_2=()
+ for i in "${!bdev_names_1[@]}"; do
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid1" "${bdev_names_1[i]}" "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid1" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+ [ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_1[i]}'"]')" ]
+ bdev_uuids_1+=("$lvol_uuid")
+
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid2" "${bdev_names_2[i]}" "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid2" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+ [ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_2[i]}'"]')" ]
+ bdev_uuids_2+=("$lvol_uuid")
+ done
+
+ # Call bdev_lvol_rename_lvstore on first lvol store and try to change its name to
+ # the same name as used by second lvol store
+ rpc_cmd bdev_lvol_rename_lvstore lvs_test1 lvs_test2 && false
+
+ # Verify that names of lvol stores and lvol bdevs did not change
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid1")
+ [ "$(jq -r '.[0].uuid' <<< "$lvs")" = "$lvs_uuid1" ]
+ [ "$(jq -r '.[0].name' <<< "$lvs")" = "lvs_test1" ]
+ [ "$(jq -r '.[0].base_bdev' <<< "$lvs")" = "$malloc_name1" ]
+ [ "$(jq -r '.[0].cluster_size' <<< "$lvs")" = "$LVS_DEFAULT_CLUSTER_SIZE" ]
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid2")
+ [ "$(jq -r '.[0].uuid' <<< "$lvs")" = "$lvs_uuid2" ]
+ [ "$(jq -r '.[0].name' <<< "$lvs")" = "lvs_test2" ]
+ [ "$(jq -r '.[0].base_bdev' <<< "$lvs")" = "$malloc_name2" ]
+ [ "$(jq -r '.[0].cluster_size' <<< "$lvs")" = "$LVS_DEFAULT_CLUSTER_SIZE" ]
+
+ for i in "${!bdev_uuids_1[@]}"; do
+ lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids_1[i]}")
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid1" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+ [ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_1[i]}'"]')" ]
+
+ lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids_2[i]}")
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid2" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+ [ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_2[i]}'"]')" ]
+ done
+
+ # Clean up
+ for bdev in "${bdev_aliases_1[@]}" "${bdev_alisaes_2[@]}"; do
+ rpc_cmd bdev_lvol_delete "$bdev"
+ done
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid1"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid2"
+ rpc_cmd bdev_malloc_delete "$malloc_name1"
+ rpc_cmd bdev_malloc_delete "$malloc_name2"
+ check_leftover_devices
+}
+
+# Negative test case for lvol bdev rename.
+# Check that an error is returned when trying to rename a nonexistent lvol bdev.
+# Check that an error is returned when trying to rename to a name which is already
+# used by another lvol bdev.
+function test_lvol_rename_negative() {
+ # Call bdev_lvol_rename with a name pointing to a nonexistent lvol bdev
+ rpc_cmd bdev_lvol_rename NOTEXIST WHATEVER && false
+
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Calculate lvol bdev size
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ # Create two lvol bdevs on top of previously created lvol store
+ lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb")
+ lvol_uuid2=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test2 "$lvol_size_mb")
+
+ # Call bdev_lvol_rename on the first lvol bdev and try to change its name to
+ # the name already used by the second lvol bdev
+ rpc_cmd bdev_lvol_rename lvol_test1 lvol_test2 && false
+
+ # Verify that the first lvol bdev still has the same name as before
+ lvol=$(rpc_cmd bdev_get_bdevs -b $lvol_uuid1)
+ [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+ [ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["lvs_test/lvol_test1"]')" ]
+
+ rpc_cmd bdev_lvol_delete lvs_test/lvol_test1
+ rpc_cmd bdev_lvol_delete lvs_test/lvol_test2
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+$SPDK_BIN_DIR/spdk_tgt &
+spdk_pid=$!
+trap 'killprocess "$spdk_pid"; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $spdk_pid
+
+run_test "test_rename_positive" test_rename_positive
+run_test "test_rename_lvs_negative" test_rename_lvs_negative
+run_test "test_lvol_rename_negative" test_lvol_rename_negative
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_pid
diff --git a/src/spdk/test/lvol/resize.sh b/src/spdk/test/lvol/resize.sh
new file mode 100755
index 000000000..be0410275
--- /dev/null
+++ b/src/spdk/test/lvol/resize.sh
@@ -0,0 +1,219 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/lvol/common.sh
+source $rootdir/test/bdev/nbd_common.sh
+
+# resize an lvol a few times
+function test_resize_lvol() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # calculate lvol size
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ # create an lvol on top
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].name' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+
+ # resize the lvol to twice its original size
+ lvol_size_mb=$((lvol_size_mb * 2))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+ rpc_cmd bdev_lvol_resize "$lvol_uuid" "$lvol_size_mb"
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+
+ # resize the lvol to four times its original size, use its name instead of uuid
+ lvol_size_mb=$((lvol_size_mb * 2))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+ rpc_cmd bdev_lvol_resize lvs_test/lvol_test "$lvol_size_mb"
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+
+ # resize the lvol to 0 using lvol bdev alias
+ lvol_size_mb=0
+ lvol_size=0
+ rpc_cmd bdev_lvol_resize "lvs_test/lvol_test" "$lvol_size_mb"
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+
+ # clean up
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+}
+
+# negative test for resizing a logical volume
+# call bdev_lvol_resize with a logical volume which does not exist in the configuration
+# call bdev_lvol_resize with a size argument bigger than the size of the base bdev
+function test_resize_lvol_negative() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # create an lvol on top
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$LVS_DEFAULT_CAPACITY_MB")
+
+ # try to resize another, non-existent lvol
+ dummy_uuid="00000000-0000-0000-0000-000000000000"
+ rpc_cmd bdev_lvol_resize "$dummy_uuid" 0 && false
+ # just make sure the size of the real lvol did not change
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
+
+ # try to resize an lvol to a size bigger than lvs
+ rpc_cmd bdev_lvol_resize "$lvol_uuid" "$MALLOC_SIZE_MB" && false
+ # just make sure the size of the real lvol did not change
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
+
+ # clean up
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+}
+
+# resize an lvol a few times while running I/O traffic against it
+function test_resize_lvol_with_io_traffic() {
+ # create an lvol store
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # calculate lvol size
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ # create an lvol on top
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].name' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+
+ # prepare to do some I/O
+ trap 'nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0; exit 1' SIGINT SIGTERM EXIT
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+
+ # write to the entire lvol
+ count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE))
+ dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
+
+ # writing beyond lvol size should fail
+ offset=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE + 1))
+ dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" seek=$offset count=1 && false
+
+ # resize the lvol to twice its original size
+ lvol_size_mb=$((lvol_size_mb * 2))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+ rpc_cmd bdev_lvol_resize "$lvol_uuid" "$lvol_size_mb"
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
+
+ # writing beyond the original lvol size should now succeed, we need
+ # to restart NBD though as it may still use the old, cached size
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" seek=$offset count=1
+
+ # an lvol can't be downsized while it has any open descriptors, so close them now
+ trap - SIGINT SIGTERM EXIT
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # resize lvol down to a single cluster
+ rpc_cmd bdev_lvol_resize "$lvol_uuid" "$LVS_DEFAULT_CLUSTER_SIZE_MB"
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CLUSTER_SIZE / MALLOC_BS))" ]
+
+ # make sure we can't write beyond the first cluster
+ trap 'nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0; exit 1' SIGINT SIGTERM EXIT
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" seek=1 count=1 && false
+
+ # clean up
+ trap - SIGINT SIGTERM EXIT
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+}
+
+# Positive test for destroying a logical volume after resizing.
+# Call bdev_lvol_delete_lvstore with the correct logical volume name.
+function test_destroy_after_bdev_lvol_resize_positive() {
+ local malloc_dev
+ local lvstore_name=lvs_test lvstore_uuid
+ local lbd_name=lbd_test bdev_uuid bdev_size
+
+ malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS")
+ lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_dev" "$lvstore_name")
+
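+ # NB: get_lvs_jq and get_bdev_jq appear to be helpers from test/lvol/common.sh that run the
+ # given RPC and flatten its JSON reply into the jq_out associative array, keyed by field
+ # name (e.g. jq_out["uuid"], jq_out["free_clusters"]).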
+ get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
+ [[ ${jq_out["uuid"]} == "$lvstore_uuid" ]]
+ [[ ${jq_out["name"]} == "$lvstore_name" ]]
+
+ bdev_size=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
+ bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$bdev_size")
+
+ # start resizing in the following fashion:
+ # - size equal to one quarter of the malloc bdev
+ # - size equal to one quarter of the malloc bdev plus 4 MiB
+ # - size equal to half of the malloc bdev
+ # - size equal to three quarters of the malloc bdev
+ # - size equal to the size of the malloc bdev minus 4 MiB
+ # - size equal to 0 MiB
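+ # As the loop below shows, each candidate value is additionally divided by four and passed
+ # through round_down (presumably a common.sh helper that rounds down to a cluster-size
+ # multiple in MiB) before being handed to bdev_lvol_resize.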
+ local resize
+ for resize in \
+ "$bdev_size" \
+ $((bdev_size + 4)) \
+ $((bdev_size * 2)) \
+ $((bdev_size * 3)) \
+ $((bdev_size * 4 - 4)) \
+ 0; do
+ resize=$(round_down $((resize / 4)))
+ rpc_cmd bdev_lvol_resize "$bdev_uuid" "$resize"
+
+ get_bdev_jq bdev_get_bdevs -b "$bdev_uuid"
+ [[ ${jq_out["name"]} == "$bdev_uuid" ]]
+ [[ ${jq_out["name"]} == "${jq_out["uuid"]}" ]]
+ ((jq_out["block_size"] == MALLOC_BS))
+ ((jq_out["num_blocks"] * jq_out["block_size"] == resize * 1024 ** 2))
+ done
+
+ # cleanup
+ rpc_cmd bdev_lvol_delete "$bdev_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvstore_uuid"
+ rpc_cmd bdev_get_bdevs -b "$bdev_uuid" && false
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvstore_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_dev"
+ check_leftover_devices
+}
+
+modprobe nbd
+$SPDK_BIN_DIR/spdk_tgt &
+spdk_pid=$!
+trap 'killprocess "$spdk_pid"; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $spdk_pid
+
+run_test "test_resize_lvol" test_resize_lvol
+run_test "test_resize_lvol_negative" test_resize_lvol_negative
+run_test "test_resize_lvol_with_io_traffic" test_resize_lvol_with_io_traffic
+run_test "test_destroy_after_bdev_lvol_resize_positive" test_destroy_after_bdev_lvol_resize_positive
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_pid
diff --git a/src/spdk/test/lvol/snapshot_clone.sh b/src/spdk/test/lvol/snapshot_clone.sh
new file mode 100755
index 000000000..49a98ca93
--- /dev/null
+++ b/src/spdk/test/lvol/snapshot_clone.sh
@@ -0,0 +1,617 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/lvol/common.sh
+source $rootdir/test/bdev/nbd_common.sh
+
+function test_snapshot_compare_with_lvol_bdev() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Create two lvol bdevs
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 6)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb" -t)
+ lvol_uuid2=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test2 "$lvol_size_mb")
+
+ # Fill the thin provisioned lvol bdev with 50% of its space
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" /dev/nbd0
+ count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2))
+ dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ # Fill whole thick provisioned lvol bdev
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid2" /dev/nbd0
+ count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE))
+ dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # Create snapshots of lvol bdevs
+ snapshot_uuid1=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test1 lvol_snapshot1)
+ snapshot_uuid2=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test2 lvol_snapshot2)
+
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid1" /dev/nbd0
+ # Try to perform a write operation on the created snapshot
+ # and check that filling the snapshot of the lvol bdev fails
+ count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE))
+ dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count && false
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # Declare nbd devices as vars for an easy cross-reference
+ local lvol_nbd1=/dev/nbd0 lvol_nbd2=/dev/nbd1
+ local snapshot_nbd1=/dev/nbd2 snapshot_nbd2=/dev/nbd3
+
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" "$lvol_nbd1"
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid2" "$lvol_nbd2"
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid1" "$snapshot_nbd1"
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid2" "$snapshot_nbd2"
+ # Compare every lvol bdev with corresponding snapshot and check that data are the same
+ cmp "$lvol_nbd1" "$snapshot_nbd1"
+ cmp "$lvol_nbd2" "$snapshot_nbd2"
+
+ # Fill second half of thin provisioned lvol bdev
+ count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2))
+ dd if=/dev/urandom of="$lvol_nbd1" oflag=direct seek=$count bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
+
+ # Compare the thin provisioned lvol bdev with its snapshot and check that the comparison fails
+ cmp "$lvol_nbd1" "$snapshot_nbd1" && false
+
+ # clean up
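+ # "${!lvol_nbd@}" expands to the *names* of all variables prefixed with lvol_nbd
+ # (lvol_nbd1, lvol_nbd2), and "${!bdev}" below then dereferences each name to get the
+ # corresponding /dev/nbdX path.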
+ for bdev in "${!lvol_nbd@}" "${!snapshot_nbd@}"; do
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" "${!bdev}"
+ done
+
+ rpc_cmd bdev_lvol_delete "$lvol_uuid1"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid1" && false
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid1"
+ rpc_cmd bdev_get_bdevs -b "$snapshot_uuid1" && false
+ rpc_cmd bdev_lvol_delete "$lvol_uuid2"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid2" && false
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid2"
+ rpc_cmd bdev_get_bdevs -b "$snapshot_uuid2" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Check that creating a snapshot succeeds
+# while I/O is being written to the lvol bdev
+function test_create_snapshot_with_io() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Create lvol bdev
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
+
+ # Run fio in background that writes to lvol bdev
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
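+ # run_fio_test appears to be a helper from test/lvol/common.sh taking: target device, offset,
+ # size, I/O verb, data pattern, and optional extra fio arguments (here time-based writes
+ # running for 16 seconds).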
+ run_fio_test /dev/nbd0 0 $lvol_size "write" "0xcc" "--time_based --runtime=16" &
+ fio_proc=$!
+ sleep 4
+ # Create snapshot of lvol bdev
+ snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
+ wait $fio_proc
+
+ # Clean up
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid"
+ rpc_cmd bdev_get_bdevs -b "$snapshot_uuid" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Check that creating a snapshot of a snapshot fails
+function test_create_snapshot_of_snapshot() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Create lvol bdev
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 3)))
+
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ # Create a snapshot of the lvol bdev
+ snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
+
+ # Create a snapshot of the previously created snapshot
+ # and check that the operation fails
+ rpc_cmd bdev_lvol_snapshot lvs_test/lvol_snapshot lvol_snapshot2 && false
+
+ # Clean up
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid"
+ rpc_cmd bdev_get_bdevs -b "$snapshot_uuid" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Check that a clone can be created only from a snapshot.
+# Check that writing to one clone does not affect the other clone.
+# Check that relations between clones and snapshots are properly set in the configuration.
+function test_clone_snapshot_relations() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Calculate size and create lvol bdev
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 6)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ # Fill lvol bdev with 100% of its space
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 0 $lvol_size "write" "0xcc"
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # An attempt to create a clone from lvol that is rw capable should fail
+ rpc_cmd bdev_lvol_clone lvs_test/lvol_test clone_test && false
+
+ # Create a snapshot of the lvol bdev
+ snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
+
+ # Try again to clone the lvol bdev directly and check that it still fails
+ rpc_cmd bdev_lvol_clone lvs_test/lvol_test clone_test && false
+
+ # Create two clones of snapshot and check if it ends with success
+ clone_uuid1=$(rpc_cmd bdev_lvol_clone lvs_test/lvol_snapshot clone_test1)
+ clone_uuid2=$(rpc_cmd bdev_lvol_clone lvs_test/lvol_snapshot clone_test2)
+
+ # Perform write operation to first clone
+ # Change first half of its space
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$clone_uuid1" /dev/nbd0
+ fill_size=$((lvol_size / 2))
+ run_fio_test /dev/nbd0 0 $fill_size "write" "0xaa"
+
+ # Compare snapshot with second clone. Data on both bdevs should be the same
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid" /dev/nbd1
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$clone_uuid2" /dev/nbd2
+ sleep 1
+ cmp /dev/nbd1 /dev/nbd2
+ # Compare snapshot with first clone
+ cmp /dev/nbd0 /dev/nbd1 && false
+
+ snapshot_bdev=$(rpc_cmd bdev_get_bdevs -b "lvs_test/lvol_snapshot")
+ clone_bdev1=$(rpc_cmd bdev_get_bdevs -b "lvs_test/clone_test1")
+ clone_bdev2=$(rpc_cmd bdev_get_bdevs -b "lvs_test/lvol_test")
+
+ # Check snapshot consistency
+ [ "$(jq '.[].driver_specific.lvol.snapshot' <<< "$snapshot_bdev")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$snapshot_bdev")" = "false" ]
+ [ "$(jq '.[].driver_specific.lvol.clones|sort' <<< "$snapshot_bdev")" = "$(jq '.|sort' <<< '["lvol_test", "clone_test1", "clone_test2"]')" ]
+
+ # Check first clone consistency
+ [ "$(jq '.[].driver_specific.lvol.snapshot' <<< "$clone_bdev1")" = "false" ]
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$clone_bdev1")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$clone_bdev1")" = '"lvol_snapshot"' ]
+
+ # Check second clone consistency
+ [ "$(jq '.[].driver_specific.lvol.snapshot' <<< "$clone_bdev2")" = "false" ]
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$clone_bdev2")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$clone_bdev2")" = '"lvol_snapshot"' ]
+
+ # Destroy first clone and check if it is deleted from snapshot
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ rpc_cmd bdev_lvol_delete "$clone_uuid1"
+ snapshot_bdev=$(rpc_cmd bdev_get_bdevs -b "lvs_test/lvol_snapshot")
+ [ "$(jq '.[].driver_specific.lvol.clones|sort' <<< "$snapshot_bdev")" = "$(jq '.|sort' <<< '["lvol_test", "clone_test2"]')" ]
+
+ # Clean up
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd1
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd2
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_lvol_delete "$clone_uuid2"
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Testing usage of bdev_lvol_inflate
+function test_clone_inflate() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Calculate size and create lvol bdev
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
+
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ # Fill lvol bdev with 100% of its space
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 0 $((lvol_size_mb * 1024 * 1024)) "write" "0xcc"
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # Create a snapshot of the lvol bdev
+ snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
+
+ # After taking the snapshot, the original lvol becomes a thin provisioned clone of it
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq '.[].driver_specific.lvol.thin_provision' <<< "$lvol")" = "true" ]
+
+ # Fill part of clone with data of known pattern
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ first_fill=0
+ second_fill=$((lvol_size_mb * 1024 * 1024 * 3 / 4))
+ run_fio_test /dev/nbd0 $first_fill $((1024 * 1024)) "write" "0xdd"
+ run_fio_test /dev/nbd0 $second_fill $((1024 * 1024)) "write" "0xdd"
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # Do inflate
+ rpc_cmd bdev_lvol_inflate lvs_test/lvol_test
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq '.[].driver_specific.lvol.thin_provision' <<< "$lvol")" = "false" ]
+
+ # Delete snapshot
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid"
+
+ # Check data consistency
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 $first_fill $((1024 * 1024)) "read" "0xdd"
+ run_fio_test /dev/nbd0 $(((first_fill + 1) * 1024 * 1024)) $((second_fill - 1024 * 1024)) "read" "0xcc"
+ run_fio_test /dev/nbd0 $second_fill $((1024 * 1024)) "read" "0xdd"
+ run_fio_test /dev/nbd0 $((second_fill + 1024 * 1024)) $((lvol_size_mb * 1024 * 1024 - (second_fill + 1024 * 1024))) "read" "0xcc"
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # Clean up
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Create chain of snapshot<-snapshot2<-lvol_test lvol bdevs.
+# Decouple lvol_test twice and delete the remaining snapshot lvol.
+# Each time check consistency of snapshot-clone relations and written data.
+function test_clone_decouple_parent() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Calculate size and create lvol bdev
+ lvol_size_mb=$((5 * LVS_DEFAULT_CLUSTER_SIZE_MB))
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ # Decouple_parent should fail on lvol bdev without a parent
+ rpc_cmd bdev_lvol_decouple_parent lvs_test/lvol_test && false
+
+ # Fill the first four of the lvol's five clusters with a known data pattern
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ begin_fill=0
+ end_fill=$((lvol_size_mb * 4 * 1024 * 1024 / 5))
+ run_fio_test /dev/nbd0 $begin_fill $end_fill "write" "0xdd"
+
+ # Create snapshot (snapshot<-lvol_bdev)
+ snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
+
+ # Fill second and fourth cluster of clone with data of known pattern
+ start_fill=$((lvol_size_mb * 1024 * 1024 / 5))
+ fill_range=$start_fill
+ run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xcc"
+ start_fill=$((lvol_size_mb * 3 * 1024 * 1024 / 5))
+ run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xcc"
+
+ # Create snapshot (snapshot<-snapshot2<-lvol_bdev)
+ snapshot_uuid2=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot2)
+
+ # Fill second cluster of clone with data of known pattern
+ start_fill=$fill_range
+ run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xee"
+
+ # Check data consistency
+ pattern=("0xdd" "0xee" "0xdd" "0xcc" "0x00")
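+ # Expected per-cluster contents: clusters 0 and 2 still hold the initial 0xdd fill, cluster 1
+ # was overwritten with 0xee after the second snapshot, cluster 3 holds the 0xcc written after
+ # the first snapshot, and cluster 4 was never written so it reads back as zeroes.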
+ for i in "${!pattern[@]}"; do
+ start_fill=$((lvol_size_mb * i * 1024 * 1024 / 5))
+ run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}"
+ done
+
+ # Decouple_parent of lvol bdev resulting in two relation chains:
+ # - snapshot<-lvol_bdev
+ # - snapshot<-snapshot2
+ rpc_cmd bdev_lvol_decouple_parent lvs_test/lvol_test
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ snapshot=$(rpc_cmd bdev_get_bdevs -b "$snapshot_uuid")
+ snapshot2=$(rpc_cmd bdev_get_bdevs -b "$snapshot_uuid2")
+ [ "$(jq '.[].driver_specific.lvol.thin_provision' <<< "$lvol")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.snapshot' <<< "$lvol")" = "false" ]
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$snapshot")" = "false" ]
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$snapshot2")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.snapshot' <<< "$snapshot2")" = "true" ]
+
+ # Delete second snapshot
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid2"
+
+ # Check data consistency
+ for i in "${!pattern[@]}"; do
+ start_fill=$((lvol_size_mb * i * 1024 * 1024 / 5))
+ run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}"
+ done
+
+ # Decouple_parent of lvol bdev again, leaving two independent entities
+ # (snapshot2 was already deleted above):
+ # - lvol_bdev
+ # - snapshot
+ rpc_cmd bdev_lvol_decouple_parent lvs_test/lvol_test
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ snapshot=$(rpc_cmd bdev_get_bdevs -b "$snapshot_uuid")
+ [ "$(jq '.[].driver_specific.lvol.thin_provision' <<< "$lvol")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "false" ]
+ [ "$(jq '.[].driver_specific.lvol.snapshot' <<< "$lvol")" = "false" ]
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$snapshot")" = "false" ]
+
+ # Delete first snapshot
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid"
+
+ # Check data consistency
+ for i in "${!pattern[@]}"; do
+ start_fill=$((lvol_size_mb * i * 1024 * 1024 / 5))
+ run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}"
+ done
+
+ # Clean up
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Set an lvol bdev as read only and create a clone of it.
+function test_lvol_bdev_readonly() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Calculate size and create lvol bdev
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ # Set lvol bdev as read only
+ rpc_cmd bdev_lvol_set_read_only lvs_test/lvol_test
+
+ # Try to perform write operation on lvol marked as read only
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 0 $lvol_size "write" "0xcc" && false
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # Create clone of lvol set to read only
+ clone_uuid=$(rpc_cmd bdev_lvol_clone lvs_test/lvol_test clone_test)
+
+ # Try to perform write operation on lvol clone
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$clone_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 0 $lvol_size "write" "0xcc"
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # Clean up
+ rpc_cmd bdev_lvol_delete "$clone_uuid"
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Check that it is possible to delete a snapshot that has a clone
+function test_delete_snapshot_with_clone() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Calculate size and create lvol bdev
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ # Perform write operation on lvol
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 0 $lvol_size "write" "0xcc"
+
+ # Create a snapshot of the lvol bdev
+ snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
+
+ # Fill first half of lvol bdev
+ half_size=$((lvol_size / 2 - 1))
+ run_fio_test /dev/nbd0 0 $half_size "write" "0xee"
+
+ # Check if snapshot was unchanged
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid" /dev/nbd1
+ run_fio_test /dev/nbd1 0 $half_size "read" "0xcc"
+
+ # Verify lvol bdev
+ run_fio_test /dev/nbd0 0 $half_size "read" "0xee"
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "true" ]
+
+ # Delete snapshot - should succeed
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd1
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid"
+
+ # Check data consistency
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "false" ]
+ run_fio_test /dev/nbd0 0 $half_size "read" "0xee"
+ run_fio_test /dev/nbd0 $((half_size + 1)) $half_size "read" "0xcc"
+
+ # Clean up
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Check that it is possible to delete a snapshot that has another snapshot on top of it
+function test_delete_snapshot_with_snapshot() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Calculate size and create lvol bdev
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 5)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ # Perform write operation on lvol
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 0 $lvol_size "write" "0xcc"
+
+ # Create snapshot of lvol bdev
+ snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot"' ]
+
+ # Fill second 1/3 of lvol bdev
+ first_part=$((lvol_size / 3))
+ second_part=$((lvol_size * 2 / 3))
+ run_fio_test /dev/nbd0 $first_part $((second_part - first_part)) "write" "0xee"
+
+ # Check if snapshot was unchanged
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid" /dev/nbd1
+ run_fio_test /dev/nbd1 0 $lvol_size "read" "0xcc"
+
+ # Create a second snapshot of the lvol bdev
+ # The first snapshot becomes the base snapshot of the second one
+ snapshot_uuid2=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot2)
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ snapshot=$(rpc_cmd bdev_get_bdevs -b "$snapshot_uuid")
+ snapshot2=$(rpc_cmd bdev_get_bdevs -b "$snapshot_uuid2")
+ [ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$snapshot2")" = '"lvol_snapshot"' ]
+ [ "$(jq '.[].driver_specific.lvol.clones|sort' <<< "$snapshot2")" = "$(jq '.|sort' <<< '["lvol_test"]')" ]
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$snapshot2")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.snapshot' <<< "$snapshot2")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.clones|sort' <<< "$snapshot")" = "$(jq '.|sort' <<< '["lvol_snapshot2"]')" ]
+
+ # Verify snapshots
+ run_fio_test /dev/nbd1 0 $lvol_size "read" "0xcc"
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid2" /dev/nbd2
+ run_fio_test /dev/nbd2 0 $((first_part - 1)) "read" "0xcc"
+ run_fio_test /dev/nbd2 $first_part $((second_part - first_part)) "read" "0xee"
+ run_fio_test /dev/nbd2 $second_part $((lvol_size - second_part)) "read" "0xcc"
+
+ # Verify lvol bdev
+ run_fio_test /dev/nbd0 $first_part $((second_part - first_part)) "read" "0xee"
+ run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "read" "0xcc"
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot2"' ]
+
+ # Fill third part of lvol bdev
+ run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "write" "0xdd"
+
+ # Verify snapshots
+ run_fio_test /dev/nbd1 0 $lvol_size "read" "0xcc"
+ run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "read" "0xdd"
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd2
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd1
+
+ # Delete snapshot - should succeed
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid2"
+
+ # Check data consistency
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ snapshot=$(rpc_cmd bdev_get_bdevs -b "$snapshot_uuid")
+ [ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "true" ]
+ [ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot"' ]
+ [ "$(jq '.[].driver_specific.lvol.clones|sort' <<< "$snapshot")" = "$(jq '.|sort' <<< '["lvol_test"]')" ]
+ run_fio_test /dev/nbd0 $first_part $((second_part - first_part)) "read" "0xee"
+ run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "read" "0xdd"
+
+ # Clean up
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid"
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+ check_leftover_devices
+}
+
+# Test for destroying lvol bdevs in a particular order.
+function test_bdev_lvol_delete_ordering() {
+ local snapshot_name=snapshot snapshot_uuid
+ local clone_name=clone clone_uuid
+
+ local bdev_uuid
+ local lbd_name=lbd_test
+ local lvstore_uuid lvstore_name=lvs_name
+ local malloc_dev
+ local size
+
+ malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS")
+ lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_dev" "$lvstore_name")
+
+ get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
+ [[ ${jq_out["uuid"]} == "$lvstore_uuid" ]]
+ [[ ${jq_out["name"]} == "$lvstore_name" ]]
+ [[ ${jq_out["base_bdev"]} == "$malloc_dev" ]]
+
+ size=$((jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024 ** 2))
+
+ bdev_uuid=$(rpc_cmd bdev_lvol_create -t -u "$lvstore_uuid" "$lbd_name" "$size")
+
+ get_bdev_jq bdev_get_bdevs -b "$bdev_uuid"
+
+ snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot "${jq_out["name"]}" "$snapshot_name")
+
+ get_bdev_jq bdev_get_bdevs -b "$lvstore_name/$snapshot_name"
+ [[ ${jq_out["name"]} == "$snapshot_uuid" ]]
+ [[ ${jq_out["product_name"]} == "Logical Volume" ]]
+ [[ ${jq_out["aliases[0]"]} == "$lvstore_name/$snapshot_name" ]]
+
+ clone_uuid=$(rpc_cmd bdev_lvol_clone "$lvstore_name/$snapshot_name" "$clone_name")
+
+ get_bdev_jq bdev_get_bdevs -b "$lvstore_name/$clone_name"
+ [[ ${jq_out["name"]} == "$clone_uuid" ]]
+ [[ ${jq_out["product_name"]} == "Logical Volume" ]]
+ [[ ${jq_out["aliases[0]"]} == "$lvstore_name/$clone_name" ]]
+
+ # Try to destroy snapshot with clones and check if it fails
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid" && false
+
+ # cleanup logical volumes
+ rpc_cmd bdev_lvol_delete "$bdev_uuid"
+ rpc_cmd bdev_lvol_delete "$clone_uuid"
+ rpc_cmd bdev_lvol_delete "$snapshot_uuid"
+
+ # cleanup lvstore
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvstore_uuid"
+
+ # cleanup malloc dev
+ rpc_cmd bdev_malloc_delete "$malloc_dev"
+
+ check_leftover_devices
+}
+
+$SPDK_BIN_DIR/spdk_tgt &
+spdk_pid=$!
+trap 'killprocess "$spdk_pid"; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $spdk_pid
+modprobe nbd
+
+run_test "test_snapshot_compare_with_lvol_bdev" test_snapshot_compare_with_lvol_bdev
+run_test "test_create_snapshot_with_io" test_create_snapshot_with_io
+run_test "test_create_snapshot_of_snapshot" test_create_snapshot_of_snapshot
+run_test "test_clone_snapshot_relations" test_clone_snapshot_relations
+run_test "test_clone_inflate" test_clone_inflate
+run_test "test_clone_decouple_parent" test_clone_decouple_parent
+run_test "test_lvol_bdev_readonly" test_lvol_bdev_readonly
+run_test "test_delete_snapshot_with_clone" test_delete_snapshot_with_clone
+run_test "test_delete_snapshot_with_snapshot" test_delete_snapshot_with_snapshot
+run_test "test_bdev_lvol_delete_ordering" test_bdev_lvol_delete_ordering
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_pid
diff --git a/src/spdk/test/lvol/tasting.sh b/src/spdk/test/lvol/tasting.sh
new file mode 100755
index 000000000..dbb75d241
--- /dev/null
+++ b/src/spdk/test/lvol/tasting.sh
@@ -0,0 +1,171 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/lvol/common.sh
+
+# Make sure lvol stores are automatically detected after base bdev detach and subsequent attach
+function test_tasting() {
+ # Create two aio bdevs
+ rpc_cmd bdev_aio_create $testdir/aio_bdev_0 aio_bdev0 "$AIO_BS"
+ rpc_cmd bdev_aio_create $testdir/aio_bdev_1 aio_bdev1 "$AIO_BS"
+ # Create a valid lvs
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore aio_bdev0 lvs_test)
+ # Destroy lvol store
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ # Remove the lvol store and make sure it is not automatically detected after the base
+ # bdev is re-attached.
+ rpc_cmd bdev_aio_delete aio_bdev0
+ # Create aio bdev on the same file
+ rpc_cmd bdev_aio_create $testdir/aio_bdev_0 aio_bdev0 "$AIO_BS"
+ sleep 1
+ # Check that the destroyed lvol store is not present on the aio bdev
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+
+ # Create a valid lvs
+ lvs1_cluster_size=$((1 * 1024 * 1024))
+ lvs2_cluster_size=$((32 * 1024 * 1024))
+ lvs_uuid1=$(rpc_cmd bdev_lvol_create_lvstore aio_bdev0 lvs_test1 -c $lvs1_cluster_size)
+ lvs_uuid2=$(rpc_cmd bdev_lvol_create_lvstore aio_bdev1 lvs_test2 -c $lvs2_cluster_size)
+
+ # Create 5 lvols on first lvs
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 10)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+
+ for i in $(seq 1 5); do
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid1" "lvol_test${i}" "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ [ "$(jq -r '.[0].name' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test1/lvol_test${i}" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / AIO_BS))" ]
+ done
+
+ # Create 5 lvols on second lvs
+ lvol2_size_mb=$(round_down $(((AIO_SIZE_MB - 16) / 5)) 32)
+ lvol2_size=$((lvol2_size_mb * 1024 * 1024))
+
+ for i in $(seq 1 5); do
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid2" "lvol_test${i}" "$lvol2_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ [ "$(jq -r '.[0].name' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test2/lvol_test${i}" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol2_size / AIO_BS))" ]
+ done
+
+ old_lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
+ [ "$(jq length <<< "$old_lvols")" == "10" ]
+ old_lvs=$(rpc_cmd bdev_lvol_get_lvstores | jq .)
+
+ # Restart spdk app
+ killprocess $spdk_pid
+ $SPDK_BIN_DIR/spdk_tgt &
+ spdk_pid=$!
+ waitforlisten $spdk_pid
+
+ # Create aio bdevs
+ rpc_cmd bdev_aio_create $testdir/aio_bdev_0 aio_bdev0 "$AIO_BS"
+ rpc_cmd bdev_aio_create $testdir/aio_bdev_1 aio_bdev1 "$AIO_BS"
+ sleep 1
+
+ # Check tasting feature
+ new_lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
+ [ "$(jq length <<< "$new_lvols")" == "10" ]
+ new_lvs=$(rpc_cmd bdev_lvol_get_lvstores | jq .)
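+ # jq '. | sort' normalizes element order on both sides so that diff only flags real
+ # differences in the lvol store / lvol bdev contents, not differences in listing order.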
+ if ! diff <(jq '. | sort' <<< "$old_lvs") <(jq '. | sort' <<< "$new_lvs"); then
+ echo "ERROR: old and loaded lvol store is not the same"
+ return 1
+ fi
+ if ! diff <(jq '. | sort' <<< "$old_lvols") <(jq '. | sort' <<< "$new_lvols"); then
+ echo "ERROR: old and loaded lvols are not the same"
+ return 1
+ fi
+
+ # Check that creation and deletion of lvol bdevs on the lvs is possible
+ for i in $(seq 6 10); do
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid1" "lvol_test${i}" "$lvol_size_mb")
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+
+ [ "$(jq -r '.[0].name' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
+ [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test1/lvol_test${i}" ]
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / AIO_BS))" ]
+ done
+
+ for i in $(seq 1 10); do
+ rpc_cmd bdev_lvol_delete "lvs_test1/lvol_test${i}"
+ done
+
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid1"
+
+ # Create an lvstore and 10 lvols on top to see if deleting such a structure works as it should.
+ lvs_uuid1=$(rpc_cmd bdev_lvol_create_lvstore aio_bdev0 lvs_test1)
+ for i in $(seq 1 10); do
+ rpc_cmd bdev_lvol_create -u "$lvs_uuid1" "lvol_test${i}" "$lvol_size_mb"
+ done
+
+ # Clean up
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid1"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid1" && false
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid2"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid2" && false
+ rpc_cmd bdev_aio_delete aio_bdev0
+ rpc_cmd bdev_aio_delete aio_bdev1
+ check_leftover_devices
+}
+
+# Positive test for removing lvol store persistently
+function test_delete_lvol_store_persistent_positive() {
+ local aio0=$testdir/aio_bdev_0
+ local bdev_aio_name=${aio0##*/} bdev_block_size=4096
+ local lvstore_name=lvstore_test lvstore_uuid
+
+ rpc_cmd bdev_aio_create "$aio0" "$bdev_aio_name" "$bdev_block_size"
+
+ get_bdev_jq bdev_get_bdevs -b "$bdev_aio_name"
+ [[ ${jq_out["name"]} == "$bdev_aio_name" ]]
+ [[ ${jq_out["product_name"]} == "AIO disk" ]]
+ ((jq_out["block_size"] == bdev_block_size))
+
+ lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$bdev_aio_name" "$lvstore_name")
+
+ get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
+ [[ ${jq_out["uuid"]} == "$lvstore_uuid" ]]
+ [[ ${jq_out["name"]} == "$lvstore_name" ]]
+ [[ ${jq_out["base_bdev"]} == "$bdev_aio_name" ]]
+
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvstore_uuid"
+ rpc_cmd bdev_aio_delete "$bdev_aio_name"
+ # Create aio bdev on the same file
+ rpc_cmd bdev_aio_create "$aio0" "$bdev_aio_name" "$bdev_block_size"
+ # Wait 1 second to allow time for lvol store tasting
+ sleep 1
+ # bdev_lvol_get_lvstores should not report any existing lvol stores in the configuration
+ # after deleting and re-adding the aio bdev, so check that the destroyed lvol store is
+ # no longer present on the aio bdev.
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvstore_uuid" && false
+
+ # cleanup
+ rpc_cmd bdev_aio_delete "$bdev_aio_name"
+ check_leftover_devices
+}
+
+$SPDK_BIN_DIR/spdk_tgt &
+spdk_pid=$!
+trap 'killprocess "$spdk_pid"; rm -f $testdir/aio_bdev_0 $testdir/aio_bdev_1; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $spdk_pid
+truncate -s "${AIO_SIZE_MB}M" $testdir/aio_bdev_0 $testdir/aio_bdev_1
+
+run_test "test_tasting" test_tasting
+run_test "test_delete_lvol_store_persistent_positive" test_delete_lvol_store_persistent_positive
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_pid
+rm -f $testdir/aio_bdev_0 $testdir/aio_bdev_1
diff --git a/src/spdk/test/lvol/thin_provisioning.sh b/src/spdk/test/lvol/thin_provisioning.sh
new file mode 100755
index 000000000..cb7bfcb01
--- /dev/null
+++ b/src/spdk/test/lvol/thin_provisioning.sh
@@ -0,0 +1,236 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/lvol/common.sh
+source $rootdir/test/bdev/nbd_common.sh
+
+# Check that the number of free clusters on the lvol store decreases
+# when we write to a thin provisioned lvol bdev created on it
+function test_thin_lvol_check_space() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ free_clusters_start="$(jq -r '.[0].free_clusters' <<< "$lvs")"
+
+ # Create a thin provisioned lvol bdev with size equal to the lvol store space
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB)))
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
+
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ free_clusters_create_lvol="$(jq -r '.[0].free_clusters' <<< "$lvs")"
+ [ $free_clusters_start == $free_clusters_create_lvol ]
+
+ # Write data (lvs cluster size) to created lvol bdev starting from offset 0.
+ size=$LVS_DEFAULT_CLUSTER_SIZE
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 0 $size "write" "0xcc"
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ free_clusters_first_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")"
+ [ $((free_clusters_first_fio + 1)) == $free_clusters_start ]
+
+ # Write one cluster's worth of data to the lvol bdev at an offset of one and a half cluster sizes
+ offset=$((LVS_DEFAULT_CLUSTER_SIZE * 3 / 2))
+ size=$LVS_DEFAULT_CLUSTER_SIZE
+ run_fio_test /dev/nbd0 $offset $size "write" "0xcc"
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ free_clusters_second_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")"
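+ # The unaligned write above starts in the middle of cluster 1 and spills into cluster 2, so
+ # together with cluster 0 (allocated by the first write) three clusters are now in use.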
+ [ $((free_clusters_second_fio + 3)) == $free_clusters_start ]
+
+ # Write data to the lvol bdev up to the end of its size
+ size=$((LVS_DEFAULT_CLUSTER_SIZE * free_clusters_first_fio))
+ offset=$((3 * LVS_DEFAULT_CLUSTER_SIZE))
+ run_fio_test /dev/nbd0 $offset $size "write" "0xcc"
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ # Check that the number of free clusters on the lvol store equals 0
+ free_clusters_third_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")"
+ [ $((free_clusters_third_fio)) == 0 ]
+
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ free_clusters_end="$(jq -r '.[0].free_clusters' <<< "$lvs")"
+ [ $((free_clusters_end)) == $free_clusters_start ]
+
+ # Clean up
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+}
+
+# Check that we can create a thin provisioned bdev on an empty lvol store
+# and that reading from this device returns zeroes.
+function test_thin_lvol_check_zeroes() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ free_clusters_start="$(jq -r '.[0].free_clusters' <<< "$lvs")"
+
+ # Create thick and thin provisioned lvol bdevs, each with size equal to the lvol store space
+ lbd_name0=lvol_test0
+ lbd_name1=lvol_test1
+ lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB))
+ # Round down lvol size to the nearest cluster size boundary
+ lvol_size_mb=$((lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+ lvol_uuid0=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" $lbd_name0 "$lvol_size_mb")
+ lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" $lbd_name1 "$lvol_size_mb" -t)
+
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid0" /dev/nbd0
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" /dev/nbd1
+
+ # Fill the whole thick provisioned lvol bdev
+ run_fio_test /dev/nbd0 0 $lvol_size "write" "0xcc"
+
+ # Perform read operations on thin provisioned lvol bdev
+ # and check if they return zeroes
+ run_fio_test /dev/nbd1 0 $lvol_size "read" "0x00"
+
+ # Clean up
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd1
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ rpc_cmd bdev_lvol_delete "$lvol_uuid1"
+ rpc_cmd bdev_lvol_delete "$lvol_uuid0"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+}
+
+# Check that data written to a thin provisioned lvol bdev
+# was properly written (fio test with verification)
+function test_thin_lvol_check_integrity() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Create a thin provisioned lvol bdev with size equal to the lvol store space
+ lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB))
+ # Round down lvol size to the nearest cluster size boundary
+ lvol_size_mb=$((lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
+
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 0 $lvol_size "write" "0xcc"
+
+ # Clean up
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+}
+
+# Check thin provisioned bdev resize
+function test_thin_lvol_resize() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Construct a thin provisioned lvol bdev on the created lvol store
+ # with size equal to 50% of the lvol store
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+ lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
+
+ # Fill all free space of lvol bdev with data
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 0 $lvol_size "write" "0xcc"
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # Save number of free clusters for lvs
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ free_clusters_start="$(jq -r '.[0].free_clusters' <<< "$lvs")"
+ # Resize bdev to full size of lvs
+ lvol_size_full_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB)))
+ lvol_size_full=$((lvol_size_full_mb * 1024 * 1024))
+ rpc_cmd bdev_lvol_resize $lvol_uuid $lvol_size_full_mb
+
+ # Check that the bdev size changed (total_data_clusters*cluster_size
+ # equals num_blocks*block_size)
+ lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
+ [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
+ [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = $((lvol_size_full / MALLOC_BS)) ]
+
+ # Check that free_clusters on the lvs remains unaffected
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ free_clusters_resize="$(jq -r '.[0].free_clusters' <<< "$lvs")"
+ [ $free_clusters_start == $free_clusters_resize ]
+
+ # Perform a write operation with verification
+ # on the newly added space of the lvol bdev
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
+ run_fio_test /dev/nbd0 0 $lvol_size_full "write" "0xcc"
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+
+ # Check that the number of free clusters on the lvs equals zero
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ free_clusters_start="$(jq -r '.[0].free_clusters' <<< "$lvs")"
+ [ $free_clusters_start == 0 ]
+
+ # Resize the bdev to 25% of the lvs and check that it succeeds
+ lvol_size_quarter_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
+ rpc_cmd bdev_lvol_resize $lvol_uuid $lvol_size_quarter_mb
+
+ # Check free clusters on lvs
+ lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
+ free_clusters_resize_quarter="$(jq -r '.[0].free_clusters' <<< "$lvs")"
+ free_clusters_expected=$(((lvol_size_full_mb - lvol_size_quarter_mb) / LVS_DEFAULT_CLUSTER_SIZE_MB))
+ [ $free_clusters_resize_quarter == $free_clusters_expected ]
+
+ rpc_cmd bdev_lvol_delete "$lvol_uuid"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+}
+
+function test_thin_overprovisioning() {
+ malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
+ lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
+
+ # Construct two thin provisioned lvol bdevs on the created lvol store,
+ # each with size equal to the free lvol store size
+ lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB)))
+ lvol_size=$((lvol_size_mb * 1024 * 1024))
+ lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb" -t)
+ lvol_uuid2=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test2 "$lvol_size_mb" -t)
+
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" /dev/nbd0
+ nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid2" /dev/nbd1
+ # Fill the first bdev to 50% of its space with a specific pattern
+ fill_size=$((lvol_size_mb * 5 / 10 / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
+ fill_size=$((fill_size * 1024 * 1024))
+ run_fio_test /dev/nbd0 0 $fill_size "write" "0xcc"
+
+ # Fill second bdev up to 50% of its space
+ run_fio_test /dev/nbd1 0 $fill_size "write" "0xcc"
+
+ # Fill the rest of the second bdev and check that an error occurs,
+ # since the overprovisioned lvol store has run out of free clusters
+ offset=$fill_size
+ fill_size_rest=$((lvol_size - fill_size))
+ run_fio_test /dev/nbd1 "$offset" "$fill_size_rest" "write" "0xcc" && false
+
+ # Check that the data on the first disk stayed unchanged
+ run_fio_test /dev/nbd0 0 $fill_size "read" "0xcc"
+ run_fio_test /dev/nbd0 $offset $fill_size_rest "read" "0x00"
+
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
+ nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd1
+
+ rpc_cmd bdev_lvol_delete "$lvol_uuid2"
+ rpc_cmd bdev_lvol_delete "$lvol_uuid1"
+ rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
+ rpc_cmd bdev_malloc_delete "$malloc_name"
+}
+
+$SPDK_BIN_DIR/spdk_tgt &
+spdk_pid=$!
+trap 'killprocess "$spdk_pid"; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $spdk_pid
+
+run_test "test_thin_lvol_check_space" test_thin_lvol_check_space
+run_test "test_thin_lvol_check_zeroes" test_thin_lvol_check_zeroes
+run_test "test_thin_lvol_check_integrity" test_thin_lvol_check_integrity
+run_test "test_thin_lvol_resize" test_thin_lvol_resize
+run_test "test_thin_overprovisioning" test_thin_overprovisioning
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_pid
diff --git a/src/spdk/test/make/check_so_deps.sh b/src/spdk/test/make/check_so_deps.sh
new file mode 100755
index 000000000..5f38f56a1
--- /dev/null
+++ b/src/spdk/test/make/check_so_deps.sh
@@ -0,0 +1,508 @@
+#!/usr/bin/env bash
+
+if [ "$(uname -s)" = "FreeBSD" ]; then
+ echo "Not testing for shared object dependencies on FreeBSD."
+ exit 0
+fi
+
+rootdir=$(readlink -f $(dirname $0)/../..)
+
+if [[ ! -f $1 ]]; then
+ echo "ERROR: SPDK test configuration not specified"
+ exit 1
+fi
+
+source $1
+source "$rootdir/test/common/autotest_common.sh"
+
+libdir="$rootdir/build/lib"
+libdeps_file="$rootdir/mk/spdk.lib_deps.mk"
+source_abi_dir="$HOME/spdk_20_04/build/lib"
+suppression_file="$HOME/abigail_suppressions.ini"
+
+function confirm_abi_deps() {
+ local processed_so=0
+
+ if ! hash abidiff; then
+ echo "Unable to check ABI compatibility. Please install abidiff."
+ return 1
+ fi
+
+ if [ ! -d $source_abi_dir ]; then
+ echo "No source ABI available, failing this test."
+ return 1
+ fi
+
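+ # A minimal abigail suppression list: these symbols are presumed private/internal and expected
+ # to change, so abidiff is told to ignore them and only report breaks in the public ABI.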
+ cat << EOF > ${suppression_file}
+[suppress_variable]
+ name = SPDK_LOG_IDXD
+[suppress_variable]
+ name = SPDK_LOG_IOAT
+[suppress_variable]
+ name = SPDK_LOG_JSON_UTIL
+[suppress_variable]
+ name = SPDK_LOG_RPC
+[suppress_variable]
+ name = SPDK_LOG_RPC_CLIENT
+[suppress_function]
+ name = spdk_jsonrpc_server_handle_request
+[suppress_function]
+ name = spdk_jsonrpc_server_handle_error
+[suppress_function]
+ name = spdk_jsonrpc_server_send_response
+[suppress_function]
+ name = spdk_jsonrpc_parse_request
+[suppress_function]
+ name = spdk_jsonrpc_free_request
+[suppress_function]
+ name = spdk_jsonrpc_parse_response
+[suppress_variable]
+ name = SPDK_LOG_LOG_RPC
+[suppress_variable]
+ name = SPDK_LOG_LOG
+[suppress_variable]
+ name = SPDK_LOG_LVOL
+[suppress_variable]
+ name = SPDK_LOG_NBD
+[suppress_function]
+ name = spdk_nbd_disk_find_by_nbd_path
+[suppress_function]
+ name = spdk_nbd_disk_first
+[suppress_function]
+ name = spdk_nbd_disk_next
+[suppress_function]
+ name = spdk_nbd_disk_get_nbd_path
+[suppress_function]
+ name = spdk_nbd_disk_get_bdev_name
+[suppress_variable]
+ name = SPDK_LOG_NET
+[suppress_function]
+ name = spdk_interface_net_interface_add_ip_address
+[suppress_function]
+ name = spdk_interface_net_interface_delete_ip_address
+[suppress_function]
+ name = spdk_interface_get_list
+[suppress_function]
+ name = spdk_get_uevent
+[suppress_function]
+ name = spdk_uevent_connect
+[suppress_function]
+ name = spdk_nvme_ctrlr_get_current_process
+[suppress_function]
+ name = spdk_nvme_ctrlr_get_process
+[suppress_function]
+ name = spdk_nvme_get_ctrlr_by_trid_unsafe
+[suppress_function]
+ name = spdk_nvme_io_msg_process
+[suppress_function]
+ name = spdk_nvme_wait_for_completion
+[suppress_function]
+ name = spdk_nvme_wait_for_completion_robust_lock
+[suppress_function]
+ name = spdk_nvme_wait_for_completion_timeout
+[suppress_variable]
+ name = SPDK_LOG_NVME
+[suppress_variable]
+ name = SPDK_LOG_OPAL
+[suppress_variable]
+ name = spdk_opal_method
+[suppress_variable]
+ name = spdk_opal_uid
+[suppress_variable]
+ name = SPDK_LOG_REDUCE
+[suppress_variable]
+ name = SPDK_LOG_THREAD
+[suppress_variable]
+ name = SPDK_LOG_TRACE
+[suppress_function]
+ name = spdk_crc32_table_init
+[suppress_function]
+ name = spdk_crc32_update
+[suppress_variable]
+ name = SPDK_LOG_VIRTIO_DEV
+[suppress_variable]
+ name = SPDK_LOG_VIRTIO_PCI
+[suppress_variable]
+ name = SPDK_LOG_VIRTIO_USER
+[suppress_variable]
+ name = SPDK_LOG_VMD
+[suppress_variable]
+ name = SPDK_LOG_ACCEL_IDXD
+[suppress_variable]
+ name = SPDK_LOG_ACCEL_IOAT
+[suppress_variable]
+ name = SPDK_LOG_AIO
+[suppress_variable]
+ name = SPDK_LOG_VBDEV_COMPRESS
+[suppress_variable]
+ name = SPDK_LOG_CRYPTO
+[suppress_variable]
+ name = SPDK_LOG_VBDEV_DELAY
+[suppress_function]
+ name = spdk_vbdev_error_create
+[suppress_function]
+ name = spdk_vbdev_error_delete
+[suppress_function]
+ name = spdk_vbdev_error_inject_error
+[suppress_variable]
+ name = SPDK_LOG_BDEV_FTL
+[suppress_variable]
+ name = SPDK_LOG_GPT_PARSE
+[suppress_variable]
+ name = SPDK_LOG_VBDEV_GPT
+[suppress_function]
+ name = spdk_gpt_parse_mbr
+[suppress_function]
+ name = spdk_gpt_parse_partition_table
+[suppress_variable]
+ name = SPDK_LOG_ISCSI_INIT
+[suppress_variable]
+ name = SPDK_LOG_LVOL_RPC
+[suppress_variable]
+ name = SPDK_LOG_VBDEV_LVOL
+[suppress_variable]
+ name = SPDK_LOG_BDEV_MALLOC
+[suppress_variable]
+ name = SPDK_LOG_BDEV_NULL
+[suppress_variable]
+ name = SPDK_LOG_BDEV_NVME
+[suppress_function]
+ name = spdk_bdev_nvme_create
+[suppress_function]
+ name = spdk_bdev_nvme_delete
+[suppress_function]
+ name = spdk_bdev_nvme_get_ctrlr
+[suppress_function]
+ name = spdk_bdev_nvme_get_io_qpair
+[suppress_function]
+ name = spdk_bdev_nvme_get_opts
+[suppress_function]
+ name = spdk_bdev_nvme_set_hotplug
+[suppress_function]
+ name = spdk_bdev_nvme_set_opts
+[suppress_function]
+ name = spdk_vbdev_opal_create
+[suppress_function]
+ name = spdk_vbdev_opal_destruct
+[suppress_function]
+ name = spdk_vbdev_opal_enable_new_user
+[suppress_function]
+ name = spdk_vbdev_opal_get_info_from_bdev
+[suppress_function]
+ name = spdk_vbdev_opal_set_lock_state
+[suppress_variable]
+ name = SPDK_LOG_BDEV_OCSSD
+[suppress_variable]
+ name = SPDK_LOG_VBDEV_OPAL
+[suppress_variable]
+ name = SPDK_LOG_OCFCTX
+[suppress_variable]
+ name = SPDK_LOG_VBDEV_PASSTHRU
+[suppress_variable]
+ name = SPDK_LOG_BDEV_PMEM
+[suppress_function]
+ name = spdk_create_pmem_disk
+[suppress_function]
+ name = spdk_delete_pmem_disk
+[suppress_variable]
+ name = SPDK_LOG_BDEV_RAID
+[suppress_variable]
+ name = SPDK_LOG_BDEV_RAID0
+[suppress_variable]
+ name = SPDK_LOG_BDEV_RAID5
+[suppress_variable]
+ name = SPDK_LOG_RAID_RPC
+[suppress_variable]
+ name = SPDK_LOG_BDEV_RBD
+[suppress_function]
+ name = spdk_bdev_rbd_create
+[suppress_function]
+ name = spdk_bdev_rbd_delete
+[suppress_function]
+ name = spdk_bdev_rbd_dup_config
+[suppress_function]
+ name = spdk_bdev_rbd_free_config
+[suppress_function]
+ name = spdk_bdev_rbd_resize
+[suppress_variable]
+ name = SPDK_LOG_VBDEV_SPLIT
+[suppress_function]
+ name = spdk_vbdev_split_destruct
+[suppress_function]
+ name = spdk_vbdev_split_get_part_base
+[suppress_variable]
+ name = SPDK_LOG_URING
+[suppress_variable]
+ name = SPDK_LOG_VIRTIO
+[suppress_variable]
+ name = SPDK_LOG_VIRTIO_BLK
+[suppress_variable]
+ name = SPDK_LOG_VBDEV_ZONE_BLOCK
+[suppress_function]
+ name = spdk_vbdev_zone_block_create
+[suppress_function]
+ name = spdk_vbdev_zone_block_delete
+[suppress_variable]
+ name = SPDK_LOG_BLOBFS_BDEV
+[suppress_variable]
+ name = SPDK_LOG_BLOBFS_BDEV_RPC
+[suppress_function]
+ name = spdk_blobfs_fuse_send_request
+[suppress_function]
+ name = spdk_blobfs_fuse_start
+[suppress_function]
+ name = spdk_blobfs_fuse_stop
+[suppress_variable]
+ name = SPDK_LOG_APP_RPC
+[suppress_function]
+ name = spdk_nvmf_parse_conf
+[suppress_variable]
+ name = SPDK_LOG_VHOST
+[suppress_variable]
+ name = SPDK_LOG_VHOST_BLK
+[suppress_variable]
+ name = SPDK_LOG_VHOST_BLK_DATA
+[suppress_variable]
+ name = SPDK_LOG_VHOST_RING
+[suppress_variable]
+ name = SPDK_LOG_VHOST_RPC
+[suppress_variable]
+ name = SPDK_LOG_VHOST_SCSI
+[suppress_variable]
+ name = SPDK_LOG_VHOST_SCSI_DATA
+[suppress_variable]
+ name = SPDK_LOG_VHOST_SCSI_QUEUE
+[suppress_variable]
+ name = spdk_vhost_scsi_device_backend
+[suppress_type]
+ name = spdk_net_impl
+[suppress_type]
+ name = spdk_lvol
+[suppress_type]
+ name = spdk_pci_device
+EOF
+
+ for object in "$libdir"/libspdk_*.so; do
+ so_file=$(basename $object)
+ if [ ! -f "$source_abi_dir/$so_file" ]; then
+ echo "No corresponding object for $so_file in canonical directory. Skipping."
+ continue
+ fi
+
+ if ! output=$(abidiff "$source_abi_dir/$so_file" "$libdir/$so_file" --headers-dir1 "$source_abi_dir/../../include/" --headers-dir2 "$rootdir/include" --leaf-changes-only --suppressions $suppression_file --stat); then
+ # remove any filtered out variables.
+ output=$(sed "s/ [()][^)]*[)]//g" <<< "$output")
+
+ IFS="." read -r _ _ new_so_maj new_so_min < <(readlink "$libdir/$so_file")
+ IFS="." read -r _ _ old_so_maj old_so_min < <(readlink "$source_abi_dir/$so_file")
+
+ found_abi_change=false
+ so_name_changed=no
+
+ if [[ $output == *"ELF SONAME changed"* ]]; then
+ so_name_changed=yes
+ fi
+
+ changed_leaf_types=0
+ if [[ $output =~ "leaf types summary: "([0-9]+) ]]; then
+ changed_leaf_types=${BASH_REMATCH[1]}
+ fi
+
+ removed_functions=0 changed_functions=0 added_functions=0
+ if [[ $output =~ "functions summary: "([0-9]+)" Removed, "([0-9]+)" Changed, "([0-9]+)" Added" ]]; then
+ removed_functions=${BASH_REMATCH[1]} changed_functions=${BASH_REMATCH[2]} added_functions=${BASH_REMATCH[3]}
+ fi
+
+ removed_vars=0 changed_vars=0 added_vars=0
+ if [[ $output =~ "variables summary: "([0-9]+)" Removed, "([0-9]+)" Changed, "([0-9]+)" Added" ]]; then
+ removed_vars=${BASH_REMATCH[1]} changed_vars=${BASH_REMATCH[2]} added_vars=${BASH_REMATCH[3]}
+ fi
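+ # Illustrative only (not verbatim abidiff output): the regexes above expect
+ # summary lines shaped like
+ #   functions summary: 1 Removed, 0 Changed, 2 Added
+ #   variables summary: 0 Removed, 1 Changed, 0 Added
+ # and pull the three counters out of each.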
+
+ if ((changed_leaf_types != 0)); then
+ if ((new_so_maj == old_so_maj)); then
+ touch $fail_file
+ echo "Please update the major SO version for $so_file. A header accesible type has been modified since last release."
+ fi
+ found_abi_change=true
+ fi
+
+ if ((removed_functions != 0)) || ((removed_vars != 0)); then
+ if ((new_so_maj == old_so_maj)); then
+ touch $fail_file
+ echo "Please update the major SO version for $so_file. API functions or variables have been removed since last release."
+ fi
+ found_abi_change=true
+ fi
+
+ if ((changed_functions != 0)) || ((changed_vars != 0)); then
+ if ((new_so_maj == old_so_maj)); then
+ touch $fail_file
+ echo "Please update the major SO version for $so_file. API functions or variables have been changed since last release."
+ fi
+ found_abi_change=true
+ fi
+
+ if ((added_functions != 0)) || ((added_vars != 0)); then
+ if ((new_so_min == old_so_min && new_so_maj == old_so_maj)) && ! $found_abi_change; then
+ touch $fail_file
+ echo "Please update the minor SO version for $so_file. API functions or variables have been added since last release."
+ fi
+ found_abi_change=true
+ fi
+
+ if [[ $so_name_changed == yes ]]; then
+ if ! $found_abi_change; then
+ # Unfortunately, libspdk_idxd made it into 20.04 without an SO suffix. TODO: remove after 20.07
+ if [ "$so_file" != "libspdk_idxd.so" ] && [ "$so_file" != "libspdk_accel_idxd.so" ]; then
+ echo "SO name for $so_file changed without a change to abi. please revert that change."
+ touch $fail_file
+ fi
+ fi
+
+ if ((new_so_maj != old_so_maj && new_so_min != 0)); then
+ echo "SO major version for $so_file was bumped. Please reset the minor version to 0."
+ touch $fail_file
+ fi
+
+ expected_new_so_min=$((old_so_min + 1))
+ if ((new_so_min > old_so_min && expected_new_so_min != new_so_min)); then
+ echo "SO minor version for $so_file was incremented more than once. Please revert minor version to $expected_new_so_min."
+ touch $fail_file
+ fi
+ fi
+
+ continue
+ fi
+ processed_so=$((processed_so + 1))
+ done
+ rm -f $suppression_file
+ echo "Processed $processed_so objects."
+}
+
+# This function is needed to properly evaluate the Make variables into actual dependencies.
+function replace_defined_variables() {
+ local arr=("$@")
+ local bad_values=()
+ local good_values=()
+ local new_values
+ for dep in "${arr[@]}"; do
+ if [[ $dep == *'$'* ]]; then
+ raw_dep=${dep/$\(/}
+ raw_dep=${raw_dep/\)/ }
+ bad_values+=("$raw_dep")
+ else
+ good_values+=("$dep")
+ fi
+ done
+ for dep in "${bad_values[@]}"; do
+ dep_def_arr=($(grep -v "#" $libdeps_file | grep "${dep}" | cut -d "=" -f 2 | xargs))
+ new_values=($(replace_defined_variables "${dep_def_arr[@]}"))
+ good_values=("${good_values[@]}" "${new_values[@]}")
+ done
+ echo ${good_values[*]}
+}
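+
+# A minimal usage sketch (hypothetical variable and dependency names): given
+#   DEPDIRS-bdev_foo = log $(JSON_LIBS)
+#   JSON_LIBS = json jsonrpc rpc
+# in spdk.lib_deps.mk, calling
+#   replace_defined_variables log '$(JSON_LIBS)'
+# would print "log json jsonrpc rpc".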
+
+function confirm_deps() {
+ lib=$1
+ missing_syms=()
+ dep_names=()
+ found_symbol_lib=""
+
+ # Keep the space here to differentiate bdev and bdev_*
+ lib_shortname=$(basename $lib | sed 's,libspdk_,,g' | sed 's,\.so, ,g')
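+ # e.g. libspdk_bdev.so -> "bdev " (trailing space), so the DEPDIRS grep below
+ # matches DEPDIRS-bdev but not DEPDIRS-bdev_malloc and other bdev_* entries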
+ lib_make_deps=($(grep "DEPDIRS-${lib_shortname}" $libdeps_file | cut -d "=" -f 2 | xargs))
+ lib_make_deps=($(replace_defined_variables "${lib_make_deps[@]}"))
+
+ for ign_dep in "${IGNORED_LIBS[@]}"; do
+ for i in "${!lib_make_deps[@]}"; do
+ if [[ ${lib_make_deps[i]} == "$ign_dep" ]]; then
+ unset 'lib_make_deps[i]'
+ fi
+ done
+ done
+
+ symbols=$(readelf -s $lib | grep -E "NOTYPE.*GLOBAL.*UND" | awk '{print $8}' | sort | uniq)
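+ # Undefined symbols appear in readelf -s roughly as (illustrative, not verbatim):
+ #   42: 0000000000000000  0 NOTYPE  GLOBAL DEFAULT  UND spdk_json_write_object_begin
+ # i.e. the symbol name is the 8th field, which is what the awk above extracts.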
+ for symbol in $symbols; do
+ for deplib in $DEP_LIBS; do
+ if [ "$deplib" == "$lib" ]; then
+ continue
+ fi
+ found_symbol=$(readelf -s $deplib | grep -E "DEFAULT\s+[0-9]+\s$symbol$") || true
+ if [ "$found_symbol" != "" ]; then
+ found_symbol_lib=$(basename $deplib | sed 's,libspdk_,,g' | sed 's,\.so,,g')
+ break
+ fi
+ done
+ if [ "$found_symbol" == "" ]; then
+ missing_syms+=("$symbol")
+ else
+ dep_names+=("$found_symbol_lib")
+ fi
+ done
+ IFS=$'\n'
+ # Ignore any event_* dependencies. Those are based on the subsystem configuration and not readelf.
+ lib_make_deps=($(printf "%s\n" "${lib_make_deps[@]}" | sort | grep -v "event_"))
+ # Ignore the env_dpdk readelf dependency. We don't want people explicitly linking against it.
+ dep_names=($(printf "%s\n" "${dep_names[@]}" | sort | uniq | grep -v "env_dpdk"))
+ unset IFS
+ diff=$(echo "${dep_names[@]}" "${lib_make_deps[@]}" | tr ' ' '\n' | sort | uniq -u)
+ if [ "$diff" != "" ]; then
+ touch $fail_file
+ echo "there was a dependency mismatch in the library $lib_shortname"
+ echo "The makefile lists: '${lib_make_deps[*]}'"
+ echo "readelf outputs : '${dep_names[*]}'"
+ echo "---------------------------------------------------------------------"
+ fi
+}
+
+# By removing the spdk.lib_deps.mk include from spdk.lib.mk, we ensure that we won't
+# create any link dependencies. Then we can be sure we get a valid accounting of the
+# symbol dependencies we have.
+sed -i -e 's,include $(SPDK_ROOT_DIR)/mk/spdk.lib_deps.mk,,g' "$rootdir/mk/spdk.lib.mk"
+
+source ~/autorun-spdk.conf
+config_params=$(get_config_params)
+if [ "$SPDK_TEST_OCF" -eq 1 ]; then
+ config_params="$config_params --with-ocf=$rootdir/build/ocf.a"
+fi
+
+$MAKE $MAKEFLAGS clean
+./configure $config_params --with-shared
+$MAKE $MAKEFLAGS
+
+xtrace_disable
+
+fail_file=$output_dir/check_so_deps_fail
+
+rm -f $fail_file
+
+run_test "confirm_abi_deps" confirm_abi_deps
+
+echo "---------------------------------------------------------------------"
+# Exclude libspdk_env_dpdk.so from the library list. We don't link against this one so that
+# users can define their own environment abstraction. However, we still want to check it
+# for dependencies to avoid printing out a bunch of confusing symbols under the missing
+# symbols section.
+SPDK_LIBS=$(ls -1 $libdir/libspdk_*.so | grep -v libspdk_env_dpdk.so)
+DEP_LIBS=$(ls -1 $libdir/libspdk_*.so)
+
+IGNORED_LIBS=()
+if grep -q 'CONFIG_VHOST_INTERNAL_LIB?=n' $rootdir/mk/config.mk; then
+ IGNORED_LIBS+=("rte_vhost")
+fi
+
+(
+ for lib in $SPDK_LIBS; do confirm_deps $lib & done
+ wait
+)
+
+$MAKE $MAKEFLAGS clean
+git checkout "$rootdir/mk/spdk.lib.mk"
+
+if [ -f $fail_file ]; then
+ rm -f $fail_file
+ echo "shared object test failed"
+ exit 1
+fi
+
+xtrace_restore
diff --git a/src/spdk/test/nvme/Makefile b/src/spdk/test/nvme/Makefile
new file mode 100644
index 000000000..b2ed73a09
--- /dev/null
+++ b/src/spdk/test/nvme/Makefile
@@ -0,0 +1,46 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = aer reset sgl e2edp overhead deallocated_value err_injection \
+ startup reserve
+DIRS-$(CONFIG_NVME_CUSE) += cuse
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/nvme/aer/.gitignore b/src/spdk/test/nvme/aer/.gitignore
new file mode 100644
index 000000000..313796176
--- /dev/null
+++ b/src/spdk/test/nvme/aer/.gitignore
@@ -0,0 +1 @@
+aer
diff --git a/src/spdk/test/nvme/aer/Makefile b/src/spdk/test/nvme/aer/Makefile
new file mode 100644
index 000000000..616800777
--- /dev/null
+++ b/src/spdk/test/nvme/aer/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+APP = aer
+
+include $(SPDK_ROOT_DIR)/mk/nvme.libtest.mk
diff --git a/src/spdk/test/nvme/aer/aer.c b/src/spdk/test/nvme/aer/aer.c
new file mode 100644
index 000000000..701109ced
--- /dev/null
+++ b/src/spdk/test/nvme/aer/aer.c
@@ -0,0 +1,610 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/log.h"
+#include "spdk/nvme.h"
+#include "spdk/env.h"
+#include "spdk/string.h"
+
+#define MAX_DEVS 64
+
+struct dev {
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct spdk_nvme_health_information_page *health_page;
+ struct spdk_nvme_ns_list *changed_ns_list;
+ uint32_t orig_temp_threshold;
+ char name[SPDK_NVMF_TRADDR_MAX_LEN + 1];
+};
+
+static void get_feature_test(struct dev *dev);
+
+static struct dev g_devs[MAX_DEVS];
+static int g_num_devs = 0;
+
+#define foreach_dev(iter) \
+ for (iter = g_devs; iter - g_devs < g_num_devs; iter++)
+
+static int g_outstanding_commands = 0;
+static int g_aer_done = 0;
+static int g_temperature_done = 0;
+static int g_failed = 0;
+static struct spdk_nvme_transport_id g_trid;
+static char *g_touch_file;
+
+/* Enable AER temperature test */
+static int g_enable_temp_test = 0;
+/* Enable the AER namespace attribute notice test; this variable holds
+ * the NSID that is expected to appear in the Changed Namespace List.
+ */
+static uint32_t g_expected_ns_test = 0;
+
+static void
+set_temp_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct dev *dev = cb_arg;
+
+ g_outstanding_commands--;
+
+ if (spdk_nvme_cpl_is_error(cpl)) {
+ printf("%s: set feature (temp threshold) failed\n", dev->name);
+ g_failed = 1;
+ return;
+ }
+
+ /* Admin command completions are synchronized by the NVMe driver,
+ * so we don't need to do any special locking here. */
+ g_temperature_done++;
+}
+
+static int
+set_temp_threshold(struct dev *dev, uint32_t temp)
+{
+ struct spdk_nvme_cmd cmd = {};
+ int rc;
+
+ cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+ cmd.cdw11_bits.feat_temp_threshold.bits.tmpth = temp;
+
+ rc = spdk_nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, NULL, 0, set_temp_completion, dev);
+ if (rc == 0) {
+ g_outstanding_commands++;
+ }
+
+ return rc;
+}
+
+static void
+get_temp_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct dev *dev = cb_arg;
+
+ g_outstanding_commands--;
+
+ if (spdk_nvme_cpl_is_error(cpl)) {
+ printf("%s: get feature (temp threshold) failed\n", dev->name);
+ g_failed = 1;
+ return;
+ }
+
+ dev->orig_temp_threshold = cpl->cdw0;
+ printf("%s: original temperature threshold: %u Kelvin (%d Celsius)\n",
+ dev->name, dev->orig_temp_threshold, dev->orig_temp_threshold - 273);
+
+ g_temperature_done++;
+}
+
+static int
+get_temp_threshold(struct dev *dev)
+{
+ struct spdk_nvme_cmd cmd = {};
+ int rc;
+
+ cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = spdk_nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, NULL, 0, get_temp_completion, dev);
+ if (rc == 0) {
+ g_outstanding_commands++;
+ }
+
+ return rc;
+}
+
+static void
+print_health_page(struct dev *dev, struct spdk_nvme_health_information_page *hip)
+{
+ printf("%s: Current Temperature: %u Kelvin (%d Celsius)\n",
+ dev->name, hip->temperature, hip->temperature - 273);
+}
+
+static void
+get_health_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct dev *dev = cb_arg;
+
+ g_outstanding_commands--;
+
+ if (spdk_nvme_cpl_is_error(cpl)) {
+ printf("%s: get log page failed\n", dev->name);
+ g_failed = 1;
+ return;
+ }
+
+ print_health_page(dev, dev->health_page);
+ g_aer_done++;
+}
+
+static void
+get_changed_ns_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct dev *dev = cb_arg;
+ bool found = false;
+ uint32_t i;
+
+ g_outstanding_commands--;
+
+ if (spdk_nvme_cpl_is_error(cpl)) {
+ printf("%s: get log page failed\n", dev->name);
+ g_failed = 1;
+ return;
+ }
+
+ /* Check whether the expected namespace ID is
+ * present in the changed namespace list.
+ */
+ if (dev->changed_ns_list->ns_list[0] != 0xffffffffu) {
+ for (i = 0; i < sizeof(*dev->changed_ns_list) / sizeof(uint32_t); i++) {
+ if (g_expected_ns_test == dev->changed_ns_list->ns_list[i]) {
+ printf("%s: changed NS list contains expected NSID: %u\n",
+ dev->name, g_expected_ns_test);
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ printf("%s: Error: Can't find expected NSID %u\n", dev->name, g_expected_ns_test);
+ g_failed = 1;
+ }
+
+ g_aer_done++;
+}
+
+static int
+get_health_log_page(struct dev *dev)
+{
+ int rc;
+
+ rc = spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
+ SPDK_NVME_GLOBAL_NS_TAG, dev->health_page, sizeof(*dev->health_page), 0,
+ get_health_log_page_completion, dev);
+
+ if (rc == 0) {
+ g_outstanding_commands++;
+ }
+
+ return rc;
+}
+
+static int
+get_changed_ns_log_page(struct dev *dev)
+{
+ int rc;
+
+ rc = spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_NVME_LOG_CHANGED_NS_LIST,
+ SPDK_NVME_GLOBAL_NS_TAG, dev->changed_ns_list,
+ sizeof(*dev->changed_ns_list), 0,
+ get_changed_ns_log_page_completion, dev);
+
+ if (rc == 0) {
+ g_outstanding_commands++;
+ }
+
+ return rc;
+}
+
+static void
+cleanup(void)
+{
+ struct dev *dev;
+
+ foreach_dev(dev) {
+ if (dev->health_page) {
+ spdk_free(dev->health_page);
+ }
+ if (dev->changed_ns_list) {
+ spdk_free(dev->changed_ns_list);
+ }
+ }
+}
+
+static void
+aer_cb(void *arg, const struct spdk_nvme_cpl *cpl)
+{
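+ /* Per the NVMe spec, an AER completion encodes the associated log page
+ * identifier in Dword 0 bits 23:16, which is what we extract below.
+ */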
+ uint32_t log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
+ struct dev *dev = arg;
+
+ if (spdk_nvme_cpl_is_error(cpl)) {
+ printf("%s: AER failed\n", dev->name);
+ g_failed = 1;
+ return;
+ }
+
+ printf("%s: aer_cb for log page %d\n", dev->name, log_page_id);
+
+ if (log_page_id == SPDK_NVME_LOG_HEALTH_INFORMATION) {
+ /* Set the temperature threshold back to the original value
+ * so the AER doesn't trigger again.
+ */
+ set_temp_threshold(dev, dev->orig_temp_threshold);
+ get_health_log_page(dev);
+ } else if (log_page_id == SPDK_NVME_LOG_CHANGED_NS_LIST) {
+ get_changed_ns_log_page(dev);
+ }
+}
+
+static void
+usage(const char *program_name)
+{
+ printf("%s [options]", program_name);
+ printf("\n");
+ printf("options:\n");
+ printf(" -T enable temperature tests\n");
+ printf(" -n expected Namespace attribute notice ID\n");
+ printf(" -t <file> touch specified file when ready to receive AER\n");
+ printf(" -r trid remote NVMe over Fabrics target address\n");
+ printf(" Format: 'key:value [key:value] ...'\n");
+ printf(" Keys:\n");
+ printf(" trtype Transport type (e.g. RDMA)\n");
+ printf(" adrfam Address family (e.g. IPv4, IPv6)\n");
+ printf(" traddr Transport address (e.g. 192.168.100.8)\n");
+ printf(" trsvcid Transport service identifier (e.g. 4420)\n");
+ printf(" subnqn Subsystem NQN (default: %s)\n", SPDK_NVMF_DISCOVERY_NQN);
+ printf(" Example: -r 'trtype:RDMA adrfam:IPv4 traddr:192.168.100.8 trsvcid:4420'\n");
+
+ spdk_log_usage(stdout, "-L");
+
+ printf(" -v verbose (enable warnings)\n");
+ printf(" -H show this usage\n");
+}
+
+static int
+parse_args(int argc, char **argv)
+{
+ int op, rc;
+ long int val;
+
+ spdk_nvme_trid_populate_transport(&g_trid, SPDK_NVME_TRANSPORT_PCIE);
+ snprintf(g_trid.subnqn, sizeof(g_trid.subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN);
+
+ while ((op = getopt(argc, argv, "n:r:t:HL:T")) != -1) {
+ switch (op) {
+ case 'n':
+ val = spdk_strtol(optarg, 10);
+ if (val < 0) {
+ fprintf(stderr, "Invalid NS attribute notice ID\n");
+ return val;
+ }
+ g_expected_ns_test = (uint32_t)val;
+ break;
+ case 'r':
+ if (spdk_nvme_transport_id_parse(&g_trid, optarg) != 0) {
+ fprintf(stderr, "Error parsing transport address\n");
+ return 1;
+ }
+ break;
+ case 't':
+ g_touch_file = optarg;
+ break;
+ case 'L':
+ rc = spdk_log_set_flag(optarg);
+ if (rc < 0) {
+ fprintf(stderr, "unknown flag\n");
+ usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ spdk_log_set_print_level(SPDK_LOG_DEBUG);
+#ifndef DEBUG
+ fprintf(stderr, "%s must be rebuilt with CONFIG_DEBUG=y for -L flag.\n",
+ argv[0]);
+ usage(argv[0]);
+ return 0;
+#endif
+ break;
+ case 'T':
+ g_enable_temp_test = 1;
+ break;
+ case 'H':
+ default:
+ usage(argv[0]);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ printf("Attaching to %s\n", trid->traddr);
+
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ struct dev *dev;
+
+ /* add to dev list */
+ dev = &g_devs[g_num_devs++];
+
+ dev->ctrlr = ctrlr;
+
+ snprintf(dev->name, sizeof(dev->name), "%s",
+ trid->traddr);
+
+ printf("Attached to %s\n", dev->name);
+
+ dev->health_page = spdk_zmalloc(sizeof(*dev->health_page), 4096, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ if (dev->health_page == NULL) {
+ printf("Allocation error (health page)\n");
+ g_failed = 1;
+ }
+ dev->changed_ns_list = spdk_zmalloc(sizeof(*dev->changed_ns_list), 4096, NULL,
+ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ if (dev->changed_ns_list == NULL) {
+ printf("Allocation error (changed namespace list page)\n");
+ g_failed = 1;
+ }
+}
+
+static void
+get_feature_test_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct dev *dev = cb_arg;
+
+ g_outstanding_commands--;
+
+ if (spdk_nvme_cpl_is_error(cpl)) {
+ printf("%s: get number of queues failed\n", dev->name);
+ g_failed = 1;
+ return;
+ }
+
+ if (g_aer_done < g_num_devs) {
+ /*
+ * Resubmit Get Features command to continue filling admin queue
+ * while the test is running.
+ */
+ get_feature_test(dev);
+ }
+}
+
+static void
+get_feature_test(struct dev *dev)
+{
+ struct spdk_nvme_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_NUMBER_OF_QUEUES;
+ if (spdk_nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, NULL, 0,
+ get_feature_test_cb, dev) != 0) {
+ printf("Failed to send Get Features command for dev=%p\n", dev);
+ g_failed = 1;
+ return;
+ }
+
+ g_outstanding_commands++;
+}
+
+static int
+spdk_aer_temperature_test(void)
+{
+ struct dev *dev;
+
+ printf("Getting temperature thresholds of all controllers...\n");
+ foreach_dev(dev) {
+ /* Get the original temperature threshold */
+ get_temp_threshold(dev);
+ }
+
+ while (!g_failed && g_temperature_done < g_num_devs) {
+ foreach_dev(dev) {
+ spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
+ }
+ }
+
+ if (g_failed) {
+ return g_failed;
+ }
+ g_temperature_done = 0;
+ g_aer_done = 0;
+
+ /* Send admin commands to test admin queue wraparound while waiting for the AER */
+ foreach_dev(dev) {
+ get_feature_test(dev);
+ }
+
+ if (g_failed) {
+ return g_failed;
+ }
+
+ printf("Waiting for all controllers to trigger AER...\n");
+ foreach_dev(dev) {
+ /* Set the temperature threshold to a low value */
+ set_temp_threshold(dev, 200);
+ }
+
+ if (g_failed) {
+ return g_failed;
+ }
+
+ while (!g_failed && (g_aer_done < g_num_devs || g_temperature_done < g_num_devs)) {
+ foreach_dev(dev) {
+ spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
+ }
+ }
+
+ if (g_failed) {
+ return g_failed;
+ }
+
+ return 0;
+}
+
+static int
+spdk_aer_changed_ns_test(void)
+{
+ struct dev *dev;
+
+ g_aer_done = 0;
+
+ printf("Starting namespce attribute notice tests for all controllers...\n");
+
+ foreach_dev(dev) {
+ get_feature_test(dev);
+ }
+
+ if (g_failed) {
+ return g_failed;
+ }
+
+ while (!g_failed && (g_aer_done < g_num_devs)) {
+ foreach_dev(dev) {
+ spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
+ }
+ }
+
+ if (g_failed) {
+ return g_failed;
+ }
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ struct dev *dev;
+ int i;
+ struct spdk_env_opts opts;
+ int rc;
+
+ rc = parse_args(argc, argv);
+ if (rc != 0) {
+ return rc;
+ }
+
+ spdk_env_opts_init(&opts);
+ opts.name = "aer";
+ opts.core_mask = "0x1";
+ if (spdk_env_init(&opts) < 0) {
+ fprintf(stderr, "Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ printf("Asynchronous Event Request test\n");
+
+ if (spdk_nvme_probe(&g_trid, NULL, probe_cb, attach_cb, NULL) != 0) {
+ fprintf(stderr, "spdk_nvme_probe() failed\n");
+ return 1;
+ }
+
+ if (g_failed) {
+ goto done;
+ }
+
+ printf("Registering asynchronous event callbacks...\n");
+ foreach_dev(dev) {
+ spdk_nvme_ctrlr_register_aer_callback(dev->ctrlr, aer_cb, dev);
+ }
+
+ if (g_touch_file) {
+ int fd;
+
+ fd = open(g_touch_file, O_CREAT | O_EXCL | O_RDWR, S_IFREG);
+ if (fd == -1) {
+ fprintf(stderr, "Could not touch %s (%s).\n", g_touch_file, strerror(errno));
+ g_failed = true;
+ goto done;
+ }
+ close(fd);
+ }
+
+ /* AER temperature test */
+ if (g_enable_temp_test) {
+ if (spdk_aer_temperature_test()) {
+ goto done;
+ }
+ }
+
+ /* AER changed namespace list test */
+ if (g_expected_ns_test) {
+ if (spdk_aer_changed_ns_test()) {
+ goto done;
+ }
+ }
+
+ printf("Cleaning up...\n");
+
+ while (g_outstanding_commands) {
+ foreach_dev(dev) {
+ spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
+ }
+ }
+
+ /* unregister AER callback so we don't fail on aborted AERs when we close out qpairs. */
+ foreach_dev(dev) {
+ spdk_nvme_ctrlr_register_aer_callback(dev->ctrlr, NULL, NULL);
+ }
+
+ for (i = 0; i < g_num_devs; i++) {
+ struct dev *dev = &g_devs[i];
+
+ spdk_nvme_detach(dev->ctrlr);
+ }
+
+done:
+ cleanup();
+
+ return g_failed;
+}
diff --git a/src/spdk/test/nvme/cuse/.gitignore b/src/spdk/test/nvme/cuse/.gitignore
new file mode 100644
index 000000000..b13d42337
--- /dev/null
+++ b/src/spdk/test/nvme/cuse/.gitignore
@@ -0,0 +1 @@
+cuse
diff --git a/src/spdk/test/nvme/cuse/Makefile b/src/spdk/test/nvme/cuse/Makefile
new file mode 100644
index 000000000..c847fe13f
--- /dev/null
+++ b/src/spdk/test/nvme/cuse/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+TEST_FILE = cuse.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/nvme/cuse/cuse.c b/src/spdk/test/nvme/cuse/cuse.c
new file mode 100644
index 000000000..fe5c26f0c
--- /dev/null
+++ b/src/spdk/test/nvme/cuse/cuse.c
@@ -0,0 +1,189 @@
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "nvme/nvme_cuse.c"
+
+DEFINE_STUB(nvme_io_msg_send, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
+ spdk_nvme_io_msg_fn fn, void *arg), 0);
+
+DEFINE_STUB(spdk_nvme_ctrlr_alloc_cmb_io_buffer, void *, (struct spdk_nvme_ctrlr *ctrlr,
+ size_t size), NULL);
+
+DEFINE_STUB(spdk_nvme_ctrlr_cmd_admin_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
+
+DEFINE_STUB(spdk_nvme_ctrlr_get_num_ns, uint32_t, (struct spdk_nvme_ctrlr *ctrlr), 128);
+
+static uint32_t g_active_num_ns = 4;
+static uint32_t g_active_nsid_min = 1;
+
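+/* Test double for spdk_nvme_ctrlr_is_active_ns(): namespaces with IDs in
+ * [g_active_nsid_min, g_active_nsid_min + g_active_num_ns) are reported as
+ * active, so the tests can shift this window and re-run nvme_cuse_update().
+ */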
+bool
+spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
+{
+ return nsid >= g_active_nsid_min && nsid < g_active_num_ns + g_active_nsid_min;
+}
+
+DEFINE_STUB(spdk_nvme_ctrlr_reset, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
+
+DEFINE_STUB(spdk_nvme_ns_cmd_read, int, (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+ void *payload,
+ uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
+ uint32_t io_flags), 0);
+
+DEFINE_STUB(spdk_nvme_ns_cmd_write, int, (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+ void *payload,
+ uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
+ uint32_t io_flags), 0);
+
+DEFINE_STUB(spdk_nvme_ns_get_num_sectors, uint64_t, (struct spdk_nvme_ns *ns), 0);
+
+DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
+
+DEFINE_STUB_V(spdk_unaffinitize_thread, (void));
+
+DEFINE_STUB(spdk_nvme_ctrlr_get_ns, struct spdk_nvme_ns *, (struct spdk_nvme_ctrlr *ctrlr,
+ uint32_t nsid), NULL);
+
+static bool
+wait_for_file(char *filename, bool exists)
+{
+ int i;
+
+ for (i = 0; i < 1000; i++) {
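+ /* XOR: return true as soon as the file's presence matches the requested
+ * state (exists == true waits for creation, exists == false waits for
+ * removal); polls for up to ~100 ms in total.
+ */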
+ if ((access(filename, F_OK) != -1) ^ (!exists)) {
+ return true;
+ }
+ usleep(100);
+ }
+ return false;
+}
+
+static void
+verify_devices(struct spdk_nvme_ctrlr *ctrlr)
+{
+ char ctrlr_name[256];
+ size_t ctrlr_name_size;
+ char ctrlr_dev[256], ns_dev[256 + 10];
+ uint32_t nsid, num_ns;
+ int rv;
+
+ ctrlr_name_size = sizeof(ctrlr_name);
+ rv = spdk_nvme_cuse_get_ctrlr_name(ctrlr, ctrlr_name, &ctrlr_name_size);
+ SPDK_CU_ASSERT_FATAL(rv == 0);
+
+ rv = snprintf(ctrlr_dev, sizeof(ctrlr_dev), "/dev/%s", ctrlr_name);
+ CU_ASSERT(rv > 0);
+ CU_ASSERT(wait_for_file(ctrlr_dev, true));
+
+ num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
+
+ for (nsid = 1; nsid <= num_ns; nsid++) {
+ snprintf(ns_dev, sizeof(ns_dev), "%sn%" PRIu32, ctrlr_dev, nsid);
+ if (spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
+ CU_ASSERT(wait_for_file(ns_dev, true));
+ } else {
+ CU_ASSERT(wait_for_file(ns_dev, false));
+ }
+ }
+
+ /* Next one should never exist */
+ snprintf(ns_dev, sizeof(ns_dev), "%sn%" PRIu32, ctrlr_dev, nsid);
+ CU_ASSERT(wait_for_file(ns_dev, false));
+}
+
+static void
+test_cuse_update(void)
+{
+ int rc;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ rc = nvme_cuse_start(&ctrlr);
+ CU_ASSERT(rc == 0);
+
+ g_active_num_ns = 4;
+ g_active_nsid_min = 1;
+ nvme_cuse_update(&ctrlr);
+ verify_devices(&ctrlr);
+
+ g_active_num_ns = 0;
+ nvme_cuse_update(&ctrlr);
+ verify_devices(&ctrlr);
+
+ g_active_num_ns = 4;
+ g_active_nsid_min = spdk_nvme_ctrlr_get_num_ns(&ctrlr) - g_active_num_ns;
+ nvme_cuse_update(&ctrlr);
+ verify_devices(&ctrlr);
+
+ g_active_num_ns = 2;
+ g_active_nsid_min = 2;
+ nvme_cuse_update(&ctrlr);
+ verify_devices(&ctrlr);
+
+ g_active_num_ns = 10;
+ g_active_nsid_min = 5;
+ nvme_cuse_update(&ctrlr);
+ verify_devices(&ctrlr);
+
+ g_active_num_ns = 5;
+ g_active_nsid_min = 3;
+ nvme_cuse_update(&ctrlr);
+ verify_devices(&ctrlr);
+
+ g_active_num_ns = 6;
+ g_active_nsid_min = 1;
+ nvme_cuse_update(&ctrlr);
+ verify_devices(&ctrlr);
+
+ nvme_cuse_stop(&ctrlr);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+ suite = CU_add_suite("nvme_cuse", NULL, NULL);
+ CU_ADD_TEST(suite, test_cuse_update);
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/nvme/cuse/nvme_cuse.sh b/src/spdk/test/nvme/cuse/nvme_cuse.sh
new file mode 100755
index 000000000..699cd5ac8
--- /dev/null
+++ b/src/spdk/test/nvme/cuse/nvme_cuse.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+if [[ $(uname) != "Linux" ]]; then
+ echo "NVMe cuse tests only supported on Linux"
+ exit 1
+fi
+
+modprobe cuse
+run_test "nvme_cuse_app" $testdir/cuse
+run_test "nvme_cuse_rpc" $testdir/nvme_cuse_rpc.sh
+run_test "nvme_cli_cuse" $testdir/spdk_nvme_cli_cuse.sh
+run_test "nvme_smartctl_cuse" $testdir/spdk_smartctl_cuse.sh
+
+# Only run the Namespace Management test case when such a device is present
+bdfs=$(get_nvme_bdfs)
+
+$rootdir/scripts/setup.sh reset
+sleep 1
+
+# Find a bdf that supports Namespace Management
+for bdf in $bdfs; do
+ nvme_name=$(get_nvme_ctrlr_from_bdf ${bdf})
+ if [[ -z "$nvme_name" ]]; then
+ continue
+ fi
+
+ # Check Optional Admin Command Support for Namespace Management
+ oacs=$(nvme id-ctrl /dev/${nvme_name} | grep oacs | cut -d: -f2)
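+ # Bit 3 of OACS (mask 0x8) indicates Namespace Management/Attachment support
+ # per the NVMe spec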
+ oacs_ns_manage=$((oacs & 0x8))
+
+ if [[ "$oacs_ns_manage" -ne 0 ]]; then
+ break
+ fi
+done
+
+if [[ "$oacs_ns_manage" -ne 0 ]]; then
+ run_test "nvme_ns_manage_cuse" $testdir/nvme_ns_manage_cuse.sh
+fi
+$rootdir/scripts/setup.sh
+
+rmmod cuse
diff --git a/src/spdk/test/nvme/cuse/nvme_cuse_rpc.sh b/src/spdk/test/nvme/cuse/nvme_cuse_rpc.sh
new file mode 100755
index 000000000..eaf0dbd9c
--- /dev/null
+++ b/src/spdk/test/nvme/cuse/nvme_cuse_rpc.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+bdf=$(get_first_nvme_bdf)
+ctrlr_base="/dev/spdk/nvme"
+
+$SPDK_BIN_DIR/spdk_tgt -m 0x3 &
+spdk_tgt_pid=$!
+trap 'kill -9 ${spdk_tgt_pid}; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $spdk_tgt_pid
+
+$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a ${bdf}
+$rpc_py bdev_nvme_cuse_register -n Nvme0
+
+sleep 5
+
+if [ ! -c "${ctrlr_base}0" ]; then
+ exit 1
+fi
+
+$rpc_py bdev_get_bdevs
+$rpc_py bdev_nvme_get_controllers
+
+$rpc_py bdev_nvme_cuse_unregister -n Nvme0
+sleep 1
+if [ -c "${ctrlr_base}0" ]; then
+ exit 1
+fi
+
+# Verify that removing a non-existent CUSE device fails
+$rpc_py bdev_nvme_cuse_unregister -n Nvme0 && false
+
+$rpc_py bdev_nvme_cuse_register -n Nvme0
+sleep 1
+
+if [ ! -c "${ctrlr_base}0" ]; then
+ exit 1
+fi
+
+# Verify that registering the same NVMe controller twice fails
+$rpc_py bdev_nvme_cuse_register -n Nvme0 && false
+sleep 1
+
+if [ -c "${ctrlr_base}1" ]; then
+ exit 1
+fi
+
+$rpc_py bdev_nvme_detach_controller Nvme0
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_tgt_pid
diff --git a/src/spdk/test/nvme/cuse/nvme_ns_manage_cuse.sh b/src/spdk/test/nvme/cuse/nvme_ns_manage_cuse.sh
new file mode 100755
index 000000000..fb390f34e
--- /dev/null
+++ b/src/spdk/test/nvme/cuse/nvme_ns_manage_cuse.sh
@@ -0,0 +1,164 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+NVME_CMD="/usr/local/src/nvme-cli/nvme"
+
+rpc_py=$rootdir/scripts/rpc.py
+
+$rootdir/scripts/setup.sh
+sleep 1
+
+bdfs=$(get_nvme_bdfs)
+
+$rootdir/scripts/setup.sh reset
+sleep 1
+
+# Find a bdf that supports Namespace Management
+for bdf in $bdfs; do
+ nvme_name=$(get_nvme_ctrlr_from_bdf ${bdf})
+ if [[ -z "$nvme_name" ]]; then
+ continue
+ fi
+
+ # Check Optional Admin Command Support for Namespace Management
+ oacs=$($NVME_CMD id-ctrl /dev/${nvme_name} | grep oacs | cut -d: -f2)
+ oacs_ns_manage=$((oacs & 0x8))
+
+ if [[ "$oacs_ns_manage" -ne 0 ]]; then
+ break
+ fi
+done
+
+if [[ "${nvme_name}" == "" ]] || [[ "$oacs_ns_manage" -eq 0 ]]; then
+ echo "No NVMe device supporting Namespace managment found"
+ $rootdir/scripts/setup.sh
+ exit 1
+fi
+
+nvme_dev=/dev/${nvme_name}
+
+# Detect supported features and configuration
+oaes=$($NVME_CMD id-ctrl ${nvme_dev} | grep oaes | cut -d: -f2)
+aer_ns_change=$((oaes & 0x100))
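+# Bit 8 of OAES (mask 0x100) indicates support for Namespace Attribute Notices,
+# i.e. whether the controller raises an AER when its namespaces change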
+
+function reset_nvme_if_aer_unsupported() {
+ if [[ "$aer_ns_change" -eq "0" ]]; then
+ sleep 1
+ $NVME_CMD reset "$1" || true
+ fi
+}
+
+function clean_up() {
+ $rootdir/scripts/setup.sh reset
+
+ # This assumes every NVMe controller contains a single namespace
+ # encompassing the Total NVM Capacity and formatted with a 512-byte
+ # block size. The 512-byte block size is needed for
+ # test/vhost/vhost_boot.sh to run successfully.
+
+ tnvmcap=$($NVME_CMD id-ctrl ${nvme_dev} | grep tnvmcap | cut -d: -f2)
+ blksize=512
+
+ size=$((tnvmcap / blksize))
+
+ echo "Restoring $nvme_dev..."
+ $NVME_CMD detach-ns ${nvme_dev} -n 0xffffffff -c 0 || true
+ $NVME_CMD delete-ns ${nvme_dev} -n 0xffffffff || true
+ $NVME_CMD create-ns ${nvme_dev} -s ${size} -c ${size} -b ${blksize}
+ $NVME_CMD attach-ns ${nvme_dev} -n 1 -c 0
+ $NVME_CMD reset ${nvme_dev}
+
+ $rootdir/scripts/setup.sh
+}
+
+function info_print() {
+ echo "---"
+ echo "$@"
+ echo "---"
+}
+
+# Prepare controller
+info_print "delete all namespaces"
+$NVME_CMD detach-ns ${nvme_dev} -n 0xffffffff -c 0 || true
+$NVME_CMD delete-ns ${nvme_dev} -n 0xffffffff || true
+
+reset_nvme_if_aer_unsupported ${nvme_dev}
+sleep 1
+
+PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh
+
+$SPDK_BIN_DIR/spdk_tgt -m 0x3 &
+spdk_tgt_pid=$!
+trap 'kill -9 ${spdk_tgt_pid}; clean_up; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $spdk_tgt_pid
+
+$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a ${bdf}
+$rpc_py bdev_nvme_cuse_register -n Nvme0
+
+sleep 1
+[[ -c /dev/spdk/nvme0 ]]
+
+for dev in /dev/spdk/nvme0n*; do
+ [[ ! -c ${dev} ]]
+done
+
+info_print "create ns: nsze=10000 ncap=10000 flbias=0"
+$NVME_CMD create-ns /dev/spdk/nvme0 -s 10000 -c 10000 -f 0
+
+info_print "attach ns: nsid=1 controller=0"
+$NVME_CMD attach-ns /dev/spdk/nvme0 -n 1 -c 0
+
+reset_nvme_if_aer_unsupported /dev/spdk/nvme0
+sleep 1
+
+[[ -c /dev/spdk/nvme0n1 ]]
+
+info_print "create ns: nsze=10000 ncap=10000 flbias=0"
+$NVME_CMD create-ns /dev/spdk/nvme0 -s 10000 -c 10000 -f 0
+
+info_print "attach ns: nsid=2 controller=0"
+$NVME_CMD attach-ns /dev/spdk/nvme0 -n 2 -c 0
+
+reset_nvme_if_aer_unsupported /dev/spdk/nvme0
+sleep 1
+
+[[ -c /dev/spdk/nvme0n2 ]]
+
+info_print "detach ns: nsid=2 controller=0"
+$NVME_CMD detach-ns /dev/spdk/nvme0 -n 2 -c 0 || true
+
+info_print "delete ns: nsid=2"
+$NVME_CMD delete-ns /dev/spdk/nvme0 -n 2 || true
+
+reset_nvme_if_aer_unsupported /dev/spdk/nvme0
+sleep 1
+
+[[ ! -c /dev/spdk/nvme0n2 ]]
+
+info_print "detach ns: nsid=1 controller=0"
+$NVME_CMD detach-ns /dev/spdk/nvme0 -n 1 -c 0 || true
+
+info_print "delete ns: nsid=1"
+$NVME_CMD delete-ns /dev/spdk/nvme0 -n 1 || true
+
+reset_nvme_if_aer_unsupported /dev/spdk/nvme0
+sleep 1
+
+# At this point no CUSE namespace devices should remain
+for dev in /dev/spdk/nvme0n*; do
+ [[ ! -c ${dev} ]]
+done
+
+$rpc_py bdev_nvme_detach_controller Nvme0
+
+sleep 1
+[[ ! -c /dev/spdk/nvme0 ]]
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_tgt_pid
+clean_up
diff --git a/src/spdk/test/nvme/cuse/spdk_nvme_cli_cuse.sh b/src/spdk/test/nvme/cuse/spdk_nvme_cli_cuse.sh
new file mode 100755
index 000000000..cdddd2278
--- /dev/null
+++ b/src/spdk/test/nvme/cuse/spdk_nvme_cli_cuse.sh
@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+rm -Rf $testdir/match_files
+mkdir $testdir/match_files
+
+KERNEL_OUT=$testdir/match_files/kernel.out
+CUSE_OUT=$testdir/match_files/cuse.out
+
+NVME_CMD=/usr/local/src/nvme-cli/nvme
+rpc_py=$rootdir/scripts/rpc.py
+
+bdf=$(get_first_nvme_bdf)
+
+PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
+sleep 1
+nvme_name=$(get_nvme_ctrlr_from_bdf ${bdf})
+if [[ -z "$nvme_name" ]]; then
+ echo "setup.sh failed bind kernel driver to ${bdf}"
+ return 1
+fi
+
+ctrlr="/dev/${nvme_name}"
+ns="/dev/${nvme_name}n1"
+
+waitforblk "${nvme_name}n1"
+
+oacs=$(${NVME_CMD} id-ctrl $ctrlr | grep oacs | cut -d: -f2)
+oacs_firmware=$((oacs & 0x4))
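+# Bit 2 of OACS (mask 0x4) indicates Firmware Commit/Image Download support,
+# which gates the fw-log comparisons below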
+
+set +e
+
+${NVME_CMD} get-ns-id $ns > ${KERNEL_OUT}.1
+${NVME_CMD} id-ns $ns > ${KERNEL_OUT}.2
+${NVME_CMD} list-ns $ns > ${KERNEL_OUT}.3
+
+${NVME_CMD} id-ctrl $ctrlr > ${KERNEL_OUT}.4
+${NVME_CMD} list-ctrl $ctrlr > ${KERNEL_OUT}.5
+if [ "$oacs_firmware" -ne "0" ]; then
+ ${NVME_CMD} fw-log $ctrlr > ${KERNEL_OUT}.6
+fi
+${NVME_CMD} smart-log $ctrlr
+${NVME_CMD} error-log $ctrlr > ${KERNEL_OUT}.7
+${NVME_CMD} get-feature $ctrlr -f 1 -s 1 -l 100 > ${KERNEL_OUT}.8
+${NVME_CMD} get-log $ctrlr -i 1 -l 100 > ${KERNEL_OUT}.9
+${NVME_CMD} reset $ctrlr > ${KERNEL_OUT}.10
+
+set -e
+
+$rootdir/scripts/setup.sh
+
+$SPDK_BIN_DIR/spdk_tgt -m 0x3 &
+spdk_tgt_pid=$!
+trap 'kill -9 ${spdk_tgt_pid}; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $spdk_tgt_pid
+
+$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a ${bdf}
+$rpc_py bdev_nvme_cuse_register -n Nvme0
+
+sleep 5
+
+if [ ! -c /dev/spdk/nvme0 ]; then
+ return 1
+fi
+
+$rpc_py bdev_get_bdevs
+$rpc_py bdev_nvme_get_controllers
+
+set +e
+
+ns="/dev/spdk/nvme0n1"
+${NVME_CMD} get-ns-id $ns > ${CUSE_OUT}.1
+${NVME_CMD} id-ns $ns > ${CUSE_OUT}.2
+${NVME_CMD} list-ns $ns > ${CUSE_OUT}.3
+
+ctrlr="/dev/spdk/nvme0"
+${NVME_CMD} id-ctrl $ctrlr > ${CUSE_OUT}.4
+${NVME_CMD} list-ctrl $ctrlr > ${CUSE_OUT}.5
+if [ "$oacs_firmware" -ne "0" ]; then
+ ${NVME_CMD} fw-log $ctrlr > ${CUSE_OUT}.6
+fi
+${NVME_CMD} smart-log $ctrlr
+${NVME_CMD} error-log $ctrlr > ${CUSE_OUT}.7
+${NVME_CMD} get-feature $ctrlr -f 1 -s 1 -l 100 > ${CUSE_OUT}.8
+${NVME_CMD} get-log $ctrlr -i 1 -l 100 > ${CUSE_OUT}.9
+${NVME_CMD} reset $ctrlr > ${CUSE_OUT}.10
+
+set -e
+
+for i in {1..10}; do
+ if [ -f "${KERNEL_OUT}.${i}" ] && [ -f "${CUSE_OUT}.${i}" ]; then
+ sed -i "s/${nvme_name}/nvme0/g" ${KERNEL_OUT}.${i}
+ diff --suppress-common-lines ${KERNEL_OUT}.${i} ${CUSE_OUT}.${i}
+ fi
+done
+
+rm -Rf $testdir/match_files
+
+if [ ! -c "$ctrlr" ]; then
+ return 1
+fi
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_tgt_pid
diff --git a/src/spdk/test/nvme/cuse/spdk_smartctl_cuse.sh b/src/spdk/test/nvme/cuse/spdk_smartctl_cuse.sh
new file mode 100755
index 000000000..a92ca1199
--- /dev/null
+++ b/src/spdk/test/nvme/cuse/spdk_smartctl_cuse.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+SMARTCTL_CMD='smartctl -d nvme'
+rpc_py=$rootdir/scripts/rpc.py
+
+bdf=$(get_first_nvme_bdf)
+
+PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
+sleep 1
+nvme_name=$(get_nvme_ctrlr_from_bdf ${bdf})
+if [[ -z "$nvme_name" ]]; then
+ echo "setup.sh failed bind kernel driver to ${bdf}"
+ exit 1
+fi
+
+KERNEL_SMART_JSON=$(${SMARTCTL_CMD} --json=g -a /dev/${nvme_name} | grep -v "/dev/${nvme_name}" | sort || true)
+
+${SMARTCTL_CMD} -i /dev/${nvme_name}n1
+
+# Error logs are not included in the JSON output, so capture them separately
+KERNEL_SMART_ERRLOG=$(${SMARTCTL_CMD} -l error /dev/${nvme_name})
+
+$rootdir/scripts/setup.sh
+
+$SPDK_BIN_DIR/spdk_tgt -m 0x3 &
+spdk_tgt_pid=$!
+trap 'kill -9 ${spdk_tgt_pid}; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $spdk_tgt_pid
+
+$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a ${bdf}
+$rpc_py bdev_nvme_cuse_register -n Nvme0
+
+sleep 5
+
+if [ ! -c /dev/spdk/nvme0 ]; then
+ exit 1
+fi
+
+CUSE_SMART_JSON=$(${SMARTCTL_CMD} --json=g -a /dev/spdk/nvme0 | grep -v "/dev/spdk/nvme0" | sort || true)
+
+DIFF_SMART_JSON=$(diff --changed-group-format='%<' --unchanged-group-format='' <(echo "$KERNEL_SMART_JSON") <(echo "$CUSE_SMART_JSON") || true)
+
+# Filter out fields whose values can legitimately change between reads
+# (health counters, timestamps, temperature)
+ERR_SMART_JSON=$(grep -v "json\.nvme_smart_health_information_log\.\|json\.local_time\.\|json\.temperature\.\|json\.power_on_time\.hours" <<< $DIFF_SMART_JSON || true)
+
+if [ -n "$ERR_SMART_JSON" ]; then
+ echo "Wrong values for: $ERR_SMART_JSON"
+ exit 1
+fi
+
+CUSE_SMART_ERRLOG=$(${SMARTCTL_CMD} -l error /dev/spdk/nvme0)
+if [ "$CUSE_SMART_ERRLOG" != "$KERNEL_SMART_ERRLOG" ]; then
+ echo "Wrong values in NVMe Error log"
+ exit 1
+fi
+
+# Data integrity was checked before; now make sure other commands don't fail
+${SMARTCTL_CMD} -i /dev/spdk/nvme0n1
+${SMARTCTL_CMD} -c /dev/spdk/nvme0
+${SMARTCTL_CMD} -A /dev/spdk/nvme0
+
+# The health checks may legitimately report a failing status, so don't treat
+# a non-zero exit as an error
+${SMARTCTL_CMD} -x /dev/spdk/nvme0 || true
+${SMARTCTL_CMD} -H /dev/spdk/nvme0 || true
+
+$rpc_py bdev_nvme_detach_controller Nvme0
+sleep 1
+if [ -c /dev/spdk/nvme1 ]; then
+ exit 1
+fi
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_tgt_pid
diff --git a/src/spdk/test/nvme/deallocated_value/.gitignore b/src/spdk/test/nvme/deallocated_value/.gitignore
new file mode 100644
index 000000000..8460e82ea
--- /dev/null
+++ b/src/spdk/test/nvme/deallocated_value/.gitignore
@@ -0,0 +1 @@
+deallocated_value
diff --git a/src/spdk/test/nvme/deallocated_value/Makefile b/src/spdk/test/nvme/deallocated_value/Makefile
new file mode 100644
index 000000000..8277c32e0
--- /dev/null
+++ b/src/spdk/test/nvme/deallocated_value/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+APP = deallocated_value
+
+include $(SPDK_ROOT_DIR)/mk/nvme.libtest.mk
diff --git a/src/spdk/test/nvme/deallocated_value/deallocated_value.c b/src/spdk/test/nvme/deallocated_value/deallocated_value.c
new file mode 100644
index 000000000..91600e83e
--- /dev/null
+++ b/src/spdk/test/nvme/deallocated_value/deallocated_value.c
@@ -0,0 +1,447 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/nvme.h"
+#include "spdk/env.h"
+
+#define NUM_BLOCKS 100
+
+/*
+ * The purpose of this sample app is to determine the read value of deallocated logical blocks
+ * from a given NVMe Controller. The NVMe 1.3 spec requires the controller to list this value,
+ * but controllers adhering to the NVMe 1.2 spec may not report this value. According to the spec,
+ * "The values read from a deallocated logical block and its metadata (excluding protection information) shall
+ * be all bytes set to 00h, all bytes set to FFh, or the last data written to the associated logical block".
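+ *
+ * This test writes a random pattern to the first NUM_BLOCKS LBAs of each registered namespace,
+ * deallocates those LBAs with a Dataset Management command, then re-reads them and reports how
+ * many blocks still match the previous data, read back as all zeroes, or read back as all 0xFF.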
+ */
+
+struct ns_entry {
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct spdk_nvme_ns *ns;
+ struct ns_entry *next;
+ struct spdk_nvme_qpair *qpair;
+};
+
+struct deallocate_context {
+ struct ns_entry *ns_entry;
+ char **write_buf;
+ char **read_buf;
+ char *zero_buf;
+ char *FFh_buf;
+ int writes_completed;
+ int reads_completed;
+ int deallocate_completed;
+ int flush_complete;
+ int matches_zeroes;
+ int matches_previous_data;
+ int matches_FFh;
+};
+
+static struct ns_entry *g_namespaces = NULL;
+
+static void cleanup(struct deallocate_context *context);
+
+static void
+fill_random(char *buf, size_t num_bytes)
+{
+ size_t i;
+
+ srand((unsigned) time(NULL));
+ for (i = 0; i < num_bytes; i++) {
+ buf[i] = rand() % 0x100;
+ }
+}
+
+static void
+register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
+{
+ struct ns_entry *entry;
+ const struct spdk_nvme_ctrlr_data *cdata;
+
+ cdata = spdk_nvme_ctrlr_get_data(ctrlr);
+
+ if (!spdk_nvme_ns_is_active(ns)) {
+ printf("Controller %-20.20s (%-20.20s): Skipping inactive NS %u\n",
+ cdata->mn, cdata->sn,
+ spdk_nvme_ns_get_id(ns));
+ return;
+ }
+
+ entry = malloc(sizeof(struct ns_entry));
+ if (entry == NULL) {
+ perror("ns_entry malloc");
+ exit(1);
+ }
+
+ entry->ctrlr = ctrlr;
+ entry->ns = ns;
+ entry->next = g_namespaces;
+ g_namespaces = entry;
+
+ printf(" Namespace ID: %d size: %juGB\n", spdk_nvme_ns_get_id(ns),
+ spdk_nvme_ns_get_size(ns) / 1000000000);
+}
+
+static uint32_t
+get_max_block_size(void)
+{
+ struct ns_entry *ns;
+ uint32_t max_block_size, temp_block_size;
+
+ ns = g_namespaces;
+ max_block_size = 0;
+
+ while (ns != NULL) {
+ temp_block_size = spdk_nvme_ns_get_sector_size(ns->ns);
+ max_block_size = temp_block_size > max_block_size ? temp_block_size : max_block_size;
+ ns = ns->next;
+ }
+
+ return max_block_size;
+}
+
+static void
+write_complete(void *arg, const struct spdk_nvme_cpl *completion)
+{
+ struct deallocate_context *context = arg;
+
+ context->writes_completed++;
+}
+
+static void
+read_complete(void *arg, const struct spdk_nvme_cpl *completion)
+{
+ struct deallocate_context *context = arg;
+ struct ns_entry *ns_entry = context->ns_entry;
+ int rc;
+
+ rc = memcmp(context->write_buf[context->reads_completed],
+ context->read_buf[context->reads_completed], spdk_nvme_ns_get_sector_size(ns_entry->ns));
+ if (rc == 0) {
+ context->matches_previous_data++;
+ }
+
+ rc = memcmp(context->zero_buf, context->read_buf[context->reads_completed],
+ spdk_nvme_ns_get_sector_size(ns_entry->ns));
+ if (rc == 0) {
+ context->matches_zeroes++;
+ }
+
+ rc = memcmp(context->FFh_buf, context->read_buf[context->reads_completed],
+ spdk_nvme_ns_get_sector_size(ns_entry->ns));
+ if (rc == 0) {
+ context->matches_FFh++;
+ }
+ context->reads_completed++;
+}
+
+static void
+deallocate_complete(void *arg, const struct spdk_nvme_cpl *completion)
+{
+ struct deallocate_context *context = arg;
+
+ printf("blocks matching previous data: %d\n", context->matches_previous_data);
+ printf("blocks matching zeroes: %d\n", context->matches_zeroes);
+ printf("blocks matching 0xFF: %d\n", context->matches_FFh);
+ printf("Deallocating Blocks 0 to %d with random data.\n", NUM_BLOCKS - 1);
+ printf("On next read, read value will match deallocated block read value.\n");
+ context->deallocate_completed = 1;
+ context->reads_completed = 0;
+ context->matches_previous_data = 0;
+ context->matches_zeroes = 0;
+ context->matches_FFh = 0;
+}
+
+static void
+flush_complete(void *arg, const struct spdk_nvme_cpl *completion)
+{
+ struct deallocate_context *context = arg;
+
+ context->flush_complete = 1;
+}
+
+static void
+deallocate_test(void)
+{
+ struct ns_entry *ns_entry;
+ struct spdk_nvme_ctrlr *ctrlr;
+ const struct spdk_nvme_ctrlr_data *data;
+ struct deallocate_context context;
+ struct spdk_nvme_dsm_range range;
+ uint32_t max_block_size;
+ int rc, i;
+
+ memset(&context, 0, sizeof(struct deallocate_context));
+ max_block_size = get_max_block_size();
+ ns_entry = g_namespaces;
+
+ if (max_block_size > 0) {
+ context.zero_buf = malloc(max_block_size);
+ } else {
+ printf("Unable to determine max block size.\n");
+ return;
+ }
+
+ if (context.zero_buf == NULL) {
+ printf("could not allocate buffer for test.\n");
+ return;
+ }
+
+ context.FFh_buf = malloc(max_block_size);
+ if (context.FFh_buf == NULL) {
+ cleanup(&context);
+ printf("could not allocate buffer for test.\n");
+ return;
+ }
+
+ context.write_buf = calloc(NUM_BLOCKS, sizeof(char *));
+ if (context.write_buf == NULL) {
+ cleanup(&context);
+ return;
+ }
+
+ context.read_buf = calloc(NUM_BLOCKS, sizeof(char *));
+ if (context.read_buf == NULL) {
+ printf("could not allocate buffer for test.\n");
+ cleanup(&context);
+ return;
+ }
+
+ memset(context.zero_buf, 0x00, max_block_size);
+ memset(context.FFh_buf, 0xFF, max_block_size);
+
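+	/*
+	 * The per-block write/read buffers are used directly for NVMe I/O, so allocate them
+	 * with spdk_zmalloc() to get DMA-safe memory.
+	 */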
+ for (i = 0; i < NUM_BLOCKS; i++) {
+ context.write_buf[i] = spdk_zmalloc(0x1000, max_block_size, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ if (context.write_buf[i] == NULL) {
+ printf("could not allocate buffer for test.\n");
+ cleanup(&context);
+ return;
+ }
+
+ fill_random(context.write_buf[i], 0x1000);
+ context.read_buf[i] = spdk_zmalloc(0x1000, max_block_size, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ if (context.read_buf[i] == NULL) {
+ printf("could not allocate buffer for test.\n");
+ cleanup(&context);
+ return;
+ }
+ }
+
+ while (ns_entry != NULL) {
+
+ ns_entry->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, NULL, 0);
+		if (ns_entry->qpair == NULL) {
+			printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair() failed.\n");
+			cleanup(&context);
+			return;
+		}
+
+ ctrlr = spdk_nvme_ns_get_ctrlr(ns_entry->ns);
+ data = spdk_nvme_ctrlr_get_data(ctrlr);
+
+ printf("\nController %-20.20s (%-20.20s)\n", data->mn, data->sn);
+ printf("Controller PCI vendor:%u PCI subsystem vendor:%u\n", data->vid, data->ssvid);
+ printf("Namespace Block Size:%u\n", spdk_nvme_ns_get_sector_size(ns_entry->ns));
+ printf("Writing Blocks 0 to %d with random data.\n", NUM_BLOCKS - 1);
+ printf("On next read, read value will match random data.\n");
+
+ context.ns_entry = ns_entry;
+
+ for (i = 0; i < NUM_BLOCKS; i++) {
+ rc = spdk_nvme_ns_cmd_write(ns_entry->ns, ns_entry->qpair, context.write_buf[i],
+ i,
+ 1,
+ write_complete, &context, 0);
+ if (rc) {
+ printf("Error in nvme command completion, values may be inaccurate.\n");
+ }
+ }
+ while (context.writes_completed < NUM_BLOCKS) {
+ spdk_nvme_qpair_process_completions(ns_entry->qpair, 0);
+ }
+
+ spdk_nvme_ns_cmd_flush(ns_entry->ns, ns_entry->qpair, flush_complete, &context);
+ while (!context.flush_complete) {
+ spdk_nvme_qpair_process_completions(ns_entry->qpair, 0);
+ }
+
+ for (i = 0; i < NUM_BLOCKS; i++) {
+ rc = spdk_nvme_ns_cmd_read(ns_entry->ns, ns_entry->qpair, context.read_buf[i],
+ i, /* LBA start */
+ 1, /* number of LBAs */
+ read_complete, &context, 0);
+ if (rc) {
+ printf("Error in nvme command completion, values may be inaccurate.\n");
+ }
+
+ /* block after each read command so that we can match the block to the write buffer. */
+ while (context.reads_completed <= i) {
+ spdk_nvme_qpair_process_completions(ns_entry->qpair, 0);
+ }
+ }
+
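+		/* Deallocate LBAs 0 through NUM_BLOCKS - 1 with a single dataset management range. */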
+ context.flush_complete = 0;
+ range.length = NUM_BLOCKS;
+ range.starting_lba = 0;
+ rc = spdk_nvme_ns_cmd_dataset_management(ns_entry->ns, ns_entry->qpair,
+ SPDK_NVME_DSM_ATTR_DEALLOCATE, &range, 1, deallocate_complete, &context);
+ if (rc) {
+ printf("Error in nvme command completion, values may be inaccurate.\n");
+ }
+
+ while (!context.deallocate_completed) {
+ spdk_nvme_qpair_process_completions(ns_entry->qpair, 0);
+ }
+
+ for (i = 0; i < NUM_BLOCKS; i++) {
+ rc = spdk_nvme_ns_cmd_read(ns_entry->ns, ns_entry->qpair, context.read_buf[i],
+ i, /* LBA start */
+ 1, /* number of LBAs */
+ read_complete, &context, 0);
+ if (rc) {
+ printf("Error in nvme command completion, values may be inaccurate.\n");
+ }
+ while (context.reads_completed <= i) {
+ spdk_nvme_qpair_process_completions(ns_entry->qpair, 0);
+ }
+ }
+
+ printf("blocks matching previous data: %d\n", context.matches_previous_data);
+ printf("blocks matching zeroes: %d\n", context.matches_zeroes);
+ printf("blocks matching FFh: %d\n", context.matches_FFh);
+
+ /* reset counters in between each namespace. */
+ context.matches_previous_data = 0;
+ context.matches_zeroes = 0;
+ context.matches_FFh = 0;
+ context.writes_completed = 0;
+ context.reads_completed = 0;
+ context.deallocate_completed = 0;
+
+ spdk_nvme_ctrlr_free_io_qpair(ns_entry->qpair);
+ ns_entry = ns_entry->next;
+ }
+ cleanup(&context);
+}
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ printf("Attaching to %s\n", trid->traddr);
+
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ int num_ns;
+ struct spdk_nvme_ns *ns;
+
+ printf("Attached to %s\n", trid->traddr);
+ /*
+ * Use only the first namespace from each controller since we are testing controller level functionality.
+ */
+ num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
+ if (num_ns < 1) {
+ printf("No valid namespaces in controller\n");
+ } else {
+ ns = spdk_nvme_ctrlr_get_ns(ctrlr, 1);
+ register_ns(ctrlr, ns);
+ }
+}
+
+static void
+cleanup(struct deallocate_context *context)
+{
+ struct ns_entry *ns_entry = g_namespaces;
+ int i;
+
+ while (ns_entry) {
+ struct ns_entry *next = ns_entry->next;
+ free(ns_entry);
+ ns_entry = next;
+ }
+ for (i = 0; i < NUM_BLOCKS; i++) {
+ if (context->write_buf && context->write_buf[i]) {
+ spdk_free(context->write_buf[i]);
+ } else {
+ break;
+ }
+ if (context->read_buf && context->read_buf[i]) {
+ spdk_free(context->read_buf[i]);
+ } else {
+ break;
+ }
+ }
+
+ free(context->write_buf);
+ free(context->read_buf);
+ free(context->zero_buf);
+ free(context->FFh_buf);
+}
+
+int main(int argc, char **argv)
+{
+ int rc;
+ struct spdk_env_opts opts;
+
+ spdk_env_opts_init(&opts);
+ opts.name = "deallocate_test";
+ opts.shm_id = 0;
+ if (spdk_env_init(&opts) < 0) {
+ fprintf(stderr, "Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ printf("Initializing NVMe Controllers\n");
+
+ rc = spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL);
+ if (rc != 0) {
+ fprintf(stderr, "spdk_nvme_probe() failed\n");
+ return 1;
+ }
+
+ if (g_namespaces == NULL) {
+ fprintf(stderr, "no NVMe controllers found\n");
+ return 1;
+ }
+
+ printf("Initialization complete.\n");
+ deallocate_test();
+ return 0;
+}
diff --git a/src/spdk/test/nvme/e2edp/.gitignore b/src/spdk/test/nvme/e2edp/.gitignore
new file mode 100644
index 000000000..df0958204
--- /dev/null
+++ b/src/spdk/test/nvme/e2edp/.gitignore
@@ -0,0 +1 @@
+nvme_dp
diff --git a/src/spdk/test/nvme/e2edp/Makefile b/src/spdk/test/nvme/e2edp/Makefile
new file mode 100644
index 000000000..576262269
--- /dev/null
+++ b/src/spdk/test/nvme/e2edp/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+APP = nvme_dp
+
+include $(SPDK_ROOT_DIR)/mk/nvme.libtest.mk
diff --git a/src/spdk/test/nvme/e2edp/nvme_dp.c b/src/spdk/test/nvme/e2edp/nvme_dp.c
new file mode 100644
index 000000000..9559001e8
--- /dev/null
+++ b/src/spdk/test/nvme/e2edp/nvme_dp.c
@@ -0,0 +1,652 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NVMe end-to-end data protection test
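+ *
+ * Each test case builds an I/O request with a specific protection information configuration
+ * (PRACT on or off, guard/reference tag/application tag checks, extended LBA vs. separate
+ * metadata buffer), writes a known data pattern, reads it back, and compares the payload.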
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/nvme.h"
+#include "spdk/env.h"
+#include "spdk/crc16.h"
+#include "spdk/endian.h"
+#include "spdk/memory.h"
+
+#define MAX_DEVS 64
+
+#define DATA_PATTERN 0x5A
+
+struct dev {
+ struct spdk_nvme_ctrlr *ctrlr;
+ char name[SPDK_NVMF_TRADDR_MAX_LEN + 1];
+};
+
+static struct dev devs[MAX_DEVS];
+static int num_devs = 0;
+
+#define foreach_dev(iter) \
+ for (iter = devs; iter - devs < num_devs; iter++)
+
+static int io_complete_flag = 0;
+
+struct io_request {
+ void *contig;
+ void *metadata;
+ bool use_extended_lba;
+ bool use_sgl;
+ uint32_t sgl_offset;
+ uint32_t buf_size;
+ uint64_t lba;
+ uint32_t lba_count;
+ uint16_t apptag_mask;
+ uint16_t apptag;
+};
+
+static void
+io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
+{
+ if (spdk_nvme_cpl_is_error(cpl)) {
+ io_complete_flag = 2;
+ } else {
+ io_complete_flag = 1;
+ }
+}
+
+static void
+ns_data_buffer_reset(struct spdk_nvme_ns *ns, struct io_request *req, uint8_t data_pattern)
+{
+ uint32_t md_size, sector_size;
+ uint32_t i, offset = 0;
+ uint8_t *buf;
+
+ sector_size = spdk_nvme_ns_get_sector_size(ns);
+ md_size = spdk_nvme_ns_get_md_size(ns);
+
+ for (i = 0; i < req->lba_count; i++) {
+ if (req->use_extended_lba) {
+ offset = (sector_size + md_size) * i;
+ } else {
+ offset = sector_size * i;
+ }
+
+ buf = (uint8_t *)req->contig + offset;
+ memset(buf, data_pattern, sector_size);
+ }
+}
+
+static void nvme_req_reset_sgl(void *cb_arg, uint32_t sgl_offset)
+{
+ struct io_request *req = (struct io_request *)cb_arg;
+
+ req->sgl_offset = sgl_offset;
+ return;
+}
+
+static int nvme_req_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+ struct io_request *req = (struct io_request *)cb_arg;
+ void *payload;
+
+ payload = req->contig + req->sgl_offset;
+ *address = payload;
+
+ *length = req->buf_size - req->sgl_offset;
+
+ return 0;
+}
+
+/* CRC-16 Guard checked for extended lba format */
+static uint32_t dp_guard_check_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
+ uint32_t *io_flags)
+{
+ struct spdk_nvme_protection_info *pi;
+ uint32_t md_size, sector_size;
+
+ req->lba_count = 2;
+
+ /* extended LBA only for the test case */
+ if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
+ return 0;
+ }
+
+ sector_size = spdk_nvme_ns_get_sector_size(ns);
+ md_size = spdk_nvme_ns_get_md_size(ns);
+ req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
+ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ assert(req->contig);
+
+ req->lba = 0;
+ req->use_extended_lba = true;
+ req->use_sgl = true;
+ req->buf_size = (sector_size + md_size) * req->lba_count;
+ req->metadata = NULL;
+ ns_data_buffer_reset(ns, req, DATA_PATTERN);
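+	/*
+	 * In the extended LBA format the protection information occupies the last 8 bytes of
+	 * each block's metadata, i.e. the last 8 bytes of every (sector_size + md_size) chunk.
+	 */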
+ pi = (struct spdk_nvme_protection_info *)(req->contig + sector_size + md_size - 8);
+ /* big-endian for guard */
+ to_be16(&pi->guard, spdk_crc16_t10dif(0, req->contig, sector_size));
+
+ pi = (struct spdk_nvme_protection_info *)(req->contig + (sector_size + md_size) * 2 - 8);
+ to_be16(&pi->guard, spdk_crc16_t10dif(0, req->contig + sector_size + md_size, sector_size));
+
+ *io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD;
+
+ return req->lba_count;
+}
+
+/*
+ * No protection information with PRACT setting to 1,
+ * both extended LBA format and separate metadata can
+ * run the test case.
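+ *
+ * With PRACT set to 1 and an 8-byte (PI only) metadata format, the controller generates and
+ * strips the protection information itself, so the host data buffer needs no room for metadata.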
+ */
+static uint32_t dp_with_pract_test(struct spdk_nvme_ns *ns, struct io_request *req,
+ uint32_t *io_flags)
+{
+ uint32_t md_size, sector_size, data_len;
+
+ req->lba_count = 8;
+
+ sector_size = spdk_nvme_ns_get_sector_size(ns);
+ md_size = spdk_nvme_ns_get_md_size(ns);
+ if (md_size == 8) {
+ /* No additional metadata buffer provided */
+ data_len = sector_size * req->lba_count;
+ } else {
+ data_len = (sector_size + md_size) * req->lba_count;
+ }
+ req->contig = spdk_zmalloc(data_len, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ assert(req->contig);
+
+ req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ assert(req->metadata);
+
+ switch (spdk_nvme_ns_get_pi_type(ns)) {
+ case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
+ *io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRACT;
+ break;
+ case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
+ case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
+ *io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRCHK_REFTAG |
+ SPDK_NVME_IO_FLAGS_PRACT;
+ break;
+ default:
+ *io_flags = 0;
+ break;
+ }
+
+ req->lba = 0;
+ req->use_extended_lba = false;
+
+ return req->lba_count;
+}
+
+/* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT setting to 0 */
+static uint32_t dp_without_pract_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
+ uint32_t *io_flags)
+{
+ struct spdk_nvme_protection_info *pi;
+ uint32_t md_size, sector_size;
+
+ req->lba_count = 2;
+
+ switch (spdk_nvme_ns_get_pi_type(ns)) {
+ case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
+ return 0;
+ default:
+ break;
+ }
+
+ /* extended LBA only for the test case */
+ if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
+ return 0;
+ }
+
+ sector_size = spdk_nvme_ns_get_sector_size(ns);
+ md_size = spdk_nvme_ns_get_md_size(ns);
+ req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
+ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ assert(req->contig);
+
+ req->lba = 0;
+ req->use_extended_lba = true;
+ req->metadata = NULL;
+ pi = (struct spdk_nvme_protection_info *)(req->contig + sector_size + md_size - 8);
+ /* big-endian for reference tag */
+ to_be32(&pi->ref_tag, (uint32_t)req->lba);
+
+ pi = (struct spdk_nvme_protection_info *)(req->contig + (sector_size + md_size) * 2 - 8);
+	/* the reference tag is incremented for each subsequent logical block */
+ to_be32(&pi->ref_tag, (uint32_t)(req->lba + 1));
+
+ *io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
+
+ return req->lba_count;
+}
+
+/* LBA + Metadata without data protection bits setting */
+static uint32_t dp_without_flags_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
+ uint32_t *io_flags)
+{
+ uint32_t md_size, sector_size;
+
+ req->lba_count = 16;
+
+ /* extended LBA only for the test case */
+ if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
+ return 0;
+ }
+
+ sector_size = spdk_nvme_ns_get_sector_size(ns);
+ md_size = spdk_nvme_ns_get_md_size(ns);
+ req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
+ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ assert(req->contig);
+
+ req->lba = 0;
+ req->use_extended_lba = true;
+ req->metadata = NULL;
+ *io_flags = 0;
+
+ return req->lba_count;
+}
+
+/* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT setting to 0 */
+static uint32_t dp_without_pract_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
+ uint32_t *io_flags)
+{
+ struct spdk_nvme_protection_info *pi;
+ uint32_t md_size, sector_size;
+
+ req->lba_count = 2;
+
+ switch (spdk_nvme_ns_get_pi_type(ns)) {
+ case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
+ return 0;
+ default:
+ break;
+ }
+
+ /* separate metadata payload for the test case */
+ if (spdk_nvme_ns_supports_extended_lba(ns)) {
+ return 0;
+ }
+
+ sector_size = spdk_nvme_ns_get_sector_size(ns);
+ md_size = spdk_nvme_ns_get_md_size(ns);
+ req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ assert(req->contig);
+
+ req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ assert(req->metadata);
+
+ req->lba = 0;
+ req->use_extended_lba = false;
+
+	/* PI occupies the last 8 bytes of the metadata if the metadata size is bigger than 8 */
+ pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
+ /* big-endian for reference tag */
+ to_be32(&pi->ref_tag, (uint32_t)req->lba);
+
+ pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size * 2 - 8);
+	/* the reference tag is incremented for each subsequent logical block */
+ to_be32(&pi->ref_tag, (uint32_t)(req->lba + 1));
+
+ *io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
+
+ return req->lba_count;
+}
+
+/* Application Tag checked with PRACT setting to 0 */
+static uint32_t dp_without_pract_separate_meta_apptag_test(struct spdk_nvme_ns *ns,
+ struct io_request *req,
+ uint32_t *io_flags)
+{
+ struct spdk_nvme_protection_info *pi;
+ uint32_t md_size, sector_size;
+
+ req->lba_count = 1;
+
+ /* separate metadata payload for the test case */
+ if (spdk_nvme_ns_supports_extended_lba(ns)) {
+ return 0;
+ }
+
+ sector_size = spdk_nvme_ns_get_sector_size(ns);
+ md_size = spdk_nvme_ns_get_md_size(ns);
+ req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ assert(req->contig);
+
+ req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ assert(req->metadata);
+
+ req->lba = 0;
+ req->use_extended_lba = false;
+ req->apptag_mask = 0xFFFF;
+ req->apptag = req->lba_count;
+
+	/* PI occupies the last 8 bytes of the metadata if the metadata size is bigger than 8 */
+ pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
+ to_be16(&pi->app_tag, req->lba_count);
+
+ *io_flags = SPDK_NVME_IO_FLAGS_PRCHK_APPTAG;
+
+ return req->lba_count;
+}
+
+/*
+ * LBA + Metadata without data protection bits setting,
+ * separate metadata payload for the test case.
+ */
+static uint32_t dp_without_flags_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
+ uint32_t *io_flags)
+{
+ uint32_t md_size, sector_size;
+
+ req->lba_count = 16;
+
+ /* separate metadata payload for the test case */
+ if (spdk_nvme_ns_supports_extended_lba(ns)) {
+ return 0;
+ }
+
+ sector_size = spdk_nvme_ns_get_sector_size(ns);
+ md_size = spdk_nvme_ns_get_md_size(ns);
+ req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ assert(req->contig);
+
+ req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ assert(req->metadata);
+
+ req->lba = 0;
+ req->use_extended_lba = false;
+ *io_flags = 0;
+
+ return req->lba_count;
+}
+
+typedef uint32_t (*nvme_build_io_req_fn_t)(struct spdk_nvme_ns *ns, struct io_request *req,
+		uint32_t *io_flags);
+
+static void
+free_req(struct io_request *req)
+{
+ if (req == NULL) {
+ return;
+ }
+
+ if (req->contig) {
+ spdk_free(req->contig);
+ }
+
+ if (req->metadata) {
+ spdk_free(req->metadata);
+ }
+
+ spdk_free(req);
+}
+
+static int
+ns_data_buffer_compare(struct spdk_nvme_ns *ns, struct io_request *req, uint8_t data_pattern)
+{
+ uint32_t md_size, sector_size;
+ uint32_t i, j, offset = 0;
+ uint8_t *buf;
+
+ sector_size = spdk_nvme_ns_get_sector_size(ns);
+ md_size = spdk_nvme_ns_get_md_size(ns);
+
+ for (i = 0; i < req->lba_count; i++) {
+ if (req->use_extended_lba) {
+ offset = (sector_size + md_size) * i;
+ } else {
+ offset = sector_size * i;
+ }
+
+ buf = (uint8_t *)req->contig + offset;
+ for (j = 0; j < sector_size; j++) {
+ if (buf[j] != data_pattern) {
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+write_read_e2e_dp_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, const char *test_name)
+{
+ int rc = 0;
+ uint32_t lba_count;
+ uint32_t io_flags = 0;
+
+ struct io_request *req;
+ struct spdk_nvme_ns *ns;
+ struct spdk_nvme_qpair *qpair;
+ const struct spdk_nvme_ns_data *nsdata;
+
+ ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
+ if (!ns) {
+ printf("Null namespace\n");
+ return 0;
+ }
+
+ if (!(spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED)) {
+ return 0;
+ }
+
+ nsdata = spdk_nvme_ns_get_data(ns);
+ if (!nsdata || !spdk_nvme_ns_get_sector_size(ns)) {
+ fprintf(stderr, "Empty nsdata or wrong sector size\n");
+ return -EINVAL;
+ }
+
+ req = spdk_zmalloc(sizeof(*req), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ assert(req);
+
+ /* IO parameters setting */
+ lba_count = build_io_fn(ns, req, &io_flags);
+ if (!lba_count) {
+ printf("%s: %s bypass the test case\n", dev->name, test_name);
+ free_req(req);
+ return 0;
+ }
+
+ qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
+ if (!qpair) {
+ free_req(req);
+ return -1;
+ }
+
+ ns_data_buffer_reset(ns, req, DATA_PATTERN);
+ if (req->use_extended_lba && req->use_sgl) {
+ rc = spdk_nvme_ns_cmd_writev(ns, qpair, req->lba, lba_count, io_complete, req, io_flags,
+ nvme_req_reset_sgl, nvme_req_next_sge);
+ } else if (req->use_extended_lba) {
+ rc = spdk_nvme_ns_cmd_write(ns, qpair, req->contig, req->lba, lba_count,
+ io_complete, req, io_flags);
+ } else {
+ rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, req->contig, req->metadata, req->lba, lba_count,
+ io_complete, req, io_flags, req->apptag_mask, req->apptag);
+ }
+
+ if (rc != 0) {
+ fprintf(stderr, "%s: %s write submit failed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return -1;
+ }
+
+ io_complete_flag = 0;
+
+ while (!io_complete_flag) {
+ spdk_nvme_qpair_process_completions(qpair, 1);
+ }
+
+ if (io_complete_flag != 1) {
+ fprintf(stderr, "%s: %s write exec failed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return -1;
+ }
+
+ /* reset completion flag */
+ io_complete_flag = 0;
+
+ ns_data_buffer_reset(ns, req, 0);
+ if (req->use_extended_lba && req->use_sgl) {
+ rc = spdk_nvme_ns_cmd_readv(ns, qpair, req->lba, lba_count, io_complete, req, io_flags,
+ nvme_req_reset_sgl, nvme_req_next_sge);
+
+ } else if (req->use_extended_lba) {
+ rc = spdk_nvme_ns_cmd_read(ns, qpair, req->contig, req->lba, lba_count,
+ io_complete, req, io_flags);
+ } else {
+ rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, req->contig, req->metadata, req->lba, lba_count,
+ io_complete, req, io_flags, req->apptag_mask, req->apptag);
+ }
+
+ if (rc != 0) {
+ fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return -1;
+ }
+
+ while (!io_complete_flag) {
+ spdk_nvme_qpair_process_completions(qpair, 1);
+ }
+
+ if (io_complete_flag != 1) {
+ fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return -1;
+ }
+
+ rc = ns_data_buffer_compare(ns, req, DATA_PATTERN);
+ if (rc < 0) {
+ fprintf(stderr, "%s: %s write/read success, but memcmp Failed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return -1;
+ }
+
+ printf("%s: %s test passed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return 0;
+}
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ printf("Attaching to %s\n", trid->traddr);
+
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ struct dev *dev;
+
+	/* add to dev list */
+	if (num_devs >= MAX_DEVS) {
+		return;
+	}
+	dev = &devs[num_devs++];
+
+ dev->ctrlr = ctrlr;
+
+ snprintf(dev->name, sizeof(dev->name), "%s",
+ trid->traddr);
+
+ printf("Attached to %s\n", dev->name);
+}
+
+int main(int argc, char **argv)
+{
+ struct dev *iter;
+ int rc, i;
+ struct spdk_env_opts opts;
+
+ spdk_env_opts_init(&opts);
+ opts.name = "nvme_dp";
+ opts.core_mask = "0x1";
+ opts.shm_id = 0;
+ if (spdk_env_init(&opts) < 0) {
+ fprintf(stderr, "Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ printf("NVMe Write/Read with End-to-End data protection test\n");
+
+ if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
+ fprintf(stderr, "nvme_probe() failed\n");
+ exit(1);
+ }
+
+ rc = 0;
+ foreach_dev(iter) {
+#define TEST(x) write_read_e2e_dp_tests(iter, x, #x)
+ if (TEST(dp_with_pract_test)
+ || TEST(dp_guard_check_extended_lba_test)
+ || TEST(dp_without_pract_extended_lba_test)
+ || TEST(dp_without_flags_extended_lba_test)
+ || TEST(dp_without_pract_separate_meta_test)
+ || TEST(dp_without_pract_separate_meta_apptag_test)
+ || TEST(dp_without_flags_separate_meta_test)) {
+#undef TEST
+ rc = 1;
+ printf("%s: failed End-to-End data protection tests\n", iter->name);
+ }
+ }
+
+ printf("Cleaning up...\n");
+
+ for (i = 0; i < num_devs; i++) {
+ struct dev *dev = &devs[i];
+
+ spdk_nvme_detach(dev->ctrlr);
+ }
+
+ return rc;
+}
diff --git a/src/spdk/test/nvme/err_injection/.gitignore b/src/spdk/test/nvme/err_injection/.gitignore
new file mode 100644
index 000000000..3572a8e78
--- /dev/null
+++ b/src/spdk/test/nvme/err_injection/.gitignore
@@ -0,0 +1 @@
+err_injection
diff --git a/src/spdk/test/nvme/err_injection/Makefile b/src/spdk/test/nvme/err_injection/Makefile
new file mode 100644
index 000000000..579fb5440
--- /dev/null
+++ b/src/spdk/test/nvme/err_injection/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+APP = err_injection
+
+include $(SPDK_ROOT_DIR)/mk/nvme.libtest.mk
diff --git a/src/spdk/test/nvme/err_injection/err_injection.c b/src/spdk/test/nvme/err_injection/err_injection.c
new file mode 100644
index 000000000..73b42786b
--- /dev/null
+++ b/src/spdk/test/nvme/err_injection/err_injection.c
@@ -0,0 +1,279 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/log.h"
+#include "spdk/nvme.h"
+#include "spdk/env.h"
+
+#define MAX_DEVS 64
+
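+/*
+ * This test registers software error injections with spdk_nvme_qpair_add_cmd_error_injection():
+ * one admin (Get Features) error and one I/O (Read) error per controller. It verifies that the
+ * injected status codes are observed, then reissues the same commands to confirm they succeed
+ * once the injected errors have been consumed.
+ */
+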
+struct dev {
+ bool error_expected;
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct spdk_nvme_ns *ns;
+ struct spdk_nvme_qpair *qpair;
+ void *data;
+ char name[SPDK_NVMF_TRADDR_MAX_LEN + 1];
+};
+
+static struct dev devs[MAX_DEVS];
+static int num_devs = 0;
+
+#define foreach_dev(iter) \
+ for (iter = devs; iter - devs < num_devs; iter++)
+
+static int outstanding_commands = 0;
+static int failed = 0;
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ printf("Attaching to %s\n", trid->traddr);
+
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ struct dev *dev;
+ uint32_t nsid;
+
+	/* add to dev list */
+	if (num_devs >= MAX_DEVS) {
+		return;
+	}
+	dev = &devs[num_devs++];
+
+ dev->ctrlr = ctrlr;
+ nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
+ dev->ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
+
+ dev->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);
+ if (dev->qpair == NULL) {
+ failed = 1;
+ return;
+ }
+
+ snprintf(dev->name, sizeof(dev->name), "%s",
+ trid->traddr);
+
+ printf("Attached to %s\n", dev->name);
+}
+
+static void
+get_feature_test_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct dev *dev = cb_arg;
+
+ outstanding_commands--;
+
+ if (spdk_nvme_cpl_is_error(cpl) && dev->error_expected) {
+ if (cpl->status.sct != SPDK_NVME_SCT_GENERIC ||
+ cpl->status.sc != SPDK_NVME_SC_INVALID_FIELD) {
+ failed = 1;
+ }
+ printf("%s: get features failed as expected\n", dev->name);
+ return;
+ }
+
+ if (!spdk_nvme_cpl_is_error(cpl) && !dev->error_expected) {
+ printf("%s: get features successfully as expected\n", dev->name);
+ return;
+ }
+
+ failed = 1;
+}
+
+static void
+get_feature_test(bool error_expected)
+{
+ struct dev *dev;
+ struct spdk_nvme_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_NUMBER_OF_QUEUES;
+
+ foreach_dev(dev) {
+ dev->error_expected = error_expected;
+ if (spdk_nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, NULL, 0,
+ get_feature_test_cb, dev) != 0) {
+ printf("Error: failed to send Get Features command for dev=%p\n", dev);
+ failed = 1;
+ goto cleanup;
+ }
+ outstanding_commands++;
+ }
+
+cleanup:
+
+ while (outstanding_commands) {
+ foreach_dev(dev) {
+ spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
+ }
+ }
+}
+
+static void
+read_test_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct dev *dev = cb_arg;
+
+ outstanding_commands--;
+ spdk_free(dev->data);
+
+ if (spdk_nvme_cpl_is_error(cpl) && dev->error_expected) {
+ if (cpl->status.sct != SPDK_NVME_SCT_MEDIA_ERROR ||
+ cpl->status.sc != SPDK_NVME_SC_UNRECOVERED_READ_ERROR) {
+ failed = 1;
+ }
+ printf("%s: read failed as expected\n", dev->name);
+ return;
+ }
+
+ if (!spdk_nvme_cpl_is_error(cpl) && !dev->error_expected) {
+ printf("%s: read successfully as expected\n", dev->name);
+ return;
+ }
+
+ failed = 1;
+}
+
+static void
+read_test(bool error_expected)
+{
+ struct dev *dev;
+
+ foreach_dev(dev) {
+ if (dev->ns == NULL) {
+ continue;
+ }
+
+ dev->error_expected = error_expected;
+ dev->data = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ if (!dev->data) {
+ failed = 1;
+ goto cleanup;
+ }
+
+ if (spdk_nvme_ns_cmd_read(dev->ns, dev->qpair, dev->data,
+ 0, 1, read_test_cb, dev, 0) != 0) {
+ printf("Error: failed to send Read command for dev=%p\n", dev);
+ failed = 1;
+ goto cleanup;
+ }
+
+ outstanding_commands++;
+ }
+
+cleanup:
+
+ while (outstanding_commands) {
+ foreach_dev(dev) {
+ spdk_nvme_qpair_process_completions(dev->qpair, 0);
+ }
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct dev *dev;
+ int i;
+ struct spdk_env_opts opts;
+ int rc;
+
+ spdk_env_opts_init(&opts);
+ opts.name = "err_injection";
+ opts.core_mask = "0x1";
+ opts.shm_id = 0;
+ if (spdk_env_init(&opts) < 0) {
+ fprintf(stderr, "Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ printf("NVMe Error Injection test\n");
+
+ if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
+ fprintf(stderr, "spdk_nvme_probe() failed\n");
+ return 1;
+ }
+
+ if (failed) {
+ goto exit;
+ }
+
+ if (!num_devs) {
+ printf("No NVMe controller found, %s exiting\n", argv[0]);
+ return 1;
+ }
+
+ foreach_dev(dev) {
+ /* Admin error injection at submission path */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(dev->ctrlr, NULL,
+ SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);
+ failed += rc;
+ /* IO error injection at completion path */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(dev->ctrlr, dev->qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+ failed += rc;
+ }
+
+ if (failed) {
+ goto exit;
+ }
+
+ /* Admin Get Feature, expect error return */
+ get_feature_test(true);
+ /* Admin Get Feature, expect successful return */
+ get_feature_test(false);
+ /* Read, expect error return */
+ read_test(true);
+ /* Read, expect successful return */
+ read_test(false);
+
+exit:
+ printf("Cleaning up...\n");
+ for (i = 0; i < num_devs; i++) {
+ struct dev *dev = &devs[i];
+ spdk_nvme_detach(dev->ctrlr);
+ }
+
+ return failed;
+}
diff --git a/src/spdk/test/nvme/hotplug.sh b/src/spdk/test/nvme/hotplug.sh
new file mode 100755
index 000000000..13011e193
--- /dev/null
+++ b/src/spdk/test/nvme/hotplug.sh
@@ -0,0 +1,134 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+if [ -z "${DEPENDENCY_DIR}" ]; then
+ echo DEPENDENCY_DIR not defined!
+ exit 1
+fi
+
+function ssh_vm() {
+ xtrace_disable
+ sshpass -p "$password" ssh -o PubkeyAuthentication=no \
+ -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 10022 root@localhost "$@"
+ xtrace_restore
+}
+
+function monitor_cmd() {
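+	# Send a command to the QEMU human monitor (listening on localhost:4444) and strip the
+	# greeting line and "(qemu)" prompt echoes from the output.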
+ echo "$@" | nc localhost 4444 | tail --lines=+2 | (grep -v '^(qemu) ' || true)
+}
+
+function get_online_devices_count() {
+ ssh_vm "lspci | grep -c NVM"
+}
+
+function wait_for_devices_ready() {
+ count=$(get_online_devices_count)
+
+ while [ $count -ne 4 ]; do
+ echo "waitting for all devices online"
+ count=$(get_online_devices_count)
+ done
+}
+
+function insert_devices() {
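+	# Hot-add four emulated NVMe devices through the QEMU monitor, wait until the guest
+	# sees all of them, then run scripts/setup.sh in the guest to bind them for SPDK use.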
+ for i in {0..3}; do
+ monitor_cmd "device_add nvme,drive=drive$i,id=nvme$i,serial=nvme$i"
+ done
+ wait_for_devices_ready
+ ssh_vm "scripts/setup.sh"
+}
+
+function remove_devices() {
+ for i in {0..3}; do
+ monitor_cmd "device_del nvme$i"
+ done
+}
+
+function devices_delete() {
+ for i in {0..3}; do
+ rm "$SPDK_TEST_STORAGE/nvme$i.img"
+ done
+}
+
+password=$1
+base_img=${DEPENDENCY_DIR}/fedora-hotplug.qcow2
+test_img=${DEPENDENCY_DIR}/fedora-hotplug-test.qcow2
+qemu_pidfile=${DEPENDENCY_DIR}/qemupid
+
+if [ ! -e "$base_img" ]; then
+ echo "Hotplug VM image not found; skipping test"
+ exit 0
+fi
+
+timing_enter start_qemu
+
+qemu-img create -b "$base_img" -f qcow2 "$test_img"
+
+for i in {0..3}; do
+ dd if=/dev/zero of="$SPDK_TEST_STORAGE/nvme$i.img" bs=1M count=1024
+done
+
+qemu-system-x86_64 \
+ -daemonize -display none -m 8192 \
+ -pidfile "$qemu_pidfile" \
+ -hda "$test_img" \
+ -net user,hostfwd=tcp::10022-:22 \
+ -net nic \
+ -cpu host \
+ -smp cores=16,sockets=1 \
+ --enable-kvm \
+ -chardev socket,id=mon0,host=localhost,port=4444,server,nowait \
+ -mon chardev=mon0,mode=readline \
+ -drive format=raw,file="$SPDK_TEST_STORAGE/nvme0.img",if=none,id=drive0 \
+ -drive format=raw,file="$SPDK_TEST_STORAGE/nvme1.img",if=none,id=drive1 \
+ -drive format=raw,file="$SPDK_TEST_STORAGE/nvme2.img",if=none,id=drive2 \
+ -drive format=raw,file="$SPDK_TEST_STORAGE/nvme3.img",if=none,id=drive3
+
+timing_exit start_qemu
+
+timing_enter wait_for_vm
+ssh_vm 'echo ready'
+timing_exit wait_for_vm
+
+timing_enter copy_repo
+files_to_copy="scripts "
+files_to_copy+="include/spdk/pci_ids.h "
+files_to_copy+="build/examples/hotplug "
+files_to_copy+="build/lib "
+files_to_copy+="dpdk/build/lib "
+(
+ cd "$rootdir"
+ tar -cf - $files_to_copy
+) | (ssh_vm "tar -xf -")
+timing_exit copy_repo
+
+insert_devices
+
+timing_enter hotplug_test
+
+ssh_vm "LD_LIBRARY_PATH=/root//build/lib:/root/dpdk/build/lib:$LD_LIBRARY_PATH build/examples/hotplug -i 0 -t 25 -n 4 -r 8" &
+example_pid=$!
+
+sleep 6
+remove_devices
+sleep 4
+insert_devices
+sleep 6
+remove_devices
+devices_delete
+
+timing_enter wait_for_example
+wait $example_pid
+timing_exit wait_for_example
+
+trap - SIGINT SIGTERM EXIT
+
+qemupid=$(awk '{printf $0}' "$qemu_pidfile")
+kill -9 $qemupid
+rm "$qemu_pidfile"
+rm "$test_img"
+
+timing_exit hotplug_test
diff --git a/src/spdk/test/nvme/hw_hotplug.sh b/src/spdk/test/nvme/hw_hotplug.sh
new file mode 100755
index 000000000..ba9c59463
--- /dev/null
+++ b/src/spdk/test/nvme/hw_hotplug.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+export SPDK_LIB_DIR="$rootdir/build/lib"
+export DPDK_LIB_DIR="$rootdir/dpdk/build/lib"
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SPDK_LIB_DIR:$DPDK_LIB_DIR
+
+function insert_device() {
+	ssh root@$ip "Beetle --SetGpio $gpio HIGH"
+ waitforblk $name
+ DRIVER_OVERRIDE=$driver $rootdir/scripts/setup.sh
+}
+
+function remove_device() {
+	ssh root@$ip "Beetle --SetGpio $gpio LOW"
+}
+
+ip=$1
+gpio=$2
+driver=$3
+declare -i io_time=5
+declare -i kernel_hotplug_time=7
+
+timing_enter hotplug_hw_cfg
+
+# Configure microcontroller
+ssh root@$ip "Beetle --SetGpioDirection $gpio OUT"
+
+# Get blk dev name connected to interposer
+ssh root@$ip "Beetle --SetGpio $gpio HIGH"
+sleep $kernel_hotplug_time
+$rootdir/scripts/setup.sh reset
+blk_list1=$(lsblk -d --output NAME | grep "^nvme")
+remove_device
+sleep $kernel_hotplug_time
+blk_list2=$(lsblk -d --output NAME | grep "^nvme") || true
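+# blk_list1 was captured with the interposer powered on and blk_list2 after powering it off,
+# so removing the blk_list2 prefix from blk_list1 leaves the name of the device under test.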
+name=${blk_list1#"$blk_list2"}
+
+insert_device
+
+timing_exit hotplug_hw_cfg
+
+timing_enter hotplug_hw_test
+
+$SPDK_EXAMPLE_DIR/hotplug -i 0 -t 100 -n 2 -r 2 2>&1 | tee -a log.txt &
+example_pid=$!
+trap 'killprocess $example_pid; exit 1' SIGINT SIGTERM EXIT
+
+i=0
+while ! grep "Starting I/O" log.txt; do
+ [ $i -lt 20 ] || break
+ i=$((i + 1))
+ sleep 1
+done
+
+if ! grep "Starting I/O" log.txt; then
+ return 1
+fi
+
+# Add and remove NVMe with delays between to give some time for IO to proceed
+remove_device
+sleep $io_time
+insert_device
+sleep $io_time
+remove_device
+sleep $io_time
+insert_device
+sleep $io_time
+
+timing_enter wait_for_example
+wait $example_pid
+timing_exit wait_for_example
+
+trap - SIGINT SIGTERM EXIT
+
+timing_exit hotplug_hw_test
diff --git a/src/spdk/test/nvme/nvme.sh b/src/spdk/test/nvme/nvme.sh
new file mode 100755
index 000000000..74ba496cb
--- /dev/null
+++ b/src/spdk/test/nvme/nvme.sh
@@ -0,0 +1,134 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+function nvme_identify() {
+	timing_enter identify
+	$SPDK_EXAMPLE_DIR/identify -i 0
+ for bdf in $(get_nvme_bdfs); do
+ $SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:${bdf}" -i 0
+ done
+ timing_exit identify
+}
+
+function nvme_perf() {
+ # enable no shutdown notification option
+ $SPDK_EXAMPLE_DIR/perf -q 128 -w read -o 12288 -t 1 -LL -i 0 -N
+ $SPDK_EXAMPLE_DIR/perf -q 128 -w write -o 12288 -t 1 -LL -i 0
+ if [ -b /dev/ram0 ]; then
+ # Test perf with AIO device
+ $SPDK_EXAMPLE_DIR/perf /dev/ram0 -q 128 -w read -o 12288 -t 1 -LL -i 0
+ fi
+}
+
+function nvme_fio_test() {
+ PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
+ ran_fio=false
+ for bdf in $(get_nvme_bdfs); do
+ if $SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:${bdf}" | grep -E "^Number of Namespaces" - | grep -q "0" -; then
+ continue
+ fi
+ fio_nvme $PLUGIN_DIR/example_config.fio --filename="trtype=PCIe traddr=${bdf//:/.}"
+ ran_fio=true
+ done
+ $ran_fio || (echo "No valid NVMe drive found. Failing test." && false)
+}
+
+function nvme_multi_secondary() {
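+	# Run three perf instances concurrently with the same shared memory id (-i 0) so they
+	# attach to one SPDK instance, exercising multi-process primary/secondary support.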
+ $SPDK_EXAMPLE_DIR/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x1 &
+ pid0=$!
+ $SPDK_EXAMPLE_DIR/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x2 &
+ pid1=$!
+ $SPDK_EXAMPLE_DIR/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x4
+ wait $pid0
+ wait $pid1
+}
+
+if [ $(uname) = Linux ]; then
+ # check that our setup.sh script does not bind NVMe devices to uio/vfio if they
+ # have an active mountpoint
+ $rootdir/scripts/setup.sh reset
+ # give kernel nvme driver some time to create the block devices before we start looking for them
+ sleep 1
+ blkname=''
+ # first, find an NVMe device that does not have an active mountpoint already;
+ # this covers rare case where someone is running this test script on a system
+ # that has a mounted NVMe filesystem
+ #
+ # note: more work probably needs to be done to properly handle devices with multiple
+ # namespaces
+ for bdf in $(get_nvme_bdfs); do
+ for name in $(get_nvme_name_from_bdf $bdf); do
+ if [ "$name" != "" ]; then
+ mountpoints=$(lsblk /dev/$name --output MOUNTPOINT -n | wc -w)
+ if [ "$mountpoints" = "0" ]; then
+ blkname=$name
+ break 2
+ fi
+ fi
+ done
+ done
+
+ # if we found an NVMe block device without an active mountpoint, create and mount
+ # a filesystem on it for purposes of testing the setup.sh script
+ if [ "$blkname" != "" ]; then
+ parted -s /dev/$blkname mklabel gpt
+ # just create a 100MB partition - this tests our ability to detect mountpoints
+ # on partitions of the device, not just the device itself; it also is faster
+ # since we don't trim and initialize the whole namespace
+ parted -s /dev/$blkname mkpart primary 1 100
+ sleep 1
+ mkfs.ext4 -F /dev/${blkname}p1
+ mkdir -p /tmp/nvmetest
+ mount /dev/${blkname}p1 /tmp/nvmetest
+ sleep 1
+ $rootdir/scripts/setup.sh
+ driver=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
+ # check that the nvme driver is still loaded against the device
+ if [ "$driver" != "nvme" ]; then
+ exit 1
+ fi
+ umount /tmp/nvmetest
+ rmdir /tmp/nvmetest
+ # write zeroes to the device to blow away the partition table and filesystem
+ dd if=/dev/zero of=/dev/$blkname oflag=direct bs=1M count=1
+ $rootdir/scripts/setup.sh
+ driver=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
+ # check that the nvme driver is not loaded against the device
+ if [ "$driver" = "nvme" ]; then
+ exit 1
+ fi
+ else
+ $rootdir/scripts/setup.sh
+ fi
+fi
+
+if [ $(uname) = Linux ]; then
+ trap "kill_stub -9; exit 1" SIGINT SIGTERM EXIT
+ start_stub "-s 4096 -i 0 -m 0xE"
+fi
+
+run_test "nvme_reset" $testdir/reset/reset -q 64 -w write -s 4096 -t 5
+run_test "nvme_identify" nvme_identify
+run_test "nvme_perf" nvme_perf
+run_test "nvme_hello_world" $SPDK_EXAMPLE_DIR/hello_world
+run_test "nvme_deallocated_value" $testdir/deallocated_value/deallocated_value
+run_test "nvme_sgl" $testdir/sgl/sgl
+run_test "nvme_e2edp" $testdir/e2edp/nvme_dp
+run_test "nvme_reserve" $testdir/reserve/reserve
+run_test "nvme_err_injection" $testdir/err_injection/err_injection
+run_test "nvme_overhead" $testdir/overhead/overhead -s 4096 -t 1 -H
+run_test "nvme_arbitration" $SPDK_EXAMPLE_DIR/arbitration -t 3 -i 0
+
+if [ $(uname) != "FreeBSD" ]; then
+ run_test "nvme_startup" $testdir/startup/startup -t 1000000
+ run_test "nvme_multi_secondary" nvme_multi_secondary
+ trap - SIGINT SIGTERM EXIT
+ kill_stub
+fi
+
+if [[ $CONFIG_FIO_PLUGIN == y ]]; then
+ run_test "nvme_fio" nvme_fio_test
+fi
diff --git a/src/spdk/test/nvme/nvme_opal.sh b/src/spdk/test/nvme/nvme_opal.sh
new file mode 100755
index 000000000..1aee2be5a
--- /dev/null
+++ b/src/spdk/test/nvme/nvme_opal.sh
@@ -0,0 +1,133 @@
+#!/usr/bin/env bash
+
+set -e
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+rpc_py="$rootdir/scripts/rpc.py"
+source "$rootdir/scripts/common.sh"
+source "$rootdir/test/common/autotest_common.sh"
+
+# The OPAL CI tests are only run on P4510 devices.
+mapfile -t bdfs < <(get_nvme_bdfs_by_id 0x0a59)
+if [[ -z ${bdfs[0]} ]]; then
+ echo "No P4510 device found, exit the tests"
+ exit 1
+fi
+
+bdf=${bdfs[0]}
+
+function opal_revert_and_init() {
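+	# Revert the drive's OPAL state to factory defaults, then take ownership again with the
+	# test admin password so the later test cases start from a known state.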
+ $SPDK_BIN_DIR/spdk_tgt &
+ spdk_tgt_pid=$!
+ waitforlisten $spdk_tgt_pid
+
+ $rootdir/scripts/rpc.py bdev_nvme_attach_controller -b "nvme0" -t "pcie" -a ${bdf}
+ # Ignore if this fails.
+ $rootdir/scripts/rpc.py bdev_nvme_opal_revert -b nvme0 -p test || true
+ sleep 1
+ $rpc_py bdev_nvme_opal_init -b nvme0 -p test
+ $rpc_py bdev_nvme_detach_controller nvme0
+
+ killprocess $spdk_tgt_pid
+}
+
+function test_opal_cmds() {
+ $rpc_py bdev_nvme_attach_controller -b "nvme0" -t "pcie" -a ${bdf}
+
+ $rpc_py bdev_opal_create -b nvme0 -n 1 -i 1 -s 0 -l 1024 -p test
+ $rpc_py bdev_opal_create -b nvme0 -n 1 -i 2 -s 1024 -l 512 -p test
+ $rpc_py bdev_opal_get_info -b nvme0n1r1 -p test
+
+ $rpc_py bdev_opal_delete -b nvme0n1r1 -p test
+ $rpc_py bdev_opal_delete -b nvme0n1r2 -p test
+
+ $rpc_py bdev_opal_create -b nvme0 -n 1 -i 1 -s 0 -l 1024 -p test
+ $rpc_py bdev_opal_create -b nvme0 -n 1 -i 2 -s 1024 -l 512 -p test
+
+ $rpc_py bdev_opal_delete -b nvme0n1r2 -p test
+ $rpc_py bdev_opal_delete -b nvme0n1r1 -p test
+
+ $rpc_py bdev_opal_create -b nvme0 -n 1 -i 3 -s 4096 -l 4096 -p test
+ $rpc_py bdev_opal_create -b nvme0 -n 1 -i 1 -s 0 -l 1024 -p test
+ $rpc_py bdev_opal_create -b nvme0 -n 1 -i 2 -s 1024 -l 512 -p test
+
+ $rpc_py bdev_opal_new_user -b nvme0n1r3 -p test -i 3 -u tester3
+ $rpc_py bdev_opal_get_info -b nvme0n1r3 -p test
+ $rpc_py bdev_opal_set_lock_state -b nvme0n1r3 -i 3 -p tester3 -l readonly
+ $rpc_py bdev_opal_get_info -b nvme0n1r3 -p test
+ $rpc_py bdev_opal_set_lock_state -b nvme0n1r1 -i 0 -p test -l rwlock
+
+ $rpc_py bdev_opal_delete -b nvme0n1r2 -p test
+ $rpc_py bdev_opal_delete -b nvme0n1r3 -p test
+ $rpc_py bdev_opal_delete -b nvme0n1r1 -p test
+
+ $rpc_py bdev_nvme_detach_controller nvme0
+}
+
+function setup_test_environment() {
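+	# Carve three OPAL locking-range bdevs out of namespace 1, create users for ranges 1 and 3,
+	# and unlock all three ranges for read/write before the bdevio/bdevperf runs.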
+ $rpc_py bdev_nvme_attach_controller -b "nvme0" -t "pcie" -a ${bdf}
+
+ $rpc_py bdev_opal_create -b nvme0 -n 1 -i 1 -s 0 -l 1024 -p test
+ $rpc_py bdev_opal_create -b nvme0 -n 1 -i 2 -s 1024 -l 512 -p test
+ $rpc_py bdev_opal_create -b nvme0 -n 1 -i 3 -s 4096 -l 4096 -p test
+
+ $rpc_py bdev_opal_new_user -b nvme0n1r1 -p test -i 1 -u tester1
+ $rpc_py bdev_opal_set_lock_state -b nvme0n1r1 -i 1 -p tester1 -l readwrite
+ $rpc_py bdev_opal_new_user -b nvme0n1r3 -p test -i 3 -u tester3
+ $rpc_py bdev_opal_set_lock_state -b nvme0n1r3 -i 3 -p tester3 -l readwrite
+
+ $rpc_py bdev_opal_set_lock_state -b nvme0n1r2 -i 0 -p test -l readwrite
+}
+
+function clean_up() {
+ $rpc_py bdev_opal_delete -b nvme0n1r1 -p test
+ $rpc_py bdev_opal_delete -b nvme0n1r2 -p test
+ $rpc_py bdev_opal_delete -b nvme0n1r3 -p test
+}
+
+function revert() {
+ $rpc_py bdev_nvme_opal_revert -b nvme0 -p test
+}
+
+function opal_spdk_tgt() {
+ $SPDK_BIN_DIR/spdk_tgt &
+ spdk_tgt_pid=$!
+ trap 'killprocess $spdk_tgt_pid; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $spdk_tgt_pid
+ test_opal_cmds
+ killprocess $spdk_tgt_pid
+}
+
+function opal_bdevio() {
+ $rootdir/test/bdev/bdevio/bdevio -w &
+ bdevio_pid=$!
+ trap 'killprocess $bdevio_pid; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $bdevio_pid
+ setup_test_environment
+ $rootdir/test/bdev/bdevio/tests.py perform_tests
+ clean_up
+ $rpc_py bdev_nvme_detach_controller nvme0
+ trap - SIGINT SIGTERM EXIT
+ killprocess $bdevio_pid
+}
+
+function opal_bdevperf() {
+ $rootdir/test/bdev/bdevperf/bdevperf -z -q 8 -o 4096 -w verify -t 10 &
+ bdevperf_pid=$!
+ trap 'revert; killprocess $bdevperf_pid; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $bdevperf_pid
+ setup_test_environment
+ $rootdir/test/bdev/bdevperf/bdevperf.py perform_tests
+ clean_up
+ revert
+ $rpc_py bdev_nvme_detach_controller nvme0
+ trap - SIGINT SIGTERM EXIT
+ killprocess $bdevperf_pid
+}
+
+opal_revert_and_init
+
+run_test "nvme_opal_spdk_tgt" opal_spdk_tgt
+run_test "nvme_opal_bdevio" opal_bdevio
+run_test "nvme_opal_bdevperf" opal_bdevperf
diff --git a/src/spdk/test/nvme/nvme_rpc.sh b/src/spdk/test/nvme/nvme_rpc.sh
new file mode 100755
index 000000000..da7cf50d3
--- /dev/null
+++ b/src/spdk/test/nvme/nvme_rpc.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+bdf=$(get_first_nvme_bdf)
+
+$SPDK_BIN_DIR/spdk_tgt -m 0x3 &
+spdk_tgt_pid=$!
+trap 'kill -9 ${spdk_tgt_pid}; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $spdk_tgt_pid
+
+$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a ${bdf}
+
+# 1) Test bdev_nvme_apply_firmware RPC
+# NOTE: We don't want to do real firmware update on CI
+
+# Make sure that used firmware file doesn't exist
+if [ -f non_existing_file ]; then
+ exit 1
+fi
+
+# a) Try to apply firmware from non existing file
+$rpc_py bdev_nvme_apply_firmware non_existing_file Nvme0n1 || rv=$?
+if [ -z "$rv" ]; then
+ exit 1
+fi
+
+$rpc_py bdev_nvme_detach_controller Nvme0
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_tgt_pid
diff --git a/src/spdk/test/nvme/overhead/.gitignore b/src/spdk/test/nvme/overhead/.gitignore
new file mode 100644
index 000000000..d5a7d6f41
--- /dev/null
+++ b/src/spdk/test/nvme/overhead/.gitignore
@@ -0,0 +1 @@
+overhead
diff --git a/src/spdk/test/nvme/overhead/Makefile b/src/spdk/test/nvme/overhead/Makefile
new file mode 100644
index 000000000..1d050d96d
--- /dev/null
+++ b/src/spdk/test/nvme/overhead/Makefile
@@ -0,0 +1,43 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+APP = overhead
+
+ifeq ($(OS),Linux)
+SYS_LIBS += -laio
+CFLAGS += -DHAVE_LIBAIO
+endif
+
+include $(SPDK_ROOT_DIR)/mk/nvme.libtest.mk
diff --git a/src/spdk/test/nvme/overhead/README b/src/spdk/test/nvme/overhead/README
new file mode 100644
index 000000000..b88c42176
--- /dev/null
+++ b/src/spdk/test/nvme/overhead/README
@@ -0,0 +1,24 @@
+This application measures the software overhead of I/O submission
+and completion for both the SPDK NVMe driver and an AIO file handle.
+It runs a random read, queue depth = 1 workload to a single device,
+and captures TSC as follows:
+
+* Submission: capture TSC before and after the I/O submission
+ call (SPDK or AIO).
+* Completion: capture TSC before and after the I/O completion
+ check. Only record the TSC delta if the I/O completion check
+ resulted in a completed I/O. Also use heuristics in the AIO
+ case to account for time spent in interrupt handling outside
+ of the actual I/O completion check.
+
+Usage:
+
+To test software overhead for a 4KB I/O over a 10 second period:
+
+SPDK: overhead -s 4096 -t 10
+AIO: overhead -s 4096 -t 10 /dev/nvme0n1
+
+Note that for the SPDK case, it will only use the first namespace
+on the first controller found by SPDK. If a different namespace is
+desired, attach controllers individually to the kernel NVMe driver
+to ensure they will not be enumerated by SPDK.
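+
+The avg/min/max values reported by the tool are TSC deltas converted to
+nanoseconds, i.e. delta_ns = delta_ticks * 10^9 / tsc_rate, where tsc_rate
+is obtained from spdk_get_ticks_hz(); histogram ranges are printed in
+microseconds. Pass -H to additionally print submission and completion
+latency histograms.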
diff --git a/src/spdk/test/nvme/overhead/overhead.c b/src/spdk/test/nvme/overhead/overhead.c
new file mode 100644
index 000000000..553f1a545
--- /dev/null
+++ b/src/spdk/test/nvme/overhead/overhead.c
@@ -0,0 +1,730 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/barrier.h"
+#include "spdk/fd.h"
+#include "spdk/nvme.h"
+#include "spdk/env.h"
+#include "spdk/string.h"
+#include "spdk/nvme_intel.h"
+#include "spdk/histogram_data.h"
+
+#if HAVE_LIBAIO
+#include <libaio.h>
+#endif
+
+struct ctrlr_entry {
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct ctrlr_entry *next;
+ char name[1024];
+};
+
+enum entry_type {
+ ENTRY_TYPE_NVME_NS,
+ ENTRY_TYPE_AIO_FILE,
+};
+
+struct ns_entry {
+ enum entry_type type;
+
+ union {
+ struct {
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct spdk_nvme_ns *ns;
+ struct spdk_nvme_qpair *qpair;
+ } nvme;
+#if HAVE_LIBAIO
+ struct {
+ int fd;
+ struct io_event *events;
+ io_context_t ctx;
+ } aio;
+#endif
+ } u;
+
+ uint32_t io_size_blocks;
+ uint64_t size_in_ios;
+ bool is_draining;
+ uint32_t current_queue_depth;
+ char name[1024];
+ struct ns_entry *next;
+
+ struct spdk_histogram_data *submit_histogram;
+ struct spdk_histogram_data *complete_histogram;
+};
+
+struct perf_task {
+ void *buf;
+ uint64_t submit_tsc;
+#if HAVE_LIBAIO
+ struct iocb iocb;
+#endif
+};
+
+static bool g_enable_histogram = false;
+
+static struct ctrlr_entry *g_ctrlr = NULL;
+static struct ns_entry *g_ns = NULL;
+
+static uint64_t g_tsc_rate;
+
+static uint32_t g_io_size_bytes;
+static int g_time_in_sec;
+
+static int g_aio_optind; /* Index of first AIO filename in argv */
+
+struct perf_task *g_task;
+uint64_t g_tsc_submit = 0;
+uint64_t g_tsc_submit_min = UINT64_MAX;
+uint64_t g_tsc_submit_max = 0;
+uint64_t g_tsc_complete = 0;
+uint64_t g_tsc_complete_min = UINT64_MAX;
+uint64_t g_tsc_complete_max = 0;
+uint64_t g_io_completed = 0;
+
+static void
+register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
+{
+ struct ns_entry *entry;
+ const struct spdk_nvme_ctrlr_data *cdata;
+
+ cdata = spdk_nvme_ctrlr_get_data(ctrlr);
+
+ if (!spdk_nvme_ns_is_active(ns)) {
+ printf("Controller %-20.20s (%-20.20s): Skipping inactive NS %u\n",
+ cdata->mn, cdata->sn,
+ spdk_nvme_ns_get_id(ns));
+ return;
+ }
+
+ if (spdk_nvme_ns_get_size(ns) < g_io_size_bytes ||
+ spdk_nvme_ns_get_sector_size(ns) > g_io_size_bytes) {
+ printf("WARNING: controller %-20.20s (%-20.20s) ns %u has invalid "
+ "ns size %" PRIu64 " / block size %u for I/O size %u\n",
+ cdata->mn, cdata->sn, spdk_nvme_ns_get_id(ns),
+ spdk_nvme_ns_get_size(ns), spdk_nvme_ns_get_sector_size(ns), g_io_size_bytes);
+ return;
+ }
+
+ entry = calloc(1, sizeof(struct ns_entry));
+ if (entry == NULL) {
+ perror("ns_entry malloc");
+ exit(1);
+ }
+
+ entry->type = ENTRY_TYPE_NVME_NS;
+ entry->u.nvme.ctrlr = ctrlr;
+ entry->u.nvme.ns = ns;
+
+ entry->size_in_ios = spdk_nvme_ns_get_size(ns) /
+ g_io_size_bytes;
+ entry->io_size_blocks = g_io_size_bytes / spdk_nvme_ns_get_sector_size(ns);
+ entry->submit_histogram = spdk_histogram_data_alloc();
+ entry->complete_histogram = spdk_histogram_data_alloc();
+
+ snprintf(entry->name, 44, "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
+
+ entry->next = g_ns;
+ g_ns = entry;
+}
+
+static void
+register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
+{
+ int num_ns;
+ struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
+ const struct spdk_nvme_ctrlr_data *cdata = spdk_nvme_ctrlr_get_data(ctrlr);
+
+ if (entry == NULL) {
+ perror("ctrlr_entry malloc");
+ exit(1);
+ }
+
+ snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
+
+ entry->ctrlr = ctrlr;
+
+ entry->next = g_ctrlr;
+ g_ctrlr = entry;
+
+ num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
+ /* Only register the first namespace. */
+ if (num_ns < 1) {
+ fprintf(stderr, "controller found with no namespaces\n");
+ return;
+ }
+
+ register_ns(ctrlr, spdk_nvme_ctrlr_get_ns(ctrlr, 1));
+}
+
+#if HAVE_LIBAIO
+static int
+register_aio_file(const char *path)
+{
+ struct ns_entry *entry;
+
+ int fd;
+ uint64_t size;
+ uint32_t blklen;
+
+ fd = open(path, O_RDWR | O_DIRECT);
+ if (fd < 0) {
+ fprintf(stderr, "Could not open AIO device %s: %s\n", path, strerror(errno));
+ return -1;
+ }
+
+ size = spdk_fd_get_size(fd);
+ if (size == 0) {
+ fprintf(stderr, "Could not determine size of AIO device %s\n", path);
+ close(fd);
+ return -1;
+ }
+
+ blklen = spdk_fd_get_blocklen(fd);
+ if (blklen == 0) {
+ fprintf(stderr, "Could not determine block size of AIO device %s\n", path);
+ close(fd);
+ return -1;
+ }
+
+ entry = calloc(1, sizeof(struct ns_entry));
+ if (entry == NULL) {
+ close(fd);
+ perror("aio ns_entry malloc");
+ return -1;
+ }
+
+ entry->type = ENTRY_TYPE_AIO_FILE;
+ entry->u.aio.fd = fd;
+ entry->size_in_ios = size / g_io_size_bytes;
+ entry->io_size_blocks = g_io_size_bytes / blklen;
+ entry->submit_histogram = spdk_histogram_data_alloc();
+ entry->complete_histogram = spdk_histogram_data_alloc();
+
+ snprintf(entry->name, sizeof(entry->name), "%s", path);
+
+ g_ns = entry;
+
+ return 0;
+}
+
+static int
+aio_submit(io_context_t aio_ctx, struct iocb *iocb, int fd, enum io_iocb_cmd cmd, void *buf,
+ unsigned long nbytes, uint64_t offset, void *cb_ctx)
+{
+ iocb->aio_fildes = fd;
+ iocb->aio_reqprio = 0;
+ iocb->aio_lio_opcode = cmd;
+ iocb->u.c.buf = buf;
+ iocb->u.c.nbytes = nbytes;
+ iocb->u.c.offset = offset;
+ iocb->data = cb_ctx;
+
+ if (io_submit(aio_ctx, 1, &iocb) < 0) {
+ printf("io_submit");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+aio_check_io(void)
+{
+ int count, i;
+ struct timespec timeout;
+
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = 0;
+
+ count = io_getevents(g_ns->u.aio.ctx, 1, 1, g_ns->u.aio.events, &timeout);
+ if (count < 0) {
+ fprintf(stderr, "io_getevents error\n");
+ exit(1);
+ }
+
+ for (i = 0; i < count; i++) {
+ g_ns->current_queue_depth--;
+ }
+}
+#endif /* HAVE_LIBAIO */
+
+static void io_complete(void *ctx, const struct spdk_nvme_cpl *completion);
+
+static __thread unsigned int seed = 0;
+
+static void
+submit_single_io(void)
+{
+ uint64_t offset_in_ios;
+ uint64_t start;
+ int rc;
+ struct ns_entry *entry = g_ns;
+ uint64_t tsc_submit;
+
+ offset_in_ios = rand_r(&seed) % entry->size_in_ios;
+
+ start = spdk_get_ticks();
+ spdk_rmb();
+#if HAVE_LIBAIO
+ if (entry->type == ENTRY_TYPE_AIO_FILE) {
+ rc = aio_submit(g_ns->u.aio.ctx, &g_task->iocb, entry->u.aio.fd, IO_CMD_PREAD, g_task->buf,
+ g_io_size_bytes, offset_in_ios * g_io_size_bytes, g_task);
+ } else
+#endif
+ {
+ rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, g_ns->u.nvme.qpair, g_task->buf,
+ offset_in_ios * entry->io_size_blocks,
+ entry->io_size_blocks, io_complete, g_task, 0);
+ }
+
+ spdk_rmb();
+ tsc_submit = spdk_get_ticks() - start;
+ g_tsc_submit += tsc_submit;
+ if (tsc_submit < g_tsc_submit_min) {
+ g_tsc_submit_min = tsc_submit;
+ }
+ if (tsc_submit > g_tsc_submit_max) {
+ g_tsc_submit_max = tsc_submit;
+ }
+ if (g_enable_histogram) {
+ spdk_histogram_data_tally(entry->submit_histogram, tsc_submit);
+ }
+
+ if (rc != 0) {
+ fprintf(stderr, "starting I/O failed\n");
+ } else {
+ g_ns->current_queue_depth++;
+ }
+}
+
+static void
+io_complete(void *ctx, const struct spdk_nvme_cpl *completion)
+{
+ g_ns->current_queue_depth--;
+}
+
+uint64_t g_complete_tsc_start;
+
+static uint64_t
+check_io(void)
+{
+ uint64_t end, tsc_complete;
+
+ spdk_rmb();
+#if HAVE_LIBAIO
+ if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
+ aio_check_io();
+ } else
+#endif
+ {
+ spdk_nvme_qpair_process_completions(g_ns->u.nvme.qpair, 0);
+ }
+ spdk_rmb();
+ end = spdk_get_ticks();
+ if (g_ns->current_queue_depth == 1) {
+ /*
+ * Account for a race condition in the AIO case where an interrupt occurs
+ * after checking the queue depth. If the time since the last capture
+ * is too large, assume that an interrupt fired, and do not bump the
+ * start tsc forward. This ensures the extra time is accounted for the
+ * next time through, when we see current_queue_depth drop to 0.
+ */
+ if (g_ns->type == ENTRY_TYPE_NVME_NS || (end - g_complete_tsc_start) < 500) {
+ g_complete_tsc_start = end;
+ }
+ } else {
+ tsc_complete = end - g_complete_tsc_start;
+ g_tsc_complete += tsc_complete;
+ if (tsc_complete < g_tsc_complete_min) {
+ g_tsc_complete_min = tsc_complete;
+ }
+ if (tsc_complete > g_tsc_complete_max) {
+ g_tsc_complete_max = tsc_complete;
+ }
+ if (g_enable_histogram) {
+ spdk_histogram_data_tally(g_ns->complete_histogram, tsc_complete);
+ }
+ g_io_completed++;
+ if (!g_ns->is_draining) {
+ submit_single_io();
+ }
+ end = g_complete_tsc_start = spdk_get_ticks();
+ }
+
+ return end;
+}
+
+static void
+drain_io(void)
+{
+ g_ns->is_draining = true;
+ while (g_ns->current_queue_depth > 0) {
+ check_io();
+ }
+}
+
+static int
+init_ns_worker_ctx(void)
+{
+ if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
+#ifdef HAVE_LIBAIO
+ g_ns->u.aio.events = calloc(1, sizeof(struct io_event));
+ if (!g_ns->u.aio.events) {
+ return -1;
+ }
+ g_ns->u.aio.ctx = 0;
+ if (io_setup(1, &g_ns->u.aio.ctx) < 0) {
+ free(g_ns->u.aio.events);
+ perror("io_setup");
+ return -1;
+ }
+#endif
+ } else {
+ /*
+ * TODO: If a controller has multiple namespaces, they could all use the same queue.
+ * For now, give each namespace/thread combination its own queue.
+ */
+ g_ns->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_ns->u.nvme.ctrlr, NULL, 0);
+ if (!g_ns->u.nvme.qpair) {
+ printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void
+cleanup_ns_worker_ctx(void)
+{
+ if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
+#ifdef HAVE_LIBAIO
+ io_destroy(g_ns->u.aio.ctx);
+ free(g_ns->u.aio.events);
+#endif
+ } else {
+ spdk_nvme_ctrlr_free_io_qpair(g_ns->u.nvme.qpair);
+ }
+}
+
+static int
+work_fn(void)
+{
+ uint64_t tsc_end, current;
+
+ /* Allocate a queue pair for each namespace. */
+ if (init_ns_worker_ctx() != 0) {
+ printf("ERROR: init_ns_worker_ctx() failed\n");
+ return 1;
+ }
+
+ tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;
+
+ /* Submit initial I/O for each namespace. */
+ submit_single_io();
+ g_complete_tsc_start = spdk_get_ticks();
+
+ while (1) {
+ /*
+ * Check for completed I/O for each controller. A new
+ * I/O will be submitted in the io_complete callback
+ * to replace each I/O that is completed.
+ */
+ current = check_io();
+
+ if (current > tsc_end) {
+ break;
+ }
+ }
+
+ drain_io();
+ cleanup_ns_worker_ctx();
+
+ return 0;
+}
+
+static void usage(char *program_name)
+{
+ printf("%s options", program_name);
+#if HAVE_LIBAIO
+ printf(" [AIO device(s)]...");
+#endif
+ printf("\n");
+ printf("\t[-s io size in bytes]\n");
+ printf("\t[-t time in seconds]\n");
+ printf("\t\t(default: 1)]\n");
+ printf("\t[-H enable histograms]\n");
+}
+
+static void
+print_bucket(void *ctx, uint64_t start, uint64_t end, uint64_t count,
+ uint64_t total, uint64_t so_far)
+{
+ double so_far_pct;
+
+ if (count == 0) {
+ return;
+ }
+
+ so_far_pct = (double)so_far * 100 / total;
+
+ printf("%9.3f - %9.3f: %9.4f%% (%9ju)\n",
+ (double)start * 1000 * 1000 / g_tsc_rate,
+ (double)end * 1000 * 1000 / g_tsc_rate,
+ so_far_pct, count);
+}
+
+static void
+print_stats(void)
+{
+ double divisor = (double)g_tsc_rate / (1000 * 1000 * 1000);
+
+ printf("submit (in ns) avg, min, max = %8.1f, %8.1f, %8.1f\n",
+ (double)g_tsc_submit / g_io_completed / divisor,
+ (double)g_tsc_submit_min / divisor,
+ (double)g_tsc_submit_max / divisor);
+ printf("complete (in ns) avg, min, max = %8.1f, %8.1f, %8.1f\n",
+ (double)g_tsc_complete / g_io_completed / divisor,
+ (double)g_tsc_complete_min / divisor,
+ (double)g_tsc_complete_max / divisor);
+
+ if (!g_enable_histogram) {
+ return;
+ }
+
+ printf("\n");
+ printf("Submit histogram\n");
+ printf("================\n");
+ printf(" Range in us Cumulative Count\n");
+ spdk_histogram_data_iterate(g_ns->submit_histogram, print_bucket, NULL);
+ printf("\n");
+
+ printf("Complete histogram\n");
+ printf("==================\n");
+ printf(" Range in us Cumulative Count\n");
+ spdk_histogram_data_iterate(g_ns->complete_histogram, print_bucket, NULL);
+ printf("\n");
+
+}
+
+static int
+parse_args(int argc, char **argv)
+{
+ int op;
+ long int val;
+
+ /* default value */
+ g_io_size_bytes = 0;
+ g_time_in_sec = 0;
+
+ while ((op = getopt(argc, argv, "hs:t:H")) != -1) {
+ switch (op) {
+ case 'h':
+ usage(argv[0]);
+ exit(0);
+ break;
+ case 's':
+ val = spdk_strtol(optarg, 10);
+ if (val < 0) {
+ fprintf(stderr, "Invalid io size\n");
+ return val;
+ }
+ g_io_size_bytes = (uint32_t)val;
+ break;
+ case 't':
+ g_time_in_sec = spdk_strtol(optarg, 10);
+ if (g_time_in_sec < 0) {
+ fprintf(stderr, "Invalid run time\n");
+ return g_time_in_sec;
+ }
+ break;
+ case 'H':
+ g_enable_histogram = true;
+ break;
+ default:
+ usage(argv[0]);
+ return 1;
+ }
+ }
+
+ if (!g_io_size_bytes) {
+ usage(argv[0]);
+ return 1;
+ }
+ if (!g_time_in_sec) {
+ usage(argv[0]);
+ return 1;
+ }
+
+ g_aio_optind = optind;
+
+ return 0;
+}
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ static uint32_t ctrlr_found = 0;
+
+ if (ctrlr_found == 1) {
+ fprintf(stderr, "only attaching to one controller, so skipping\n");
+ fprintf(stderr, " controller at PCI address %s\n",
+ trid->traddr);
+ return false;
+ }
+ ctrlr_found = 1;
+
+ printf("Attaching to %s\n", trid->traddr);
+
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ printf("Attached to %s\n", trid->traddr);
+
+ register_ctrlr(ctrlr);
+}
+
+static int
+register_controllers(void)
+{
+ printf("Initializing NVMe Controllers\n");
+
+ if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
+ fprintf(stderr, "spdk_nvme_probe() failed\n");
+ return 1;
+ }
+
+ if (g_ns == NULL) {
+ fprintf(stderr, "no NVMe controller found - check that device is bound to uio/vfio\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+cleanup(void)
+{
+ struct ns_entry *ns_entry = g_ns;
+ struct ctrlr_entry *ctrlr_entry = g_ctrlr;
+
+ while (ns_entry) {
+ struct ns_entry *next = ns_entry->next;
+
+ spdk_histogram_data_free(ns_entry->submit_histogram);
+ spdk_histogram_data_free(ns_entry->complete_histogram);
+ free(ns_entry);
+ ns_entry = next;
+ }
+
+ while (ctrlr_entry) {
+ struct ctrlr_entry *next = ctrlr_entry->next;
+
+ spdk_nvme_detach(ctrlr_entry->ctrlr);
+ free(ctrlr_entry);
+ ctrlr_entry = next;
+ }
+}
+
+int main(int argc, char **argv)
+{
+ int rc;
+ struct spdk_env_opts opts;
+
+ rc = parse_args(argc, argv);
+ if (rc != 0) {
+ return rc;
+ }
+
+ spdk_env_opts_init(&opts);
+ opts.name = "overhead";
+ opts.core_mask = "0x1";
+ opts.shm_id = 0;
+ if (spdk_env_init(&opts) < 0) {
+ fprintf(stderr, "Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ g_task = spdk_zmalloc(sizeof(struct perf_task), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ if (g_task == NULL) {
+ fprintf(stderr, "g_task alloc failed\n");
+ exit(1);
+ }
+
+ g_task->buf = spdk_zmalloc(g_io_size_bytes, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ if (g_task->buf == NULL) {
+ fprintf(stderr, "g_task->buf spdk_zmalloc failed\n");
+ exit(1);
+ }
+
+ g_tsc_rate = spdk_get_ticks_hz();
+
+#if HAVE_LIBAIO
+ if (g_aio_optind < argc) {
+ printf("Measuring overhead for AIO device %s.\n", argv[g_aio_optind]);
+ if (register_aio_file(argv[g_aio_optind]) != 0) {
+ cleanup();
+ return -1;
+ }
+ } else
+#endif
+ {
+ if (register_controllers() != 0) {
+ cleanup();
+ return -1;
+ }
+ }
+
+ printf("Initialization complete. Launching workers.\n");
+
+ rc = work_fn();
+
+ print_stats();
+
+ cleanup();
+
+ if (rc != 0) {
+ fprintf(stderr, "%s: errors occured\n", argv[0]);
+ }
+
+ return rc;
+}
diff --git a/src/spdk/test/nvme/perf/README.md b/src/spdk/test/nvme/perf/README.md
new file mode 100644
index 000000000..3e0b4aa30
--- /dev/null
+++ b/src/spdk/test/nvme/perf/README.md
@@ -0,0 +1,103 @@
+# Automated script for NVMe performance test
+
+## Compile SPDK with LTO
+
+The link time optimization (LTO) gcc flag allows the linker to run a post-link optimization pass on the code. During that pass the linker inlines thin wrappers, such as those around DPDK calls, which results in a shallower call stack and significantly improves performance. Therefore, we recommend compiling SPDK with the LTO flag before running this benchmark script to achieve optimal performance.
+Link time optimization can be enabled in SPDK by doing the following:
+
+~{.sh}
+./configure --enable-lto
+~
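+
+After reconfiguring, rebuild SPDK as usual (a typical invocation; adjust the job count to your machine):
+
+~{.sh}
+make -j$(nproc)
+~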
+
+## Configuration
+
+Test is configured by using command-line options.
+
+### Available options
+
+#### -h, --help
+
+Prints available commands and help.
+
+#### --run-time
+
+Tell fio to terminate processing after the specified period of time. Value in seconds.
+
+#### --ramp-time
+
+Fio will run the specified workload for this amount of time before logging any performance numbers.
+Value in seconds.
+
+#### --fio-bin
+
+Path to fio binary.
+
+#### --driver
+
+Select between the SPDK driver and the kernel driver. The Linux kernel driver has three configurations:
+Default mode, Hybrid Polling and Classic Polling. The SPDK driver supports 2 fio_plugin modes: bdev and NVMe PMD. Before running a test with SPDK, you will need to bind the NVMe devices to the Linux uio_pci_generic or vfio-pci driver (see the example below). When running a test with the kernel driver, the NVMe devices use the kernel driver. The 5 valid values for this option are:
+'bdev', 'nvme', 'kernel-libaio', 'kernel-classic-polling' and 'kernel-hybrid-polling'.
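+
+For example, binding can typically be done with the setup script shipped in the SPDK repository (the path assumes the standard repository layout):
+
+~{.sh}
+sudo scripts/setup.sh
+~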
+
+#### --max-disk
+
+This option will run multiple fio jobs with a varying number of NVMe devices. It starts with the
+max-disk number of devices and then decreases the number of disks by two until there are no more devices.
+If set to 'all' then the max-disk number will be set to all available devices.
+Only one of the max-disk and disk-no options can be used.
+
+#### --disk-no
+
+This option will run the fio job on the specified number of NVMe devices. If set to 'all' then the number
+will be set to all available devices. Only one of the max-disk and disk-no options can be used.
+
+#### --cpu-allowed
+
+Specifies the CPU cores that will be used by fio to execute the performance test cases. When the SPDK driver is chosen, the script attempts to assign NVMe devices to CPU cores on the same NUMA node. The script first tries to align each core with devices matching
+the core's NUMA node, but if there are no devices left within the CPU core's NUMA node it will use devices from the other
+NUMA node. It is important to choose cores that ensure the best NUMA node alignment. For example:
+on a system with 8 devices on NUMA node 0 and 8 devices on NUMA node 1, cores 0-27 on NUMA node 0 and cores 28-55
+on NUMA node 1, if the test is set to use 16 disks and four cores then "--cpu-allowed=1,2,28,29" can be used,
+resulting in 4 node 0 devices per core for cores 1 and 2, and 4 node 1 devices per core for cores 28 and 29. If 10 cores
+are required then the best option would be "--cpu-allowed=1,2,3,4,28,29,30,31,32,33" because cores 1-4 will be
+aligned with 2 devices on NUMA node 0 per core and cores 28-33 will be aligned with 1 device on NUMA node 1 per core.
+If the kernel driver is chosen then, for each job with an NVMe device, all CPU cores on the corresponding NUMA node are picked.
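+
+To check NUMA locality before picking cores, the same sources used by the script can be queried manually (the PCI address below is only an example):
+
+~{.sh}
+lscpu -p=cpu,node                                # CPU core to NUMA node mapping
+cat /sys/bus/pci/devices/0000:5e:00.0/numa_node  # NUMA node of a given NVMe device
+~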
+
+#### --rw
+
+Type of I/O pattern. Accepted values are: randrw, rw
+
+#### --rwmixread
+
+Percentage of a mixed workload that should be reads.
+
+#### --iodepth
+
+Number of I/O units to keep in flight against each file.
+
+#### --block-size
+
+The block size in bytes used for I/O units.
+
+#### --numjobs
+
+Create the specified number of clones of a job.
+
+#### --repeat-no
+
+Specifies how many times to run each workload. End results are averages of these runs.
+
+#### --no-preconditioning
+
+By default disks are preconditioned before the test using fio with parameters: size=100%, loops=2, bs=1M, rw=write,
+iodepth=32, ioengine=spdk. Preconditioning can be skipped when this option is set.
+
+#### "--no-io-scaling"
+
+For the SPDK fio plugin the iodepth is multiplied by the number of devices assigned to a fio job. When this option is set, this multiplication is disabled, as illustrated below.
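+
+For example, with --iodepth=256 and 4 devices assigned to a single fio job, the effective per-job iodepth becomes 256 * 4 = 1024; with --no-io-scaling it stays at 256. (The numbers are illustrative only.)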
+
+## Results
+
+Results are stored in the "results" folder. After each workload, the following files are copied to this folder:
+the fio configuration file, JSON files with the fio results and latency logs with a 250 ms sampling interval.
+The number of copied files depends on the number of repeats of each workload. Additionally, a CSV file is created with the averaged
+results of all workloads.
diff --git a/src/spdk/test/nvme/perf/common.sh b/src/spdk/test/nvme/perf/common.sh
new file mode 100755
index 000000000..ddd01ec52
--- /dev/null
+++ b/src/spdk/test/nvme/perf/common.sh
@@ -0,0 +1,471 @@
+#!/usr/bin/env bash
+
+function discover_bdevs() {
+ local rootdir=$1
+ local config_file=$2
+ local cfg_type=$3
+ local wait_for_spdk_bdev=${4:-30}
+ local rpc_server=/var/tmp/spdk-discover-bdevs.sock
+
+ if [ ! -e $config_file ]; then
+ echo "Invalid Configuration File: $config_file"
+ return 1
+ fi
+
+ if [ -z $cfg_type ]; then
+ cfg_type="-c"
+ fi
+
+ # Start the bdev service to query for the list of available
+ # bdevs.
+ $rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 \
+ $cfg_type $config_file &> /dev/null &
+ stubpid=$!
+ while ! [ -e /var/run/spdk_bdev0 ]; do
+ # If this counter drops to zero, errexit will be caught to abort the test
+ ((wait_for_spdk_bdev--))
+ sleep 1
+ done
+
+ # Get all of the bdevs
+ $rootdir/scripts/rpc.py -s "$rpc_server" bdev_get_bdevs
+
+ # Shut down the bdev service
+ kill $stubpid
+ wait $stubpid
+ rm -f /var/run/spdk_bdev0
+}
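+
+# Example usage (matches how get_numa_node() below calls this function):
+# bdevs=$(discover_bdevs "$rootdir" "$testdir/bdev.conf" --json)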
+
+function create_spdk_bdev_conf() {
+ local output
+ local disk_cfg
+ local bdev_io_cache_size=$1
+ local bdev_io_pool_size=$2
+ local bdev_json_cfg=()
+ local bdev_opts=()
+
+ disk_cfg=($(grep -vP "^\s*#" "$DISKCFG"))
+
+ if [[ -n "$bdev_io_cache_size" ]]; then
+ bdev_opts+=("\"bdev_io_cache_size\": $bdev_io_cache_size")
+ fi
+
+ if [[ -n "$bdev_io_pool_size" ]]; then
+ bdev_opts+=("\"bdev_io_pool_size\": $bdev_io_pool_size")
+ fi
+
+ local IFS=","
+ if [[ ${#bdev_opts[@]} -gt 0 ]]; then
+ bdev_json_cfg+=("$(
+ cat <<- JSON
+ {
+ "method": "bdev_set_options",
+ "params": {
+ ${bdev_opts[*]}
+ }
+ }
+ JSON
+ )")
+ fi
+
+ for i in "${!disk_cfg[@]}"; do
+ bdev_json_cfg+=("$(
+ cat <<- JSON
+ {
+ "method": "bdev_nvme_attach_controller",
+ "params": {
+ "trtype": "PCIe",
+ "name":"Nvme${i}",
+ "traddr":"${disk_cfg[i]}"
+ }
+ }
+ JSON
+ )")
+ done
+
+ local IFS=","
+ jq -r '.' <<- JSON > $testdir/bdev.conf
+ {
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ ${bdev_json_cfg[*]}
+ ]
+ }
+ ]
+ }
+ JSON
+}
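+
+# For reference, with a single PCI address in $DISKCFG and no bdev options set, the
+# generated bdev.conf has roughly this shape (the traddr below is only an example):
+# {
+#   "subsystems": [
+#     {
+#       "subsystem": "bdev",
+#       "config": [
+#         {
+#           "method": "bdev_nvme_attach_controller",
+#           "params": { "trtype": "PCIe", "name": "Nvme0", "traddr": "0000:5e:00.0" }
+#         }
+#       ]
+#     }
+#   ]
+# }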
+
+function is_bdf_not_mounted() {
+ local bdf=$1
+ local blkname
+ local mountpoints
+ blkname=$(ls -l /sys/block/ | grep $bdf | awk '{print $9}')
+ mountpoints=$(lsblk /dev/$blkname --output MOUNTPOINT -n | wc -w)
+ return $mountpoints
+}
+
+function get_cores() {
+ local cpu_list="$1"
+ for cpu in ${cpu_list//,/ }; do
+ echo $cpu
+ done
+}
+
+function get_cores_numa_node() {
+ local cores=$1
+ for core in $cores; do
+ lscpu -p=cpu,node | grep "^$core\b" | awk -F ',' '{print $2}'
+ done
+}
+
+function get_numa_node() {
+ local plugin=$1
+ local disks=$2
+ if [[ "$plugin" =~ "nvme" ]]; then
+ for bdf in $disks; do
+ local driver
+ driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
+ # Use this check to omit blacklisted devices (not bound to a driver by the setup.sh script)
+ if [ "$driver" = "vfio-pci" ] || [ "$driver" = "uio_pci_generic" ]; then
+ cat /sys/bus/pci/devices/$bdf/numa_node
+ fi
+ done
+ elif [[ "$plugin" =~ "bdev" ]]; then
+ local bdevs
+ bdevs=$(discover_bdevs $rootdir $testdir/bdev.conf --json)
+ for name in $disks; do
+ local bdev_bdf
+ bdev_bdf=$(jq -r ".[] | select(.name==\"$name\").driver_specific.nvme.pci_address" <<< $bdevs)
+ cat /sys/bus/pci/devices/$bdev_bdf/numa_node
+ done
+ else
+ for name in $disks; do
+ local bdf
+ # Not reading directly from /sys/block/nvme* because of a kernel bug
+ # which results in NUMA 0 always getting reported.
+ bdf=$(cat /sys/block/$name/device/address)
+ cat /sys/bus/pci/devices/$bdf/numa_node
+ done
+ fi
+}
+
+function get_disks() {
+ local plugin=$1
+ local disk_cfg
+
+ disk_cfg=($(grep -vP "^\s*#" "$DISKCFG"))
+ if [[ "$plugin" =~ "nvme" ]]; then
+ # PCI BDF address is enough for nvme-perf and nvme-fio-plugin,
+ # so just print them from configuration file
+ echo "${disk_cfg[*]}"
+ elif [[ "$plugin" =~ "bdev" ]]; then
+ # Generate NvmeXn1 bdev name configuration file for bdev-perf
+ # and bdev-fio-plugin
+ local bdevs
+ local disk_no
+ disk_no=${#disk_cfg[@]}
+ eval echo "Nvme{0..$((disk_no - 1))}n1"
+ else
+ # Find nvme block devices and only use the ones which
+ # are not mounted
+ for bdf in "${disk_cfg[@]}"; do
+ if is_bdf_not_mounted $bdf; then
+ local blkname
+ blkname=$(ls -l /sys/block/ | grep $bdf | awk '{print $9}')
+ echo $blkname
+ fi
+ done
+ fi
+}
+
+function get_disks_on_numa() {
+ local devs=($1)
+ local numas=($2)
+ local numa_no=$3
+ local disks_on_numa=""
+ local i
+
+ for ((i = 0; i < ${#devs[@]}; i++)); do
+ if [ ${numas[$i]} = $numa_no ]; then
+ disks_on_numa=$((disks_on_numa + 1))
+ fi
+ done
+ echo $disks_on_numa
+}
+
+function create_fio_config() {
+ local disk_no=$1
+ local plugin=$2
+ local disks=($3)
+ local disks_numa=($4)
+ local cores=($5)
+ local total_disks=${#disks[@]}
+ local fio_job_section=()
+ local num_cores=${#cores[@]}
+ local disks_per_core=$((disk_no / num_cores))
+ local disks_per_core_mod=$((disk_no % num_cores))
+ local cores_numa
+ cores_numa=($(get_cores_numa_node "${cores[*]}"))
+
+ # The following part of this function still leverages global variables a lot.
+ # It's a mix of local variables passed as arguments to the function and global variables. This is messy.
+ # TODO: Modify this to be consistent with how variables are used here. Aim for using only
+ # local variables to get rid of globals as much as possible.
+ desc="\"Test io_plugin=$PLUGIN Blocksize=${BLK_SIZE} Workload=$RW MIX=${MIX} qd=${IODEPTH}\""
+ cp "$testdir/config.fio.tmp" "$testdir/config.fio"
+ cat <<- EOF >> $testdir/config.fio
+ description=$desc
+
+ rw=$RW
+ rwmixread=$MIX
+ bs=$BLK_SIZE
+ runtime=$RUNTIME
+ ramp_time=$RAMP_TIME
+ numjobs=$NUMJOBS
+ log_avg_msec=$SAMPLING_INT
+ EOF
+
+ if $GTOD_REDUCE; then
+ echo "gtod_reduce=1" >> $testdir/config.fio
+ fi
+
+ for i in "${!cores[@]}"; do
+ local m=0 #Counter of disks per NUMA node
+ local n=0 #Counter of all disks in test
+ core_numa=${cores_numa[$i]}
+
+ total_disks_per_core=$disks_per_core
+ # Check how many "stray" disks are still unassigned to CPU cores.
+ # Assign one disk to the current CPU core and subtract it from the total of
+ # unassigned disks
+ if [[ "$disks_per_core_mod" -gt "0" ]]; then
+ total_disks_per_core=$((disks_per_core + 1))
+ disks_per_core_mod=$((disks_per_core_mod - 1))
+ fi
+ # SPDK fio plugin supports submitting/completing I/Os to multiple SSDs from a single thread.
+ # Therefore, the per thread queue depth is set to the desired IODEPTH/device X the number of devices per thread.
+ QD=$IODEPTH
+ if [[ "$NOIOSCALING" = false ]]; then
+ QD=$((IODEPTH * total_disks_per_core))
+ fi
+
+ fio_job_section+=("")
+ fio_job_section+=("[filename${i}]")
+ fio_job_section+=("iodepth=$QD")
+ fio_job_section+=("cpus_allowed=${cores[$i]} #CPU NUMA Node ${cores_numa[$i]}")
+
+ while [[ "$m" -lt "$total_disks_per_core" ]]; do
+ # Try to add disks to the job section if their NUMA node matches the NUMA
+ # node of the currently selected CPU
+ if [[ "${disks_numa[$n]}" == "$core_numa" ]]; then
+ if [[ "$plugin" == "spdk-plugin-nvme" ]]; then
+ fio_job_section+=("filename=trtype=PCIe traddr=${disks[$n]//:/.} ns=1 #NVMe NUMA Node ${disks_numa[$n]}")
+ elif [[ "$plugin" == "spdk-plugin-bdev" ]]; then
+ fio_job_section+=("filename=${disks[$n]} #NVMe NUMA Node ${disks_numa[$n]}")
+ elif [[ "$plugin" =~ "kernel" ]]; then
+ fio_job_section+=("filename=/dev/${disks[$n]} #NVMe NUMA Node ${disks_numa[$n]}")
+ fi
+ m=$((m + 1))
+
+ # Mark the NUMA node of the n'th disk as "x" to flag it as claimed for subsequent loop iterations
+ disks_numa[$n]="x"
+ fi
+ n=$((n + 1))
+
+ # If there are no more disks with the same NUMA node as the CPU's NUMA node, switch to the
+ # other NUMA node, go back to the start of the loop and try again.
+ if [[ $n -ge $total_disks ]]; then
+ echo "WARNING! Cannot assign any more NVMes for CPU ${cores[$i]}"
+ echo "NVMe assignment for this CPU will be cross-NUMA."
+ if [[ "$core_numa" == "1" ]]; then
+ core_numa=0
+ else
+ core_numa=1
+ fi
+ n=0
+ fi
+ done
+ done
+
+ printf "%s\n" "${fio_job_section[@]}" >> $testdir/config.fio
+ echo "INFO: Generated fio configuration file:"
+ cat $testdir/config.fio
+}
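+
+# For reference, a job section appended by create_fio_config() for the SPDK NVMe plugin looks
+# roughly like this (traddr and core/NUMA values are only examples):
+# [filename0]
+# iodepth=1024
+# cpus_allowed=1 #CPU NUMA Node 0
+# filename=trtype=PCIe traddr=0000.5e.00.0 ns=1 #NVMe NUMA Node 0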
+
+function preconditioning() {
+ local dev_name=""
+ local filename=""
+ local nvme_list
+
+ HUGEMEM=8192 $rootdir/scripts/setup.sh
+ cp $testdir/config.fio.tmp $testdir/config.fio
+ echo "[Preconditioning]" >> $testdir/config.fio
+
+ # Generate filename argument for FIO.
+ # We only want to target NVMes not bound to nvme driver.
+ # If they're still bound to nvme that means they were skipped by
+ # setup.sh on purpose.
+ nvme_list=$(get_disks nvme)
+ for nvme in $nvme_list; do
+ dev_name='trtype=PCIe traddr='${nvme//:/.}' ns=1'
+ filename+=$(printf %s":" "$dev_name")
+ done
+ echo "** Preconditioning disks, this can take a while, depending on the size of disks."
+ run_spdk_nvme_fio "spdk-plugin-nvme" --filename="$filename" --size=100% --loops=2 --bs=1M \
+ --rw=write --iodepth=32 --output-format=normal
+ rm -f $testdir/config.fio
+}
+
+function get_results() {
+ local reads_pct
+ local writes_pct
+
+ reads_pct=$(bc -l <<< "scale=3; $2/100")
+ writes_pct=$(bc -l <<< "scale=3; 1-$reads_pct")
+ case "$1" in
+ iops)
+ iops=$(jq -r '.jobs[] | .read.iops + .write.iops' $TMP_RESULT_FILE)
+ iops=${iops%.*}
+ echo $iops
+ ;;
+ mean_lat_usec)
+ mean_lat=$(jq -r ".jobs[] | (.read.lat_ns.mean * $reads_pct + .write.lat_ns.mean * $writes_pct)" $TMP_RESULT_FILE)
+ mean_lat=${mean_lat%.*}
+ echo $((mean_lat / 1000))
+ ;;
+ p99_lat_usec)
+ p99_lat=$(jq -r ".jobs[] | (.read.clat_ns.percentile.\"99.000000\" // 0 * $reads_pct + .write.clat_ns.percentile.\"99.000000\" // 0 * $writes_pct)" $TMP_RESULT_FILE)
+ p99_lat=${p99_lat%.*}
+ echo $((p99_lat / 1000))
+ ;;
+ p99_99_lat_usec)
+ p99_99_lat=$(jq -r ".jobs[] | (.read.clat_ns.percentile.\"99.990000\" // 0 * $reads_pct + .write.clat_ns.percentile.\"99.990000\" // 0 * $writes_pct)" $TMP_RESULT_FILE)
+ p99_99_lat=${p99_99_lat%.*}
+ echo $((p99_99_lat / 1000))
+ ;;
+ stdev_usec)
+ stdev=$(jq -r ".jobs[] | (.read.clat_ns.stddev * $reads_pct + .write.clat_ns.stddev * $writes_pct)" $TMP_RESULT_FILE)
+ stdev=${stdev%.*}
+ echo $((stdev / 1000))
+ ;;
+ mean_slat_usec)
+ mean_slat=$(jq -r ".jobs[] | (.read.slat_ns.mean * $reads_pct + .write.slat_ns.mean * $writes_pct)" $TMP_RESULT_FILE)
+ mean_slat=${mean_slat%.*}
+ echo $((mean_slat / 1000))
+ ;;
+ mean_clat_usec)
+ mean_clat=$(jq -r ".jobs[] | (.read.clat_ns.mean * $reads_pct + .write.clat_ns.mean * $writes_pct)" $TMP_RESULT_FILE)
+ mean_clat=${mean_clat%.*}
+ echo $((mean_clat / 1000))
+ ;;
+ bw_Kibs)
+ bw=$(jq -r ".jobs[] | (.read.bw + .write.bw)" $TMP_RESULT_FILE)
+ bw=${bw%.*}
+ echo $((bw))
+ ;;
+ esac
+}
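+
+# Example usage (matches how the main run_perf.sh loop calls this function): with a 70% read
+# workload, "get_results mean_lat_usec 70" prints the read/write-weighted mean latency in
+# microseconds taken from $TMP_RESULT_FILE.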
+
+function get_bdevperf_results() {
+ case "$1" in
+ iops)
+ iops=$(grep Total $TMP_RESULT_FILE | awk -F 'Total' '{print $2}' | awk '{print $2}')
+ iops=${iops%.*}
+ echo $iops
+ ;;
+ bw_Kibs)
+ bw_MBs=$(grep Total $TMP_RESULT_FILE | awk -F 'Total' '{print $2}' | awk '{print $4}')
+ bw_MBs=${bw_MBs%.*}
+ echo $((bw_MBs * 1024))
+ ;;
+ esac
+}
+
+function get_nvmeperf_results() {
+ local iops
+ local bw_MBs
+ local mean_lat_usec
+ local max_lat_usec
+ local min_lat_usec
+
+ read -r iops bw_MBs mean_lat_usec min_lat_usec max_lat_usec <<< $(tr -s " " < $TMP_RESULT_FILE | grep -oP "(?<=Total : )(.*+)")
+
+ # We need to get rid of the decimal places because we
+ # use arithmetic expressions instead of "bc" for the calculations
+ iops=${iops%.*}
+ bw_MBs=${bw_MBs%.*}
+ mean_lat_usec=${mean_lat_usec%.*}
+ min_lat_usec=${min_lat_usec%.*}
+ max_lat_usec=${max_lat_usec%.*}
+
+ echo "$iops $(bc <<< "$bw_MBs * 1024") $mean_lat_usec $min_lat_usec $max_lat_usec"
+}
+
+function run_spdk_nvme_fio() {
+ local plugin=$1
+ echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting."
+ if [[ "$plugin" = "spdk-plugin-nvme" ]]; then
+ LD_PRELOAD=$plugin_dir/spdk_nvme $FIO_BIN $testdir/config.fio --output-format=json "${@:2}" --ioengine=spdk
+ elif [[ "$plugin" = "spdk-plugin-bdev" ]]; then
+ LD_PRELOAD=$plugin_dir/spdk_bdev $FIO_BIN $testdir/config.fio --output-format=json "${@:2}" --ioengine=spdk_bdev --spdk_json_conf=$testdir/bdev.conf --spdk_mem=4096
+ fi
+
+ sleep 1
+}
+
+function run_nvme_fio() {
+ echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting."
+ $FIO_BIN $testdir/config.fio --output-format=json "$@"
+ sleep 1
+}
+
+function run_bdevperf() {
+ echo "** Running bdevperf test, this can take a while, depending on the run-time setting."
+ $bdevperf_dir/bdevperf --json $testdir/bdev.conf -q $IODEPTH -o $BLK_SIZE -w $RW -M $MIX -t $RUNTIME -m "[$CPUS_ALLOWED]" -r /var/tmp/spdk.sock
+ sleep 1
+}
+
+function run_nvmeperf() {
+ # Prepare -r argument string for nvme perf command
+ local r_opt
+ local disks
+
+ # Limit the number of disks to $1 if needed
+ disks=($(get_disks nvme))
+ disks=("${disks[@]:0:$1}")
+ r_opt=$(printf -- ' -r "trtype:PCIe traddr:%s"' "${disks[@]}")
+
+ echo "** Running nvme perf test, this can take a while, depending on the run-time setting."
+
+ # Run command in separate shell as this solves quoting issues related to r_opt var
+ $SHELL -c "$nvmeperf_dir/perf $r_opt -q $IODEPTH -o $BLK_SIZE -w $RW -M $MIX -t $RUNTIME -c [$CPUS_ALLOWED]"
+ sleep 1
+}
+
+function wait_for_nvme_reload() {
+ local nvmes=$1
+
+ shopt -s extglob
+ for disk in $nvmes; do
+ cmd="ls /sys/block/$disk/queue/*@(iostats|rq_affinity|nomerges|io_poll_delay)*"
+ until $cmd 2> /dev/null; do
+ echo "Waiting for full nvme driver reload..."
+ sleep 0.5
+ done
+ done
+ shopt -u extglob
+}
+
+function verify_disk_number() {
+ # Check if we have appropriate number of disks to carry out the test
+ disks=($(get_disks $PLUGIN))
+ if [[ $DISKNO == "ALL" ]] || [[ $DISKNO == "all" ]]; then
+ DISKNO=${#disks[@]}
+ elif [[ $DISKNO -gt ${#disks[@]} ]] || [[ ! $DISKNO =~ ^[0-9]+$ ]]; then
+ echo "error: Required devices number ($DISKNO) is not a valid number or it's larger than the number of devices found (${#disks[@]})"
+ false
+ fi
+}
diff --git a/src/spdk/test/nvme/perf/config.fio.tmp b/src/spdk/test/nvme/perf/config.fio.tmp
new file mode 100644
index 000000000..dfaea5df5
--- /dev/null
+++ b/src/spdk/test/nvme/perf/config.fio.tmp
@@ -0,0 +1,6 @@
+[global]
+direct=1
+thread=1
+norandommap=1
+group_reporting=1
+time_based=1
diff --git a/src/spdk/test/nvme/perf/run_perf.sh b/src/spdk/test/nvme/perf/run_perf.sh
new file mode 100755
index 000000000..133aaa75c
--- /dev/null
+++ b/src/spdk/test/nvme/perf/run_perf.sh
@@ -0,0 +1,374 @@
+#!/usr/bin/env bash
+set -e
+
+# Dir variables and sourcing common files
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+plugin_dir=$rootdir/build/fio
+bdevperf_dir=$rootdir/test/bdev/bdevperf
+nvmeperf_dir=$rootdir/build/examples
+source $testdir/common.sh
+source $rootdir/scripts/common.sh || exit 1
+source $rootdir/test/common/autotest_common.sh
+
+# Global & default variables
+declare -A KERNEL_ENGINES
+KERNEL_ENGINES=(
+ ["kernel-libaio"]="--ioengine=libaio"
+ ["kernel-classic-polling"]="--ioengine=pvsync2 --hipri=100"
+ ["kernel-hybrid-polling"]="--ioengine=pvsync2 --hipri=100"
+ ["kernel-io-uring"]="--ioengine=io_uring")
+
+RW=randrw
+MIX=100
+IODEPTH=256
+BLK_SIZE=4096
+RUNTIME=600
+RAMP_TIME=30
+NUMJOBS=1
+REPEAT_NO=3
+GTOD_REDUCE=false
+SAMPLING_INT=0
+FIO_BIN=$CONFIG_FIO_SOURCE_DIR/fio
+TMP_RESULT_FILE=$testdir/result.json
+PLUGIN="nvme"
+DISKCFG=""
+BDEV_CACHE=""
+BDEV_POOL=""
+DISKNO="ALL"
+CPUS_ALLOWED=1
+NOIOSCALING=false
+PRECONDITIONING=true
+CPUFREQ=""
+PERFTOP=false
+DPDKMEM=false
+DATE="$(date +'%m_%d_%Y_%H%M%S')"
+
+function usage() {
+ set +x
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Run NVMe PMD/BDEV performance test. Change options for easier debug and setup configuration"
+ echo "Usage: $(basename $1) [options]"
+ echo "-h, --help Print help and exit"
+ echo
+ echo "Workload parameters:"
+ echo " --rw=STR Type of I/O pattern. Accepted values are randrw,rw. [default=$RW]"
+ echo " --rwmixread=INT Percentage of a mixed workload that should be reads. [default=$MIX]"
+ echo " --iodepth=INT Number of I/Os to keep in flight against the file. [default=$IODEPTH]"
+ echo " --block-size=INT The block size in bytes used for I/O units. [default=$BLK_SIZE]"
+ echo " --run-time=TIME[s] Tell fio to run the workload for the specified period of time. [default=$RUNTIME]"
+ echo " --ramp-time=TIME[s] Fio will run the specified workload for this amount of time before"
+ echo " logging any performance numbers. [default=$RAMP_TIME]. Applicable only for fio-based tests."
+ echo " --numjobs=INT Create the specified number of clones of this job. [default=$NUMJOBS]"
+ echo " Applicable only for fio-based tests."
+ echo " --repeat-no=INT How many times to repeat workload test. [default=$REPEAT_NO]"
+ echo " Test result will be an average of repeated test runs."
+ echo " --gtod-reduce Enable fio gtod_reduce option. [default=$GTOD_REDUCE]"
+ echo " --sampling-int=INT Value for fio log_avg_msec parameters [default=$SAMPLING_INT]"
+ echo " --fio-bin=PATH Path to fio binary. [default=$FIO_BIN]"
+ echo " Applicable only for fio-based tests."
+ echo
+ echo "Test setup parameters:"
+ echo " --driver=STR Selects tool used for testing. Choices available:"
+ echo " - spdk-perf-nvme (SPDK nvme perf)"
+ echo " - spdk-perf-bdev (SPDK bdev perf)"
+ echo " - spdk-plugin-nvme (SPDK nvme fio plugin)"
+ echo " - spdk-plugin-bdev (SPDK bdev fio plugin)"
+ echo " - kernel-classic-polling"
+ echo " - kernel-hybrid-polling"
+ echo " - kernel-libaio"
+ echo " - kernel-io-uring"
+ echo " --disk-config Configuration file containing PCI BDF addresses of NVMe disks to use in test."
+ echo " It consists a single column of PCI addresses. SPDK Bdev names will be assigned"
+ echo " and Kernel block device names detected."
+ echo " Lines starting with # are ignored as comments."
+ echo " --bdev-io-cache-size Set IO cache size for for SPDK bdev subsystem."
+ echo " --bdev-io-pool-size Set IO pool size for for SPDK bdev subsystem."
+ echo " --max-disk=INT,ALL Number of disks to test on, this will run multiple workloads with increasing number of disk each run."
+ echo " If =ALL then test on all found disk. [default=$DISKNO]"
+ echo " --cpu-allowed=INT/PATH Comma-separated list of CPU cores used to run the workload. Ranges allowed."
+ echo " Can also point to a file containing list of CPUs. [default=$CPUS_ALLOWED]"
+ echo " --no-preconditioning Skip preconditioning"
+ echo " --no-io-scaling Do not scale iodepth for each device in SPDK fio plugin. [default=$NOIOSCALING]"
+ echo " --cpu-frequency=INT Run tests with CPUs set to a desired frequency. 'intel_pstate=disable' must be set in"
+ echo " GRUB options. You can use 'cpupower frequency-info' and 'cpupower frequency-set' to"
+ echo " check list of available frequencies. Example: --cpu-frequency=1100000."
+ echo
+ echo "Other options:"
+ echo " --perftop Run perftop measurements on the same CPU cores as specified in --cpu-allowed option."
+ echo " --dpdk-mem-stats Dump DPDK memory stats during the test."
+ set -x
+}
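+
+# Example invocation (illustrative only; adjust the disk config file, driver and cores to your setup):
+# sudo ./run_perf.sh --driver=spdk-plugin-nvme --disk-config=disks.cfg --cpu-allowed=0,1 --run-time=60 --ramp-time=10 --iodepth=128 --block-size=4096 --rw=randrw --rwmixread=70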
+
+while getopts 'h-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help)
+ usage $0
+ exit 0
+ ;;
+ rw=*) RW="${OPTARG#*=}" ;;
+ rwmixread=*) MIX="${OPTARG#*=}" ;;
+ iodepth=*) IODEPTH="${OPTARG#*=}" ;;
+ block-size=*) BLK_SIZE="${OPTARG#*=}" ;;
+ run-time=*) RUNTIME="${OPTARG#*=}" ;;
+ ramp-time=*) RAMP_TIME="${OPTARG#*=}" ;;
+ numjobs=*) NUMJOBS="${OPTARG#*=}" ;;
+ repeat-no=*) REPEAT_NO="${OPTARG#*=}" ;;
+ gtod-reduce) GTOD_REDUCE=true ;;
+ sampling-int=*) SAMPLING_INT="${OPTARG#*=}" ;;
+ fio-bin=*) FIO_BIN="${OPTARG#*=}" ;;
+ driver=*) PLUGIN="${OPTARG#*=}" ;;
+ disk-config=*)
+ DISKCFG="${OPTARG#*=}"
+ if [[ ! -f "$DISKCFG" ]]; then
+ echo "Disk confiuration file $DISKCFG does not exist!"
+ exit 1
+ fi
+ ;;
+ bdev-io-cache-size=*) BDEV_CACHE="${OPTARG#*=}" ;;
+ bdev-io-pool-size=*) BDEV_POOL="${OPTARG#*=}" ;;
+ max-disk=*) DISKNO="${OPTARG#*=}" ;;
+ cpu-allowed=*)
+ CPUS_ALLOWED="${OPTARG#*=}"
+ if [[ -f "$CPUS_ALLOWED" ]]; then
+ CPUS_ALLOWED=$(cat "$CPUS_ALLOWED")
+ fi
+ ;;
+ no-preconditioning) PRECONDITIONING=false ;;
+ no-io-scaling) NOIOSCALING=true ;;
+ cpu-frequency=*) CPUFREQ="${OPTARG#*=}" ;;
+ perftop) PERFTOP=true ;;
+ dpdk-mem-stats) DPDKMEM=true ;;
+ *)
+ usage $0 "Invalid argument '$OPTARG'"
+ exit 1
+ ;;
+ esac
+ ;;
+ h)
+ usage $0
+ exit 0
+ ;;
+ *)
+ usage $0 "Invalid argument '$optchar'"
+ exit 1
+ ;;
+ esac
+done
+
+result_dir=$testdir/results/perf_results_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}
+result_file=$result_dir/perf_results_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}.csv
+mkdir -p $result_dir
+unset iops_disks bw mean_lat_disks_usec p99_lat_disks_usec p99_99_lat_disks_usec stdev_disks_usec
+echo "run-time,ramp-time,fio-plugin,QD,block-size,num-cpu-cores,workload,workload-mix" > $result_file
+printf "%s,%s,%s,%s,%s,%s,%s,%s\n" $RUNTIME $RAMP_TIME $PLUGIN $IODEPTH $BLK_SIZE $NO_CORES $RW $MIX >> $result_file
+echo "num_of_disks,iops,avg_lat[usec],p99[usec],p99.99[usec],stdev[usec],avg_slat[usec],avg_clat[usec],bw[Kib/s]" >> $result_file
+
+trap 'rm -f *.state $testdir/bdev.conf; kill $perf_pid; wait $dpdk_mem_pid; print_backtrace' ERR SIGTERM SIGABRT
+
+if [[ "$PLUGIN" =~ "bdev" ]]; then
+ create_spdk_bdev_conf "$BDEV_CACHE" "$BDEV_POOL"
+fi
+verify_disk_number
+DISK_NAMES=$(get_disks $PLUGIN)
+DISKS_NUMA=$(get_numa_node $PLUGIN "$DISK_NAMES")
+CORES=$(get_cores "$CPUS_ALLOWED")
+NO_CORES_ARRAY=($CORES)
+NO_CORES=${#NO_CORES_ARRAY[@]}
+
+if $PRECONDITIONING; then
+ preconditioning
+fi
+
+if [[ "$PLUGIN" =~ "kernel" ]]; then
+ $rootdir/scripts/setup.sh reset
+ fio_ioengine_opt="${KERNEL_ENGINES[$PLUGIN]}"
+
+ if [[ $PLUGIN = "kernel-classic-polling" ]]; then
+ for disk in $DISK_NAMES; do
+ echo -1 > /sys/block/$disk/queue/io_poll_delay
+ done
+ elif [[ $PLUGIN = "kernel-hybrid-polling" ]]; then
+ for disk in $DISK_NAMES; do
+ echo 0 > /sys/block/$disk/queue/io_poll_delay
+ done
+ elif [[ $PLUGIN = "kernel-io-uring" ]]; then
+ modprobe -rv nvme
+ modprobe nvme poll_queues=8
+ wait_for_nvme_reload $DISK_NAMES
+
+ backup_dir="/tmp/nvme_param_bak"
+ mkdir -p $backup_dir
+
+ for disk in $DISK_NAMES; do
+ echo "INFO: Backing up device parameters for $disk"
+ sysfs=/sys/block/$disk/queue
+ mkdir -p $backup_dir/$disk
+ cat $sysfs/iostats > $backup_dir/$disk/iostats
+ cat $sysfs/rq_affinity > $backup_dir/$disk/rq_affinity
+ cat $sysfs/nomerges > $backup_dir/$disk/nomerges
+ cat $sysfs/io_poll_delay > $backup_dir/$disk/io_poll_delay
+ done
+
+ for disk in $DISK_NAMES; do
+ echo "INFO: Setting device parameters for $disk"
+ sysfs=/sys/block/$disk/queue
+ echo 0 > $sysfs/iostats
+ echo 0 > $sysfs/rq_affinity
+ echo 2 > $sysfs/nomerges
+ echo 0 > $sysfs/io_poll_delay
+ done
+ fi
+fi
+
+if [[ -n "$CPUFREQ" ]]; then
+ if [[ ! "$(cat /proc/cmdline)" =~ "intel_pstate=disable" ]]; then
+ echo "ERROR: Cannot set custom CPU frequency for test. intel_pstate=disable not in boot options."
+ false
+ else
+ cpu_governor="$(cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor)"
+ cpupower frequency-set -g userspace
+ cpupower frequency-set -f $CPUFREQ
+ fi
+fi
+
+if $PERFTOP; then
+ echo "INFO: starting perf record on cores $CPUS_ALLOWED"
+ perf record -C $CPUS_ALLOWED -o "$testdir/perf.data" &
+ perf_pid=$!
+fi
+
+if $DPDKMEM; then
+ echo "INFO: waiting to generate DPDK memory usage"
+ wait_time=$((RUNTIME / 2))
+ if [[ ! "$PLUGIN" =~ "perf" ]]; then
+ wait_time=$((wait_time + RAMP_TIME))
+ fi
+ (
+ sleep $wait_time
+ echo "INFO: generating DPDK memory usage"
+ $rootdir/scripts/rpc.py env_dpdk_get_mem_stats
+ ) &
+ dpdk_mem_pid=$!
+fi
+
+# Run each workload $REPEAT_NO times
+for ((j = 0; j < REPEAT_NO; j++)); do
+ if [ $PLUGIN = "spdk-perf-bdev" ]; then
+ run_bdevperf > $TMP_RESULT_FILE
+ iops_disks=$((iops_disks + $(get_bdevperf_results iops)))
+ bw=$((bw + $(get_bdevperf_results bw_Kibs)))
+ cp $TMP_RESULT_FILE $result_dir/perf_results_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.output
+ elif [ $PLUGIN = "spdk-perf-nvme" ]; then
+ run_nvmeperf $DISKNO > $TMP_RESULT_FILE
+ read -r iops bandwidth mean_lat min_lat max_lat <<< $(get_nvmeperf_results)
+
+ iops_disks=$((iops_disks + iops))
+ bw=$((bw + bandwidth))
+ mean_lat_disks_usec=$((mean_lat_disks_usec + mean_lat))
+ min_lat_disks_usec=$((min_lat_disks_usec + min_lat))
+ max_lat_disks_usec=$((max_lat_disks_usec + max_lat))
+
+ cp $TMP_RESULT_FILE $result_dir/perf_results_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.output
+ else
+ create_fio_config $DISKNO $PLUGIN "$DISK_NAMES" "$DISKS_NUMA" "$CORES"
+
+ if [[ "$PLUGIN" =~ "spdk-plugin" ]]; then
+ run_spdk_nvme_fio $PLUGIN "--output=$TMP_RESULT_FILE" \
+ "--write_lat_log=$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}_${k}disks_${j}"
+ else
+ run_nvme_fio $fio_ioengine_opt "--output=$TMP_RESULT_FILE" \
+ "--write_lat_log=$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}_${k}disks_${j}"
+ fi
+
+ # Store values for every number of used disks
+ # Use a recalculated value for the mixread param in case the rw mode is not a mixed one.
+ rwmixread=$MIX
+ if [[ $RW = *"read"* ]]; then
+ rwmixread=100
+ elif [[ $RW = *"write"* ]]; then
+ rwmixread=0
+ fi
+ iops_disks=$((iops_disks + $(get_results iops $rwmixread)))
+ mean_lat_disks_usec=$((mean_lat_disks_usec + $(get_results mean_lat_usec $rwmixread)))
+ p99_lat_disks_usec=$((p99_lat_disks_usec + $(get_results p99_lat_usec $rwmixread)))
+ p99_99_lat_disks_usec=$((p99_99_lat_disks_usec + $(get_results p99_99_lat_usec $rwmixread)))
+ stdev_disks_usec=$((stdev_disks_usec + $(get_results stdev_usec $rwmixread)))
+
+ mean_slat_disks_usec=$((mean_slat_disks_usec + $(get_results mean_slat_usec $rwmixread)))
+ mean_clat_disks_usec=$((mean_clat_disks_usec + $(get_results mean_clat_usec $rwmixread)))
+ bw=$((bw + $(get_results bw_Kibs $rwmixread)))
+
+ cp $TMP_RESULT_FILE $result_dir/perf_results_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.json
+ cp $testdir/config.fio $result_dir/config_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.fio
+ rm -f $testdir/config.fio
+ fi
+done
+
+if $PERFTOP; then
+ echo "INFO: Stopping perftop measurements."
+ kill $perf_pid
+ wait $perf_pid || true
+ perf report -i "$testdir/perf.data" > $result_dir/perftop_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}.txt
+ rm -f "$testdir/perf.data"
+fi
+
+if $DPDKMEM; then
+ mv "/tmp/spdk_mem_dump.txt" $result_dir/spdk_mem_dump_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}.txt
+ echo "INFO: DPDK memory usage saved in $result_dir"
+fi
+
+#Write results to csv file
+iops_disks=$((iops_disks / REPEAT_NO))
+bw=$((bw / REPEAT_NO))
+if [[ "$PLUGIN" =~ "plugin" ]]; then
+ mean_lat_disks_usec=$((mean_lat_disks_usec / REPEAT_NO))
+ p99_lat_disks_usec=$((p99_lat_disks_usec / REPEAT_NO))
+ p99_99_lat_disks_usec=$((p99_99_lat_disks_usec / REPEAT_NO))
+ stdev_disks_usec=$((stdev_disks_usec / REPEAT_NO))
+ mean_slat_disks_usec=$((mean_slat_disks_usec / REPEAT_NO))
+ mean_clat_disks_usec=$((mean_clat_disks_usec / REPEAT_NO))
+elif [[ "$PLUGIN" == "spdk-perf-bdev" ]]; then
+ mean_lat_disks_usec=0
+ p99_lat_disks_usec=0
+ p99_99_lat_disks_usec=0
+ stdev_disks_usec=0
+ mean_slat_disks_usec=0
+ mean_clat_disks_usec=0
+elif [[ "$PLUGIN" == "spdk-perf-nvme" ]]; then
+ mean_lat_disks_usec=$((mean_lat_disks_usec / REPEAT_NO))
+ p99_lat_disks_usec=0
+ p99_99_lat_disks_usec=0
+ stdev_disks_usec=0
+ mean_slat_disks_usec=0
+ mean_clat_disks_usec=0
+fi
+
+printf "%s,%s,%s,%s,%s,%s,%s,%s,%s\n" ${DISKNO} ${iops_disks} ${mean_lat_disks_usec} ${p99_lat_disks_usec} \
+ ${p99_99_lat_disks_usec} ${stdev_disks_usec} ${mean_slat_disks_usec} ${mean_clat_disks_usec} ${bw} >> $result_file
+
+if [[ -n "$CPUFREQ" ]]; then
+ cpupower frequency-set -g $cpu_governor
+fi
+
+if [ $PLUGIN = "kernel-io-uring" ]; then
+ # Reload the nvme driver so that other test runs are not affected
+ modprobe -rv nvme
+ modprobe nvme
+ wait_for_nvme_reload $DISK_NAMES
+
+ for disk in $DISK_NAMES; do
+ echo "INFO: Restoring device parameters for $disk"
+ sysfs=/sys/block/$disk/queue
+ cat $backup_dir/$disk/iostats > $sysfs/iostats
+ cat $backup_dir/$disk/rq_affinity > $sysfs/rq_affinity
+ cat $backup_dir/$disk/nomerges > $sysfs/nomerges
+ cat $backup_dir/$disk/io_poll_delay > $sysfs/io_poll_delay
+ done
+fi
+rm -f $testdir/bdev.conf $testdir/config.fio
diff --git a/src/spdk/test/nvme/reserve/.gitignore b/src/spdk/test/nvme/reserve/.gitignore
new file mode 100644
index 000000000..c58b368cf
--- /dev/null
+++ b/src/spdk/test/nvme/reserve/.gitignore
@@ -0,0 +1 @@
+reserve
diff --git a/src/spdk/test/nvme/reserve/Makefile b/src/spdk/test/nvme/reserve/Makefile
new file mode 100644
index 000000000..a3e62138b
--- /dev/null
+++ b/src/spdk/test/nvme/reserve/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+APP = reserve
+
+include $(SPDK_ROOT_DIR)/mk/nvme.libtest.mk
diff --git a/src/spdk/test/nvme/reserve/reserve.c b/src/spdk/test/nvme/reserve/reserve.c
new file mode 100644
index 000000000..9bb9230cf
--- /dev/null
+++ b/src/spdk/test/nvme/reserve/reserve.c
@@ -0,0 +1,457 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/endian.h"
+#include "spdk/nvme.h"
+#include "spdk/env.h"
+#include "spdk/log.h"
+
+#define MAX_DEVS 64
+
+struct dev {
+ struct spdk_pci_addr pci_addr;
+ struct spdk_nvme_ctrlr *ctrlr;
+ char name[100];
+};
+
+static struct dev g_devs[MAX_DEVS];
+static int g_num_devs = 0;
+
+#define foreach_dev(iter) \
+ for (iter = g_devs; iter - g_devs < g_num_devs; iter++)
+
+static int g_outstanding_commands;
+static int g_reserve_command_result;
+static bool g_feat_host_id_successful;
+
+#define HOST_ID 0xABABABABCDCDCDCD
+#define EXT_HOST_ID ((uint8_t[]){0x0f, 0x97, 0xcd, 0x74, 0x8c, 0x80, 0x41, 0x42, \
+ 0x99, 0x0f, 0x65, 0xc4, 0xf0, 0x39, 0x24, 0x20})
+
+#define CR_KEY 0xDEADBEAF5A5A5A5B
+
+static void
+feat_host_id_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ if (spdk_nvme_cpl_is_error(cpl)) {
+ fprintf(stdout, "Get/Set Features - Host Identifier failed\n");
+ g_feat_host_id_successful = false;
+ } else {
+ g_feat_host_id_successful = true;
+ }
+ g_outstanding_commands--;
+}
+
+static int
+get_host_identifier(struct spdk_nvme_ctrlr *ctrlr)
+{
+ int ret;
+ uint8_t host_id[16];
+ uint32_t host_id_size;
+ uint32_t cdw11;
+
+ if (spdk_nvme_ctrlr_get_data(ctrlr)->ctratt.host_id_exhid_supported) {
+ host_id_size = 16;
+ cdw11 = 1;
+ printf("Using 128-bit extended host identifier\n");
+ } else {
+ host_id_size = 8;
+ cdw11 = 0;
+ printf("Using 64-bit host identifier\n");
+ }
+
+ g_outstanding_commands = 0;
+ ret = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER, cdw11, host_id,
+ host_id_size,
+ feat_host_id_completion, NULL);
+ if (ret) {
+ fprintf(stdout, "Get Feature: Failed\n");
+ return -1;
+ }
+
+ g_outstanding_commands++;
+ g_feat_host_id_successful = false;
+
+ while (g_outstanding_commands) {
+ spdk_nvme_ctrlr_process_admin_completions(ctrlr);
+ }
+
+ if (g_feat_host_id_successful) {
+ spdk_log_dump(stdout, "Get Feature: Host Identifier:", host_id, host_id_size);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int
+set_host_identifier(struct spdk_nvme_ctrlr *ctrlr)
+{
+ int ret;
+ uint8_t host_id[16] = {};
+ uint32_t host_id_size;
+ uint32_t cdw11;
+
+ if (spdk_nvme_ctrlr_get_data(ctrlr)->ctratt.host_id_exhid_supported) {
+ host_id_size = 16;
+ cdw11 = 1;
+ printf("Using 128-bit extended host identifier\n");
+ memcpy(host_id, EXT_HOST_ID, host_id_size);
+ } else {
+ host_id_size = 8;
+ cdw11 = 0;
+ to_be64(host_id, HOST_ID);
+ printf("Using 64-bit host identifier\n");
+ }
+
+ g_outstanding_commands = 0;
+ ret = spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER, cdw11, 0, host_id,
+ host_id_size, feat_host_id_completion, NULL);
+ if (ret) {
+ fprintf(stdout, "Set Feature: Failed\n");
+ return -1;
+ }
+
+ g_outstanding_commands++;
+ g_feat_host_id_successful = false;
+
+ while (g_outstanding_commands) {
+ spdk_nvme_ctrlr_process_admin_completions(ctrlr);
+ }
+
+ if (g_feat_host_id_successful) {
+ spdk_log_dump(stdout, "Set Feature: Host Identifier:", host_id, host_id_size);
+ return 0;
+ }
+
+ fprintf(stderr, "Set Feature: Host Identifier Failed\n");
+ return -1;
+}
+
+static void
+reservation_ns_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ if (spdk_nvme_cpl_is_error(cpl)) {
+ g_reserve_command_result = -1;
+ } else {
+ g_reserve_command_result = 0;
+ }
+
+ g_outstanding_commands--;
+}
+
+static int
+reservation_ns_register(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
+ uint32_t ns_id, bool reg)
+{
+ int ret;
+ struct spdk_nvme_reservation_register_data rr_data;
+ enum spdk_nvme_reservation_register_action action;
+ struct spdk_nvme_ns *ns;
+
+ ns = spdk_nvme_ctrlr_get_ns(ctrlr, ns_id);
+
+ if (reg) {
+ rr_data.crkey = 0;
+ rr_data.nrkey = CR_KEY;
+ action = SPDK_NVME_RESERVE_REGISTER_KEY;
+ } else {
+ rr_data.crkey = CR_KEY;
+ rr_data.nrkey = 0;
+ action = SPDK_NVME_RESERVE_UNREGISTER_KEY;
+ }
+
+ g_outstanding_commands = 0;
+ g_reserve_command_result = -1;
+
+ ret = spdk_nvme_ns_cmd_reservation_register(ns, qpair, &rr_data, true,
+ action,
+ SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON,
+ reservation_ns_completion, NULL);
+ if (ret) {
+ fprintf(stderr, "Reservation Register Failed\n");
+ return -1;
+ }
+
+ g_outstanding_commands++;
+ while (g_outstanding_commands) {
+ spdk_nvme_qpair_process_completions(qpair, 100);
+ }
+
+ if (g_reserve_command_result) {
+ fprintf(stderr, "Reservation Register Failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+reservation_ns_report(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, uint32_t ns_id)
+{
+ int ret, i;
+ uint8_t *payload;
+ struct spdk_nvme_reservation_status_data *status;
+ struct spdk_nvme_registered_ctrlr_data *cdata;
+ struct spdk_nvme_ns *ns;
+
+ ns = spdk_nvme_ctrlr_get_ns(ctrlr, ns_id);
+
+ g_outstanding_commands = 0;
+ g_reserve_command_result = -1;
+
+ payload = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
+ if (!payload) {
+ fprintf(stderr, "DMA Buffer Allocation Failed\n");
+ return -1;
+ }
+
+ ret = spdk_nvme_ns_cmd_reservation_report(ns, qpair, payload, 0x1000,
+ reservation_ns_completion, NULL);
+ if (ret) {
+ fprintf(stderr, "Reservation Report Failed\n");
+ spdk_dma_free(payload);
+ return -1;
+ }
+
+ g_outstanding_commands++;
+ while (g_outstanding_commands) {
+ spdk_nvme_qpair_process_completions(qpair, 100);
+ }
+
+ if (g_reserve_command_result) {
+ fprintf(stderr, "Reservation Report Failed\n");
+ spdk_dma_free(payload);
+ return -1;
+ }
+
+ status = (struct spdk_nvme_reservation_status_data *)payload;
+ fprintf(stdout, "Reservation Generation Counter %u\n", status->gen);
+ fprintf(stdout, "Reservation type %u\n", status->rtype);
+ fprintf(stdout, "Reservation Number of Registered Controllers %u\n", status->regctl);
+ fprintf(stdout, "Reservation Persist Through Power Loss State %u\n", status->ptpls);
+ for (i = 0; i < status->regctl; i++) {
+ cdata = (struct spdk_nvme_registered_ctrlr_data *)(payload +
+ sizeof(struct spdk_nvme_reservation_status_data) +
+ sizeof(struct spdk_nvme_registered_ctrlr_data) * i);
+ fprintf(stdout, "Controller ID %u\n", cdata->cntlid);
+ fprintf(stdout, "Controller Reservation Status %u\n", cdata->rcsts.status);
+ fprintf(stdout, "Controller Host ID 0x%"PRIx64"\n", cdata->hostid);
+ fprintf(stdout, "Controller Reservation Key 0x%"PRIx64"\n", cdata->rkey);
+ }
+
+ spdk_dma_free(payload);
+ return 0;
+}
+
+static int
+reservation_ns_acquire(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, uint32_t ns_id)
+{
+ int ret;
+ struct spdk_nvme_reservation_acquire_data cdata;
+ struct spdk_nvme_ns *ns;
+
+ ns = spdk_nvme_ctrlr_get_ns(ctrlr, ns_id);
+ cdata.crkey = CR_KEY;
+ cdata.prkey = 0;
+
+ g_outstanding_commands = 0;
+ g_reserve_command_result = -1;
+
+ ret = spdk_nvme_ns_cmd_reservation_acquire(ns, qpair, &cdata,
+ false,
+ SPDK_NVME_RESERVE_ACQUIRE,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ reservation_ns_completion, NULL);
+ if (ret) {
+ fprintf(stderr, "Reservation Acquire Failed\n");
+ return -1;
+ }
+
+ g_outstanding_commands++;
+ while (g_outstanding_commands) {
+ spdk_nvme_qpair_process_completions(qpair, 100);
+ }
+
+ if (g_reserve_command_result) {
+ fprintf(stderr, "Reservation Acquire Failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+reservation_ns_release(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, uint32_t ns_id)
+{
+ int ret;
+ struct spdk_nvme_reservation_key_data cdata;
+ struct spdk_nvme_ns *ns;
+
+ ns = spdk_nvme_ctrlr_get_ns(ctrlr, ns_id);
+ cdata.crkey = CR_KEY;
+
+ g_outstanding_commands = 0;
+ g_reserve_command_result = -1;
+
+ ret = spdk_nvme_ns_cmd_reservation_release(ns, qpair, &cdata,
+ false,
+ SPDK_NVME_RESERVE_RELEASE,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ reservation_ns_completion, NULL);
+ if (ret) {
+ fprintf(stderr, "Reservation Release Failed\n");
+ return -1;
+ }
+
+ g_outstanding_commands++;
+ while (g_outstanding_commands) {
+ spdk_nvme_qpair_process_completions(qpair, 100);
+ }
+
+ if (g_reserve_command_result) {
+ fprintf(stderr, "Reservation Release Failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+reserve_controller(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
+ const struct spdk_pci_addr *pci_addr)
+{
+ const struct spdk_nvme_ctrlr_data *cdata;
+ int ret;
+
+ cdata = spdk_nvme_ctrlr_get_data(ctrlr);
+
+ printf("=====================================================\n");
+ printf("NVMe Controller at PCI bus %d, device %d, function %d\n",
+ pci_addr->bus, pci_addr->dev, pci_addr->func);
+ printf("=====================================================\n");
+
+ printf("Reservations: %s\n",
+ cdata->oncs.reservations ? "Supported" : "Not Supported");
+
+ if (!cdata->oncs.reservations) {
+ return 0;
+ }
+
+ ret = set_host_identifier(ctrlr);
+ if (ret) {
+ return ret;
+ }
+
+ ret = get_host_identifier(ctrlr);
+ if (ret) {
+ return ret;
+ }
+
+	/* Exercise the reservation commands against namespace 1. */
+ ret += reservation_ns_register(ctrlr, qpair, 1, 1);
+ ret += reservation_ns_acquire(ctrlr, qpair, 1);
+ ret += reservation_ns_release(ctrlr, qpair, 1);
+ ret += reservation_ns_register(ctrlr, qpair, 1, 0);
+ ret += reservation_ns_report(ctrlr, qpair, 1);
+
+ return ret;
+}
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ struct dev *dev;
+
+ /* add to dev list */
+ dev = &g_devs[g_num_devs++];
+ spdk_pci_addr_parse(&dev->pci_addr, trid->traddr);
+ dev->ctrlr = ctrlr;
+}
+
+int main(int argc, char **argv)
+{
+ struct dev *iter;
+ int i;
+ struct spdk_env_opts opts;
+ int ret = 0;
+
+ spdk_env_opts_init(&opts);
+ opts.name = "reserve";
+ opts.core_mask = "0x1";
+ opts.shm_id = 0;
+ if (spdk_env_init(&opts) < 0) {
+ fprintf(stderr, "Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
+ fprintf(stderr, "spdk_nvme_probe() failed\n");
+ return 1;
+ }
+
+ foreach_dev(iter) {
+ struct spdk_nvme_qpair *qpair;
+
+ qpair = spdk_nvme_ctrlr_alloc_io_qpair(iter->ctrlr, NULL, 0);
+ if (!qpair) {
+ fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
+ ret = 1;
+ } else {
+ ret = reserve_controller(iter->ctrlr, qpair, &iter->pci_addr);
+ }
+
+ if (ret) {
+ break;
+ }
+ }
+
+ printf("Reservation test %s\n", ret ? "failed" : "passed");
+
+ for (i = 0; i < g_num_devs; i++) {
+ struct dev *dev = &g_devs[i];
+ spdk_nvme_detach(dev->ctrlr);
+ }
+
+ return ret;
+}
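The reservation sequence above (register, acquire, release, unregister, report) runs against namespace 1 of every attached PCIe controller. A minimal sketch of how this test is typically launched, assuming the standard SPDK scripts/setup.sh helper has already been used to bind the devices to a userspace driver (that setup step is an assumption, not part of this diff):

    # Bind NVMe devices away from the kernel driver, then run the test;
    # it probes all attached controllers and takes no arguments.
    sudo scripts/setup.sh
    sudo ./reserve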
diff --git a/src/spdk/test/nvme/reset/.gitignore b/src/spdk/test/nvme/reset/.gitignore
new file mode 100644
index 000000000..a16781b1b
--- /dev/null
+++ b/src/spdk/test/nvme/reset/.gitignore
@@ -0,0 +1 @@
+reset
diff --git a/src/spdk/test/nvme/reset/Makefile b/src/spdk/test/nvme/reset/Makefile
new file mode 100644
index 000000000..dd1774bcd
--- /dev/null
+++ b/src/spdk/test/nvme/reset/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+APP = reset
+
+include $(SPDK_ROOT_DIR)/mk/nvme.libtest.mk
diff --git a/src/spdk/test/nvme/reset/reset.c b/src/spdk/test/nvme/reset/reset.c
new file mode 100644
index 000000000..70d44db39
--- /dev/null
+++ b/src/spdk/test/nvme/reset/reset.c
@@ -0,0 +1,716 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/nvme.h"
+#include "spdk/env.h"
+#include "spdk/string.h"
+#include "spdk/pci_ids.h"
+
+struct ctrlr_entry {
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct ctrlr_entry *next;
+ char name[1024];
+};
+
+struct ns_entry {
+ struct spdk_nvme_ns *ns;
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct ns_entry *next;
+ uint32_t io_size_blocks;
+ uint64_t size_in_ios;
+ char name[1024];
+};
+
+struct ns_worker_ctx {
+ struct ns_entry *entry;
+ struct spdk_nvme_qpair *qpair;
+ uint64_t io_completed;
+ uint64_t io_completed_error;
+ uint64_t io_submitted;
+ uint64_t current_queue_depth;
+ uint64_t offset_in_ios;
+ bool is_draining;
+
+ struct ns_worker_ctx *next;
+};
+
+struct reset_task {
+ struct ns_worker_ctx *ns_ctx;
+ void *buf;
+};
+
+struct worker_thread {
+ struct ns_worker_ctx *ns_ctx;
+ unsigned lcore;
+};
+
+static struct spdk_mempool *task_pool;
+
+static struct ctrlr_entry *g_controllers = NULL;
+static struct ns_entry *g_namespaces = NULL;
+static int g_num_namespaces = 0;
+static struct worker_thread *g_workers = NULL;
+static bool g_qemu_ssd_found = false;
+
+static uint64_t g_tsc_rate;
+
+static int g_io_size_bytes;
+static int g_rw_percentage;
+static int g_is_random;
+static int g_queue_depth;
+static int g_time_in_sec;
+
+#define TASK_POOL_NUM 8192
+
+static void
+register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
+{
+ struct ns_entry *entry;
+ const struct spdk_nvme_ctrlr_data *cdata;
+
+ if (!spdk_nvme_ns_is_active(ns)) {
+ printf("Skipping inactive NS %u\n", spdk_nvme_ns_get_id(ns));
+ return;
+ }
+
+ entry = malloc(sizeof(struct ns_entry));
+ if (entry == NULL) {
+ perror("ns_entry malloc");
+ exit(1);
+ }
+
+ cdata = spdk_nvme_ctrlr_get_data(ctrlr);
+
+ entry->ns = ns;
+ entry->ctrlr = ctrlr;
+ entry->size_in_ios = spdk_nvme_ns_get_size(ns) /
+ g_io_size_bytes;
+ entry->io_size_blocks = g_io_size_bytes / spdk_nvme_ns_get_sector_size(ns);
+
+ snprintf(entry->name, 44, "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
+
+ g_num_namespaces++;
+ entry->next = g_namespaces;
+ g_namespaces = entry;
+}
+
+static void
+register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
+{
+ int nsid, num_ns;
+ struct spdk_nvme_ns *ns;
+ struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
+
+ if (entry == NULL) {
+ perror("ctrlr_entry malloc");
+ exit(1);
+ }
+
+ entry->ctrlr = ctrlr;
+ entry->next = g_controllers;
+ g_controllers = entry;
+
+ num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
+ for (nsid = 1; nsid <= num_ns; nsid++) {
+ ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
+ if (ns == NULL) {
+ continue;
+ }
+ register_ns(ctrlr, ns);
+ }
+}
+
+static void io_complete(void *ctx, const struct spdk_nvme_cpl *completion);
+
+static __thread unsigned int seed = 0;
+
+static void
+submit_single_io(struct ns_worker_ctx *ns_ctx)
+{
+ struct reset_task *task = NULL;
+ uint64_t offset_in_ios;
+ int rc;
+ struct ns_entry *entry = ns_ctx->entry;
+
+ task = spdk_mempool_get(task_pool);
+ if (!task) {
+ fprintf(stderr, "Failed to get task from task_pool\n");
+ exit(1);
+ }
+
+ task->buf = spdk_zmalloc(g_io_size_bytes, 0x200, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ if (!task->buf) {
+ spdk_free(task->buf);
+ fprintf(stderr, "task->buf spdk_zmalloc failed\n");
+ exit(1);
+ }
+
+ task->ns_ctx = ns_ctx;
+ task->ns_ctx->io_submitted++;
+
+ if (g_is_random) {
+ offset_in_ios = rand_r(&seed) % entry->size_in_ios;
+ } else {
+ offset_in_ios = ns_ctx->offset_in_ios++;
+ if (ns_ctx->offset_in_ios == entry->size_in_ios) {
+ ns_ctx->offset_in_ios = 0;
+ }
+ }
+
+ if ((g_rw_percentage == 100) ||
+ (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
+ rc = spdk_nvme_ns_cmd_read(entry->ns, ns_ctx->qpair, task->buf,
+ offset_in_ios * entry->io_size_blocks,
+ entry->io_size_blocks, io_complete, task, 0);
+ } else {
+ rc = spdk_nvme_ns_cmd_write(entry->ns, ns_ctx->qpair, task->buf,
+ offset_in_ios * entry->io_size_blocks,
+ entry->io_size_blocks, io_complete, task, 0);
+ }
+
+ if (rc != 0) {
+ fprintf(stderr, "starting I/O failed\n");
+ } else {
+ ns_ctx->current_queue_depth++;
+ }
+}
+
+static void
+task_complete(struct reset_task *task, const struct spdk_nvme_cpl *completion)
+{
+ struct ns_worker_ctx *ns_ctx;
+
+ ns_ctx = task->ns_ctx;
+ ns_ctx->current_queue_depth--;
+
+ if (spdk_nvme_cpl_is_error(completion)) {
+ ns_ctx->io_completed_error++;
+ } else {
+ ns_ctx->io_completed++;
+ }
+
+ spdk_free(task->buf);
+ spdk_mempool_put(task_pool, task);
+
+ /*
+ * is_draining indicates when time has expired for the test run
+ * and we are just waiting for the previously submitted I/O
+ * to complete. In this case, do not submit a new I/O to replace
+ * the one just completed.
+ */
+ if (!ns_ctx->is_draining) {
+ submit_single_io(ns_ctx);
+ }
+}
+
+static void
+io_complete(void *ctx, const struct spdk_nvme_cpl *completion)
+{
+ task_complete((struct reset_task *)ctx, completion);
+}
+
+static void
+check_io(struct ns_worker_ctx *ns_ctx)
+{
+ spdk_nvme_qpair_process_completions(ns_ctx->qpair, 0);
+}
+
+static void
+submit_io(struct ns_worker_ctx *ns_ctx, int queue_depth)
+{
+ while (queue_depth-- > 0) {
+ submit_single_io(ns_ctx);
+ }
+}
+
+static void
+drain_io(struct ns_worker_ctx *ns_ctx)
+{
+ ns_ctx->is_draining = true;
+ while (ns_ctx->current_queue_depth > 0) {
+ check_io(ns_ctx);
+ }
+}
+
+static int
+work_fn(void *arg)
+{
+ uint64_t tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;
+ struct worker_thread *worker = (struct worker_thread *)arg;
+ struct ns_worker_ctx *ns_ctx = NULL;
+ bool did_reset = false;
+
+ printf("Starting thread on core %u\n", worker->lcore);
+
+ /* Submit initial I/O for each namespace. */
+ ns_ctx = worker->ns_ctx;
+ while (ns_ctx != NULL) {
+ ns_ctx->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_ctx->entry->ctrlr, NULL, 0);
+ if (ns_ctx->qpair == NULL) {
+ fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair() failed on core %u\n", worker->lcore);
+ return -1;
+ }
+ submit_io(ns_ctx, g_queue_depth);
+ ns_ctx = ns_ctx->next;
+ }
+
+ while (1) {
+ if (!did_reset && ((tsc_end - spdk_get_ticks()) / g_tsc_rate) > (uint64_t)g_time_in_sec / 2) {
+ ns_ctx = worker->ns_ctx;
+ while (ns_ctx != NULL) {
+ if (spdk_nvme_ctrlr_reset(ns_ctx->entry->ctrlr) < 0) {
+ fprintf(stderr, "nvme reset failed.\n");
+ return -1;
+ }
+ ns_ctx = ns_ctx->next;
+ }
+ did_reset = true;
+ }
+
+ /*
+ * Check for completed I/O for each controller. A new
+ * I/O will be submitted in the io_complete callback
+ * to replace each I/O that is completed.
+ */
+ ns_ctx = worker->ns_ctx;
+ while (ns_ctx != NULL) {
+ check_io(ns_ctx);
+ ns_ctx = ns_ctx->next;
+ }
+
+ if (spdk_get_ticks() > tsc_end) {
+ break;
+ }
+ }
+
+ ns_ctx = worker->ns_ctx;
+ while (ns_ctx != NULL) {
+ drain_io(ns_ctx);
+ spdk_nvme_ctrlr_free_io_qpair(ns_ctx->qpair);
+ ns_ctx = ns_ctx->next;
+ }
+
+ return 0;
+}
+
+static void usage(char *program_name)
+{
+ printf("%s options", program_name);
+ printf("\n");
+ printf("\t[-q io depth]\n");
+ printf("\t[-s io size in bytes]\n");
+ printf("\t[-w io pattern type, must be one of\n");
+ printf("\t\t(read, write, randread, randwrite, rw, randrw)]\n");
+ printf("\t[-M rwmixread (100 for reads, 0 for writes)]\n");
+	printf("\t[-t time in seconds (should be larger than 15 seconds)]\n");
+	printf("\t[-m max completions per poll]\n");
+	printf("\t\t(default: 0 - unlimited)\n");
+}
+
+static int
+print_stats(void)
+{
+ uint64_t io_completed, io_submitted, io_completed_error;
+ uint64_t total_completed_io, total_submitted_io, total_completed_err_io;
+ struct worker_thread *worker;
+ struct ns_worker_ctx *ns_ctx;
+
+ total_completed_io = 0;
+ total_submitted_io = 0;
+ total_completed_err_io = 0;
+
+ worker = g_workers;
+ ns_ctx = worker->ns_ctx;
+ while (ns_ctx) {
+ io_completed = ns_ctx->io_completed;
+ io_submitted = ns_ctx->io_submitted;
+ io_completed_error = ns_ctx->io_completed_error;
+ total_completed_io += io_completed;
+ total_submitted_io += io_submitted;
+ total_completed_err_io += io_completed_error;
+ ns_ctx = ns_ctx->next;
+ }
+
+ printf("========================================================\n");
+ printf("%16lu IO completed successfully\n", total_completed_io);
+ printf("%16lu IO completed with error\n", total_completed_err_io);
+ printf("--------------------------------------------------------\n");
+ printf("%16lu IO completed total\n", total_completed_io + total_completed_err_io);
+ printf("%16lu IO submitted\n", total_submitted_io);
+
+ if (total_submitted_io != (total_completed_io + total_completed_err_io)) {
+		fprintf(stderr, "Some I/O completions are missing\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+parse_args(int argc, char **argv)
+{
+ const char *workload_type;
+ int op;
+ bool mix_specified = false;
+ long int val;
+
+ /* default value */
+ g_queue_depth = 0;
+ g_io_size_bytes = 0;
+ workload_type = NULL;
+ g_time_in_sec = 0;
+ g_rw_percentage = -1;
+
+ while ((op = getopt(argc, argv, "m:q:s:t:w:M:")) != -1) {
+ if (op == 'w') {
+ workload_type = optarg;
+ } else if (op == '?') {
+ usage(argv[0]);
+ return -EINVAL;
+ } else {
+ val = spdk_strtol(optarg, 10);
+ if (val < 0) {
+ fprintf(stderr, "Converting a string to integer failed\n");
+ return val;
+ }
+ switch (op) {
+ case 'q':
+ g_queue_depth = val;
+ break;
+ case 's':
+ g_io_size_bytes = val;
+ break;
+ case 't':
+ g_time_in_sec = val;
+ break;
+ case 'M':
+ g_rw_percentage = val;
+ mix_specified = true;
+ break;
+ default:
+ usage(argv[0]);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (!g_queue_depth) {
+ usage(argv[0]);
+ return 1;
+ }
+ if (!g_io_size_bytes) {
+ usage(argv[0]);
+ return 1;
+ }
+ if (!workload_type) {
+ usage(argv[0]);
+ return 1;
+ }
+ if (!g_time_in_sec) {
+ usage(argv[0]);
+ return 1;
+ }
+
+ if (strcmp(workload_type, "read") &&
+ strcmp(workload_type, "write") &&
+ strcmp(workload_type, "randread") &&
+ strcmp(workload_type, "randwrite") &&
+ strcmp(workload_type, "rw") &&
+ strcmp(workload_type, "randrw")) {
+ fprintf(stderr,
+ "io pattern type must be one of\n"
+ "(read, write, randread, randwrite, rw, randrw)\n");
+ return 1;
+ }
+
+ if (!strcmp(workload_type, "read") ||
+ !strcmp(workload_type, "randread")) {
+ g_rw_percentage = 100;
+ }
+
+ if (!strcmp(workload_type, "write") ||
+ !strcmp(workload_type, "randwrite")) {
+ g_rw_percentage = 0;
+ }
+
+ if (!strcmp(workload_type, "read") ||
+ !strcmp(workload_type, "randread") ||
+ !strcmp(workload_type, "write") ||
+ !strcmp(workload_type, "randwrite")) {
+ if (mix_specified) {
+ fprintf(stderr, "Ignoring -M option... Please use -M option"
+ " only when using rw or randrw.\n");
+ }
+ }
+
+ if (!strcmp(workload_type, "rw") ||
+ !strcmp(workload_type, "randrw")) {
+ if (g_rw_percentage < 0 || g_rw_percentage > 100) {
+ fprintf(stderr,
+				"-M must be specified with a value from 0 to 100 "
+ "for rw or randrw.\n");
+ return 1;
+ }
+ }
+
+ if (!strcmp(workload_type, "read") ||
+ !strcmp(workload_type, "write") ||
+ !strcmp(workload_type, "rw")) {
+ g_is_random = 0;
+ } else {
+ g_is_random = 1;
+ }
+
+ return 0;
+}
+
+static int
+register_workers(void)
+{
+ struct worker_thread *worker;
+
+ worker = malloc(sizeof(struct worker_thread));
+ if (worker == NULL) {
+ perror("worker_thread malloc");
+ return -1;
+ }
+
+ memset(worker, 0, sizeof(struct worker_thread));
+ worker->lcore = spdk_env_get_current_core();
+
+ g_workers = worker;
+
+ return 0;
+}
+
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ opts->disable_error_logging = true;
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ if (trid->trtype == SPDK_NVME_TRANSPORT_PCIE) {
+ struct spdk_pci_device *dev = spdk_nvme_ctrlr_get_pci_device(ctrlr);
+
+ /* QEMU emulated SSDs can't handle this test, so we will skip
+ * them. QEMU NVMe SSDs report themselves as VID == Intel. So we need
+ * to check this specific 0x5845 device ID to know whether it's QEMU
+ * or not.
+ */
+ if (spdk_pci_device_get_vendor_id(dev) == SPDK_PCI_VID_INTEL &&
+ spdk_pci_device_get_device_id(dev) == 0x5845) {
+ g_qemu_ssd_found = true;
+ printf("Skipping QEMU NVMe SSD at %s\n", trid->traddr);
+ return;
+ }
+ }
+
+ register_ctrlr(ctrlr);
+}
+
+static int
+register_controllers(void)
+{
+ printf("Initializing NVMe Controllers\n");
+
+ if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
+ fprintf(stderr, "spdk_nvme_probe() failed\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+unregister_controllers(void)
+{
+ struct ctrlr_entry *entry = g_controllers;
+
+ while (entry) {
+ struct ctrlr_entry *next = entry->next;
+ spdk_nvme_detach(entry->ctrlr);
+ free(entry);
+ entry = next;
+ }
+}
+
+static int
+associate_workers_with_ns(void)
+{
+ struct ns_entry *entry = g_namespaces;
+ struct worker_thread *worker = g_workers;
+ struct ns_worker_ctx *ns_ctx;
+ int i, count;
+
+ count = g_num_namespaces;
+
+ for (i = 0; i < count; i++) {
+ if (entry == NULL) {
+ break;
+ }
+ ns_ctx = malloc(sizeof(struct ns_worker_ctx));
+ if (!ns_ctx) {
+ return -1;
+ }
+ memset(ns_ctx, 0, sizeof(*ns_ctx));
+
+ printf("Associating %s with lcore %d\n", entry->name, worker->lcore);
+ ns_ctx->entry = entry;
+ ns_ctx->next = worker->ns_ctx;
+ worker->ns_ctx = ns_ctx;
+
+ worker = g_workers;
+
+ entry = entry->next;
+ if (entry == NULL) {
+ entry = g_namespaces;
+ }
+ }
+
+ return 0;
+}
+
+static int
+run_nvme_reset_cycle(void)
+{
+ struct worker_thread *worker;
+ struct ns_worker_ctx *ns_ctx;
+
+ if (work_fn(g_workers) != 0) {
+ return -1;
+ }
+
+ if (print_stats() != 0) {
+ return -1;
+ }
+
+ worker = g_workers;
+ ns_ctx = worker->ns_ctx;
+ while (ns_ctx != NULL) {
+ ns_ctx->io_completed = 0;
+ ns_ctx->io_completed_error = 0;
+ ns_ctx->io_submitted = 0;
+ ns_ctx->is_draining = false;
+ ns_ctx = ns_ctx->next;
+ }
+
+ return 0;
+}
+
+static void
+spdk_reset_free_tasks(void)
+{
+ if (spdk_mempool_count(task_pool) != TASK_POOL_NUM) {
+ fprintf(stderr, "task_pool count is %zu but should be %d\n",
+ spdk_mempool_count(task_pool), TASK_POOL_NUM);
+ }
+ spdk_mempool_free(task_pool);
+}
+
+int main(int argc, char **argv)
+{
+ int rc;
+ int i;
+ struct spdk_env_opts opts;
+
+
+ rc = parse_args(argc, argv);
+ if (rc != 0) {
+ return rc;
+ }
+
+ spdk_env_opts_init(&opts);
+ opts.name = "reset";
+ opts.core_mask = "0x1";
+ opts.shm_id = 0;
+ if (spdk_env_init(&opts) < 0) {
+ fprintf(stderr, "Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ if (register_controllers() != 0) {
+ return 1;
+ }
+
+ if (!g_controllers) {
+ printf("No NVMe controller found, %s exiting\n", argv[0]);
+ return g_qemu_ssd_found ? 0 : 1;
+ }
+
+ task_pool = spdk_mempool_create("task_pool", TASK_POOL_NUM,
+ sizeof(struct reset_task),
+ 64, SPDK_ENV_SOCKET_ID_ANY);
+ if (!task_pool) {
+ fprintf(stderr, "Cannot create task pool\n");
+ return 1;
+ }
+
+ g_tsc_rate = spdk_get_ticks_hz();
+
+ if (register_workers() != 0) {
+ return 1;
+ }
+
+ if (associate_workers_with_ns() != 0) {
+ rc = 1;
+ goto cleanup;
+ }
+
+ printf("Initialization complete. Launching workers.\n");
+
+ for (i = 2; i >= 0; i--) {
+ rc = run_nvme_reset_cycle();
+ if (rc != 0) {
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ unregister_controllers();
+ spdk_reset_free_tasks();
+
+ if (rc != 0) {
+		fprintf(stderr, "%s: errors occurred\n", argv[0]);
+ }
+
+ return rc;
+}
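For reference, a hedged example of invoking the reset test with the flags documented in usage() above; the queue depth, block size, workload, and runtime are illustrative values only, not taken from this diff:

    # 30-second random-write run at queue depth 64 with 4 KiB I/O;
    # a controller reset is issued while I/O is still in flight.
    sudo ./reset -q 64 -s 4096 -w randwrite -t 30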
diff --git a/src/spdk/test/nvme/sgl/.gitignore b/src/spdk/test/nvme/sgl/.gitignore
new file mode 100644
index 000000000..d1cebd688
--- /dev/null
+++ b/src/spdk/test/nvme/sgl/.gitignore
@@ -0,0 +1 @@
+sgl
diff --git a/src/spdk/test/nvme/sgl/Makefile b/src/spdk/test/nvme/sgl/Makefile
new file mode 100644
index 000000000..fe57e6147
--- /dev/null
+++ b/src/spdk/test/nvme/sgl/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+APP = sgl
+
+include $(SPDK_ROOT_DIR)/mk/nvme.libtest.mk
diff --git a/src/spdk/test/nvme/sgl/sgl.c b/src/spdk/test/nvme/sgl/sgl.c
new file mode 100644
index 000000000..09794681f
--- /dev/null
+++ b/src/spdk/test/nvme/sgl/sgl.c
@@ -0,0 +1,545 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/nvme.h"
+#include "spdk/env.h"
+#include "spdk/util.h"
+
+#define MAX_DEVS 64
+
+#define MAX_IOVS 128
+
+#define DATA_PATTERN 0x5A
+
+#define BASE_LBA_START 0x100000
+
+struct dev {
+ struct spdk_nvme_ctrlr *ctrlr;
+ char name[SPDK_NVMF_TRADDR_MAX_LEN + 1];
+};
+
+static struct dev devs[MAX_DEVS];
+static int num_devs = 0;
+
+#define foreach_dev(iter) \
+ for (iter = devs; iter - devs < num_devs; iter++)
+
+static int io_complete_flag = 0;
+
+struct sgl_element {
+ void *base;
+ size_t offset;
+ size_t len;
+};
+
+struct io_request {
+ uint32_t current_iov_index;
+ uint32_t current_iov_bytes_left;
+ struct sgl_element iovs[MAX_IOVS];
+ uint32_t nseg;
+ uint32_t misalign;
+};
+
+static void nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
+{
+ uint32_t i;
+ uint32_t offset = 0;
+ struct sgl_element *iov;
+ struct io_request *req = (struct io_request *)cb_arg;
+
+ for (i = 0; i < req->nseg; i++) {
+ iov = &req->iovs[i];
+ offset += iov->len;
+ if (offset > sgl_offset) {
+ break;
+ }
+ }
+ req->current_iov_index = i;
+ req->current_iov_bytes_left = offset - sgl_offset;
+ return;
+}
+
+static int nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+ struct io_request *req = (struct io_request *)cb_arg;
+ struct sgl_element *iov;
+
+ if (req->current_iov_index >= req->nseg) {
+ *length = 0;
+ *address = NULL;
+ return 0;
+ }
+
+ iov = &req->iovs[req->current_iov_index];
+
+ if (req->current_iov_bytes_left) {
+ *address = iov->base + iov->offset + iov->len - req->current_iov_bytes_left;
+ *length = req->current_iov_bytes_left;
+ req->current_iov_bytes_left = 0;
+ } else {
+ *address = iov->base + iov->offset;
+ *length = iov->len;
+ }
+
+ req->current_iov_index++;
+
+ return 0;
+}
+
+static void
+io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
+{
+ if (spdk_nvme_cpl_is_error(cpl)) {
+ io_complete_flag = 2;
+ } else {
+ io_complete_flag = 1;
+ }
+}
+
+static void build_io_request_0(struct io_request *req)
+{
+ req->nseg = 1;
+
+ req->iovs[0].base = spdk_zmalloc(0x800, 4, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[0].len = 0x800;
+}
+
+static void build_io_request_1(struct io_request *req)
+{
+ req->nseg = 1;
+
+ /* 512B for 1st sge */
+ req->iovs[0].base = spdk_zmalloc(0x200, 0x200, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[0].len = 0x200;
+}
+
+static void build_io_request_2(struct io_request *req)
+{
+ req->nseg = 1;
+
+ /* 256KB for 1st sge */
+ req->iovs[0].base = spdk_zmalloc(0x40000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[0].len = 0x40000;
+}
+
+static void build_io_request_3(struct io_request *req)
+{
+ req->nseg = 3;
+
+	/* 2KB for 1st sge; make sure the iov address starts at a 0x800 boundary
+	 * and ends at a 0x1000 boundary */
+ req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[0].offset = 0x800;
+ req->iovs[0].len = 0x800;
+
+	/* 4KB for 2nd sge */
+ req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[1].len = 0x1000;
+
+	/* 12KB for 3rd sge */
+ req->iovs[2].base = spdk_zmalloc(0x3000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[2].len = 0x3000;
+}
+
+static void build_io_request_4(struct io_request *req)
+{
+ uint32_t i;
+
+ req->nseg = 32;
+
+ /* 4KB for 1st sge */
+ req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[0].len = 0x1000;
+
+	/* 8KB for each of the remaining 31 sges */
+ for (i = 1; i < req->nseg; i++) {
+ req->iovs[i].base = spdk_zmalloc(0x2000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[i].len = 0x2000;
+ }
+}
+
+static void build_io_request_5(struct io_request *req)
+{
+ req->nseg = 1;
+
+ /* 8KB for 1st sge */
+ req->iovs[0].base = spdk_zmalloc(0x2000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[0].len = 0x2000;
+}
+
+static void build_io_request_6(struct io_request *req)
+{
+ req->nseg = 2;
+
+ /* 4KB for 1st sge */
+ req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[0].len = 0x1000;
+
+	/* 4KB for 2nd sge */
+ req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[1].len = 0x1000;
+}
+
+static void build_io_request_7(struct io_request *req)
+{
+ uint8_t *base;
+
+ req->nseg = 1;
+
+ /*
+ * Create a 64KB sge, but ensure it is *not* aligned on a 4KB
+ * boundary. This is valid for single element buffers with PRP.
+ */
+ base = spdk_zmalloc(0x11000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->misalign = 64;
+ req->iovs[0].base = base + req->misalign;
+ req->iovs[0].len = 0x10000;
+}
+
+static void build_io_request_8(struct io_request *req)
+{
+ req->nseg = 2;
+
+ /*
+	 * 1KB for 1st sge; make sure the iov address does not start or end
+	 * on a 0x1000 boundary
+ */
+ req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[0].offset = 0x400;
+ req->iovs[0].len = 0x400;
+
+ /*
+	 * 1KB for 2nd sge; make sure the iov address does not start or end
+	 * on a 0x1000 boundary
+ */
+ req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ req->iovs[1].offset = 0x400;
+ req->iovs[1].len = 0x400;
+}
+
+static void build_io_request_9(struct io_request *req)
+{
+ /*
+	 * Check that a mix of PRP-compliant and non-compliant requests is handled
+	 * properly by splitting them into subrequests.
+	 * Construct buffers with the following layout:
+ */
+ const size_t req_len[] = { 2048, 4096, 2048, 4096, 2048, 1024 };
+ const size_t req_off[] = { 0x800, 0x0, 0x0, 0x100, 0x800, 0x800 };
+ struct sgl_element *iovs = req->iovs;
+ uint32_t i;
+ req->nseg = SPDK_COUNTOF(req_len);
+ assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));
+
+ for (i = 0; i < req->nseg; i++) {
+ iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ iovs[i].offset = req_off[i];
+ iovs[i].len = req_len[i];
+ }
+}
+
+static void build_io_request_10(struct io_request *req)
+{
+ /*
+ * Test the case where we have a valid PRP list, but the first and last
+ * elements are not exact multiples of the logical block size.
+ */
+ const size_t req_len[] = { 4004, 4096, 92 };
+ const size_t req_off[] = { 0x5c, 0x0, 0x0 };
+ struct sgl_element *iovs = req->iovs;
+ uint32_t i;
+ req->nseg = SPDK_COUNTOF(req_len);
+ assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));
+
+ for (i = 0; i < req->nseg; i++) {
+ iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ iovs[i].offset = req_off[i];
+ iovs[i].len = req_len[i];
+ }
+}
+
+static void build_io_request_11(struct io_request *req)
+{
+ /* This test case focuses on the last element not starting on a page boundary. */
+ const size_t req_len[] = { 512, 512 };
+ const size_t req_off[] = { 0xe00, 0x800 };
+ struct sgl_element *iovs = req->iovs;
+ uint32_t i;
+ req->nseg = SPDK_COUNTOF(req_len);
+ assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));
+
+ for (i = 0; i < req->nseg; i++) {
+ iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL, SPDK_ENV_LCORE_ID_ANY,
+ SPDK_MALLOC_DMA);
+ iovs[i].offset = req_off[i];
+ iovs[i].len = req_len[i];
+ }
+}
+
+typedef void (*nvme_build_io_req_fn_t)(struct io_request *req);
+
+static void
+free_req(struct io_request *req)
+{
+ uint32_t i;
+
+ if (req == NULL) {
+ return;
+ }
+
+ for (i = 0; i < req->nseg; i++) {
+ spdk_free(req->iovs[i].base - req->misalign);
+ }
+
+ spdk_free(req);
+}
+
+static int
+writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, const char *test_name)
+{
+ int rc = 0;
+ uint32_t len, lba_count;
+ uint32_t i, j, nseg, remainder;
+ char *buf;
+
+ struct io_request *req;
+ struct spdk_nvme_ns *ns;
+ struct spdk_nvme_qpair *qpair;
+ const struct spdk_nvme_ns_data *nsdata;
+
+ ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
+ if (!ns) {
+ fprintf(stderr, "Null namespace\n");
+ return 0;
+ }
+ nsdata = spdk_nvme_ns_get_data(ns);
+ if (!nsdata || !spdk_nvme_ns_get_sector_size(ns)) {
+ fprintf(stderr, "Empty nsdata or wrong sector size\n");
+ return 0;
+ }
+
+ if (spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
+ return 0;
+ }
+
+ req = spdk_zmalloc(sizeof(*req), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ if (!req) {
+		fprintf(stderr, "Failed to allocate request\n");
+ return 0;
+ }
+
+	/* Set up the I/O parameters. */
+ build_io_fn(req);
+
+ len = 0;
+ for (i = 0; i < req->nseg; i++) {
+ struct sgl_element *sge = &req->iovs[i];
+
+ len += sge->len;
+ }
+
+ lba_count = len / spdk_nvme_ns_get_sector_size(ns);
+ remainder = len % spdk_nvme_ns_get_sector_size(ns);
+ if (!lba_count || remainder || (BASE_LBA_START + lba_count > (uint32_t)nsdata->nsze)) {
+ fprintf(stderr, "%s: %s Invalid IO length parameter\n", dev->name, test_name);
+ free_req(req);
+ return 0;
+ }
+
+ qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
+ if (!qpair) {
+ free_req(req);
+ return -1;
+ }
+
+ nseg = req->nseg;
+ for (i = 0; i < nseg; i++) {
+ memset(req->iovs[i].base + req->iovs[i].offset, DATA_PATTERN, req->iovs[i].len);
+ }
+
+ rc = spdk_nvme_ns_cmd_writev(ns, qpair, BASE_LBA_START, lba_count,
+ io_complete, req, 0,
+ nvme_request_reset_sgl,
+ nvme_request_next_sge);
+
+ if (rc != 0) {
+ fprintf(stderr, "%s: %s writev failed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return -1;
+ }
+
+ io_complete_flag = 0;
+
+ while (!io_complete_flag) {
+ spdk_nvme_qpair_process_completions(qpair, 1);
+ }
+
+ if (io_complete_flag != 1) {
+ fprintf(stderr, "%s: %s writev failed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return -1;
+ }
+
+ /* reset completion flag */
+ io_complete_flag = 0;
+
+ for (i = 0; i < nseg; i++) {
+ memset(req->iovs[i].base + req->iovs[i].offset, 0, req->iovs[i].len);
+ }
+
+ rc = spdk_nvme_ns_cmd_readv(ns, qpair, BASE_LBA_START, lba_count,
+ io_complete, req, 0,
+ nvme_request_reset_sgl,
+ nvme_request_next_sge);
+
+ if (rc != 0) {
+ fprintf(stderr, "%s: %s readv failed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return -1;
+ }
+
+ while (!io_complete_flag) {
+ spdk_nvme_qpair_process_completions(qpair, 1);
+ }
+
+ if (io_complete_flag != 1) {
+ fprintf(stderr, "%s: %s readv failed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return -1;
+ }
+
+ for (i = 0; i < nseg; i++) {
+ buf = (char *)req->iovs[i].base + req->iovs[i].offset;
+ for (j = 0; j < req->iovs[i].len; j++) {
+ if (buf[j] != DATA_PATTERN) {
+				fprintf(stderr, "%s: %s write/read succeeded, but data verification failed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return -1;
+ }
+ }
+ }
+
+ fprintf(stdout, "%s: %s test passed\n", dev->name, test_name);
+ spdk_nvme_ctrlr_free_io_qpair(qpair);
+ free_req(req);
+ return rc;
+}
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ printf("Attaching to %s\n", trid->traddr);
+
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ struct dev *dev;
+
+ /* add to dev list */
+ dev = &devs[num_devs++];
+
+ dev->ctrlr = ctrlr;
+
+ snprintf(dev->name, sizeof(dev->name), "%s",
+ trid->traddr);
+
+ printf("Attached to %s\n", dev->name);
+}
+
+int main(int argc, char **argv)
+{
+ struct dev *iter;
+ int rc, i;
+ struct spdk_env_opts opts;
+
+ spdk_env_opts_init(&opts);
+ opts.name = "nvme_sgl";
+ opts.core_mask = "0x1";
+ opts.shm_id = 0;
+ if (spdk_env_init(&opts) < 0) {
+ fprintf(stderr, "Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ printf("NVMe Readv/Writev Request test\n");
+
+ if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
+		fprintf(stderr, "spdk_nvme_probe() failed\n");
+ exit(1);
+ }
+
+ rc = 0;
+ foreach_dev(iter) {
+#define TEST(x) writev_readv_tests(iter, x, #x)
+ if (TEST(build_io_request_0)
+ || TEST(build_io_request_1)
+ || TEST(build_io_request_2)
+ || TEST(build_io_request_3)
+ || TEST(build_io_request_4)
+ || TEST(build_io_request_5)
+ || TEST(build_io_request_6)
+ || TEST(build_io_request_7)
+ || TEST(build_io_request_8)
+ || TEST(build_io_request_9)
+ || TEST(build_io_request_10)
+ || TEST(build_io_request_11)) {
+#undef TEST
+ rc = 1;
+ printf("%s: failed sgl tests\n", iter->name);
+ }
+ }
+
+ printf("Cleaning up...\n");
+
+ for (i = 0; i < num_devs; i++) {
+ struct dev *dev = &devs[i];
+
+ spdk_nvme_detach(dev->ctrlr);
+ }
+
+ return rc;
+}
diff --git a/src/spdk/test/nvme/spdk_nvme_cli.sh b/src/spdk/test/nvme/spdk_nvme_cli.sh
new file mode 100755
index 000000000..516a16f48
--- /dev/null
+++ b/src/spdk/test/nvme/spdk_nvme_cli.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+if [[ $(uname) != "Linux" ]]; then
+	echo "NVMe cli tests only supported on Linux"
+ exit 1
+fi
+
+nvme_cli_build
+
+trap "kill_stub; exit 1" SIGINT SIGTERM EXIT
+start_stub "-s 2048 -i 0 -m 0xF"
+
+pushd ${DEPENDENCY_DIR}/nvme-cli
+
+sed -i 's/spdk=0/spdk=1/g' spdk.conf
+sed -i 's/shm_id=.*/shm_id=0/g' spdk.conf
+for bdf in $(get_nvme_bdfs); do
+ ./nvme list
+ ./nvme id-ctrl $bdf
+ ./nvme list-ctrl $bdf
+ ./nvme get-ns-id $bdf
+ ./nvme id-ns $bdf
+ ./nvme fw-log $bdf
+ ./nvme smart-log $bdf
+ ./nvme error-log $bdf
+ ./nvme list-ns $bdf -n 1
+ ./nvme get-feature $bdf -f 1 -s 1 -l 100
+ ./nvme get-log $bdf -i 1 -l 100
+ ./nvme reset $bdf
+done
+
+popd
+
+trap - SIGINT SIGTERM EXIT
+kill_stub
diff --git a/src/spdk/test/nvme/startup/.gitignore b/src/spdk/test/nvme/startup/.gitignore
new file mode 100644
index 000000000..efcfc5a6a
--- /dev/null
+++ b/src/spdk/test/nvme/startup/.gitignore
@@ -0,0 +1 @@
+startup
diff --git a/src/spdk/test/nvme/startup/Makefile b/src/spdk/test/nvme/startup/Makefile
new file mode 100644
index 000000000..06e5824b9
--- /dev/null
+++ b/src/spdk/test/nvme/startup/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+
+APP = startup
+
+include $(SPDK_ROOT_DIR)/mk/nvme.libtest.mk
diff --git a/src/spdk/test/nvme/startup/startup.c b/src/spdk/test/nvme/startup/startup.c
new file mode 100644
index 000000000..2d99803d3
--- /dev/null
+++ b/src/spdk/test/nvme/startup/startup.c
@@ -0,0 +1,218 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/nvme.h"
+#include "spdk/env.h"
+#include "spdk/string.h"
+
+struct ctrlr_entry {
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct ctrlr_entry *next;
+ char name[1024];
+};
+
+struct ns_entry {
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct spdk_nvme_ns *ns;
+ struct ns_entry *next;
+ struct spdk_nvme_qpair *qpair;
+};
+
+static struct ctrlr_entry *g_controllers = NULL;
+static struct ns_entry *g_namespaces = NULL;
+static int g_startup_time = 0;
+
+static bool
+probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts)
+{
+ printf("Attaching to %s\n", trid->traddr);
+
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+
+ struct ctrlr_entry *entry;
+ const struct spdk_nvme_ctrlr_data *cdata;
+
+ entry = malloc(sizeof(struct ctrlr_entry));
+ if (entry == NULL) {
+ perror("ctrlr_entry malloc");
+ exit(1);
+ }
+
+ printf("Attached to %s\n", trid->traddr);
+
+ /*
+ * spdk_nvme_ctrlr is the logical abstraction in SPDK for an NVMe
+ * controller. During initialization, the IDENTIFY data for the
+ * controller is read using an NVMe admin command, and that data
+ * can be retrieved using spdk_nvme_ctrlr_get_data() to get
+ * detailed information on the controller. Refer to the NVMe
+ * specification for more details on IDENTIFY for NVMe controllers.
+ */
+ cdata = spdk_nvme_ctrlr_get_data(ctrlr);
+
+ snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
+
+ entry->ctrlr = ctrlr;
+ entry->next = g_controllers;
+ g_controllers = entry;
+}
+
+static void
+cleanup(void)
+{
+ struct ns_entry *ns_entry = g_namespaces;
+ struct ctrlr_entry *ctrlr_entry = g_controllers;
+
+ while (ns_entry) {
+ struct ns_entry *next = ns_entry->next;
+ free(ns_entry);
+ ns_entry = next;
+ }
+
+ while (ctrlr_entry) {
+ struct ctrlr_entry *next = ctrlr_entry->next;
+
+ spdk_nvme_detach(ctrlr_entry->ctrlr);
+ free(ctrlr_entry);
+ ctrlr_entry = next;
+ }
+}
+
+static void
+usage(const char *program_name)
+{
+ printf("%s [options]", program_name);
+ printf("\n");
+ printf("options:\n");
+	printf(" -t        Maximum allowed startup time, in microseconds (must be greater than 0).\n");
+}
+
+static int
+parse_args(int argc, char **argv)
+{
+ int op;
+
+ while ((op = getopt(argc, argv, "t:")) != -1) {
+ switch (op) {
+ case 't':
+ g_startup_time = spdk_strtol(optarg, 10);
+ if (g_startup_time < 0) {
+ fprintf(stderr, "Invalid nvme startup time\n");
+ return g_startup_time;
+ }
+ break;
+ default:
+ usage(argv[0]);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int rc;
+ struct spdk_env_opts opts;
+ uint64_t start_tsc, end_tsc, tsc_diff;
+ float time_used_in_usec;
+
+ rc = parse_args(argc, argv);
+ if (rc != 0) {
+ return rc;
+ }
+
+ if (g_startup_time == 0) {
+ usage(argv[0]);
+ return 1;
+ }
+
+ start_tsc = spdk_get_ticks();
+ /*
+ * SPDK relies on an abstraction around the local environment
+ * named env that handles memory allocation and PCI device operations.
+ * This library must be initialized first.
+ *
+ */
+ spdk_env_opts_init(&opts);
+ opts.name = "startup";
+ opts.shm_id = 0;
+ if (spdk_env_init(&opts) < 0) {
+ fprintf(stderr, "Unable to initialize SPDK env\n");
+ return 1;
+ }
+
+ printf("Initializing NVMe Controllers\n");
+
+
+ /*
+ * Start the SPDK NVMe enumeration process. probe_cb will be called
+ * for each NVMe controller found, giving our application a choice on
+ * whether to attach to each controller. attach_cb will then be
+ * called for each controller after the SPDK NVMe driver has completed
+ * initializing the controller we chose to attach.
+ */
+ rc = spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL);
+ if (rc != 0) {
+ fprintf(stderr, "spdk_nvme_probe() failed\n");
+ cleanup();
+ return 1;
+ }
+
+ if (g_controllers == NULL) {
+ fprintf(stderr, "no NVMe controllers found\n");
+ return 0;
+ }
+
+ end_tsc = spdk_get_ticks();
+ tsc_diff = end_tsc - start_tsc;
+ time_used_in_usec = ((float)tsc_diff) * 1000 * 1000 / spdk_get_ticks_hz();
+ printf("Initialization complete.\n");
+ printf("Time used:%-16.3f(us).\n", time_used_in_usec);
+ if (time_used_in_usec > g_startup_time) {
+		fprintf(stderr, "Initialization took too long.\n");
+ cleanup();
+ return 1;
+ }
+ cleanup();
+ return 0;
+}
diff --git a/src/spdk/test/nvmf/README.md b/src/spdk/test/nvmf/README.md
new file mode 100644
index 000000000..19d6954c2
--- /dev/null
+++ b/src/spdk/test/nvmf/README.md
@@ -0,0 +1,5 @@
+# NVMe-oF test scripts
+
+The test scripts in this directory hierarchy can be run in isolation by passing
+the --iso flag to the test script. This will set up the RDMA NIC for testing and
+then tear it down again when the test completes.
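+
+For example (an illustrative invocation; the transport is selected with the
+--transport flag parsed by the common test scripts, and the tests are normally
+run as root from the SPDK repository root):
+
+```bash
+sudo ./test/nvmf/nvmf.sh --iso --transport=rdma
+```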
diff --git a/src/spdk/test/nvmf/common.sh b/src/spdk/test/nvmf/common.sh
new file mode 100644
index 000000000..5f52ef127
--- /dev/null
+++ b/src/spdk/test/nvmf/common.sh
@@ -0,0 +1,292 @@
+NVMF_PORT=4420
+NVMF_IP_PREFIX="192.168.100"
+NVMF_IP_LEAST_ADDR=8
+NVMF_TCP_IP_ADDRESS="127.0.0.1"
+NVMF_TRANSPORT_OPTS=""
+NVMF_SERIAL=SPDK00000000000001
+
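+# Build the nvmf target application's argument list: run it via sudo -u $USER when
+# SPDK_RUN_NON_ROOT is set, and always pass the shared memory id (-i) and -e 0xFFFF.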
+function build_nvmf_app_args() {
+ if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
+ NVMF_APP=(sudo -u "$USER" "${NVMF_APP[@]}")
+ NVMF_APP+=(-i "$NVMF_APP_SHM_ID" -e 0xFFFF)
+ else
+ NVMF_APP+=(-i "$NVMF_APP_SHM_ID" -e 0xFFFF)
+ fi
+}
+
+: ${NVMF_APP_SHM_ID="0"}
+export NVMF_APP_SHM_ID
+build_nvmf_app_args
+
+have_pci_nics=0
+
+function rxe_cfg() {
+ "$rootdir/scripts/rxe_cfg_small.sh" "$@"
+}
+
+function load_ib_rdma_modules() {
+ if [ $(uname) != Linux ]; then
+ return 0
+ fi
+
+ modprobe ib_cm
+ modprobe ib_core
+ # Newer kernels do not have the ib_ucm module
+ modprobe ib_ucm || true
+ modprobe ib_umad
+ modprobe ib_uverbs
+ modprobe iw_cm
+ modprobe rdma_cm
+ modprobe rdma_ucm
+}
+
+function detect_soft_roce_nics() {
+ rxe_cfg start
+}
+
+# Args 1 and 2 are the grep filters used to find our NICs.
+# Subsequent args are the drivers that should be loaded if we find those NICs.
+# The drivers must be supplied in the correct load order.
+function detect_nics_and_probe_drivers() {
+ NIC_VENDOR="$1"
+ NIC_CLASS="$2"
+
+ nvmf_nic_bdfs=$(lspci | grep Ethernet | grep "$NIC_VENDOR" | grep "$NIC_CLASS" | awk -F ' ' '{print "0000:"$1}')
+
+ if [ -z "$nvmf_nic_bdfs" ]; then
+ return 0
+ fi
+
+ have_pci_nics=1
+ if [ $# -ge 2 ]; then
+ # shift out the first two positional arguments.
+ shift 2
+ # Iterate through the remaining arguments.
+ for i; do
+ modprobe "$i"
+ done
+ fi
+}
+
+function detect_pci_nics() {
+
+ if ! hash lspci; then
+ return 0
+ fi
+
+ detect_nics_and_probe_drivers "Mellanox" "ConnectX-4" "mlx4_core" "mlx4_ib" "mlx4_en"
+ detect_nics_and_probe_drivers "Mellanox" "ConnectX-5" "mlx5_core" "mlx5_ib"
+ detect_nics_and_probe_drivers "Intel" "X722" "i40e" "i40iw"
+ detect_nics_and_probe_drivers "Chelsio" "Unified Wire" "cxgb4" "iw_cxgb4"
+
+ if [ "$have_pci_nics" -eq "0" ]; then
+ return 0
+ fi
+
+ # Provide time for drivers to properly load.
+ sleep 5
+}
+
+function detect_rdma_nics() {
+ detect_pci_nics
+ if [ "$have_pci_nics" -eq "0" ]; then
+ detect_soft_roce_nics
+ fi
+}
+
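+# Give each RDMA-capable interface an address in the $NVMF_IP_PREFIX.0/24 subnet,
+# starting at .$NVMF_IP_LEAST_ADDR, skipping interfaces that already have an address;
+# newly configured interfaces are also brought up.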
+function allocate_nic_ips() {
+ ((count = NVMF_IP_LEAST_ADDR))
+ for nic_name in $(get_rdma_if_list); do
+ ip="$(get_ip_address $nic_name)"
+ if [ -z $ip ]; then
+ ip addr add $NVMF_IP_PREFIX.$count/24 dev $nic_name
+ ip link set $nic_name up
+ ((count = count + 1))
+ fi
+ # dump configuration for debug log
+ ip addr show $nic_name
+ done
+}
+
+function get_available_rdma_ips() {
+ for nic_name in $(get_rdma_if_list); do
+ get_ip_address $nic_name
+ done
+}
+
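+# List the network interface names backing each device under /sys/class/infiniband.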
+function get_rdma_if_list() {
+ for nic_type in /sys/class/infiniband/*; do
+ [[ -e "$nic_type" ]] || break
+ for nic_name in /sys/class/infiniband/"$(basename ${nic_type})"/device/net/*; do
+ [[ -e "$nic_name" ]] || break
+ basename "$nic_name"
+ done
+ done
+}
+
+function get_ip_address() {
+ interface=$1
+ ip -o -4 addr show $interface | awk '{print $4}' | cut -d"/" -f1
+}
+
+function nvmfcleanup() {
+ sync
+ set +e
+ for i in {1..20}; do
+ modprobe -v -r nvme-$TEST_TRANSPORT
+ if modprobe -v -r nvme-fabrics; then
+ set -e
+ return 0
+ fi
+ sleep 1
+ done
+ set -e
+
+	# So far we have been unable to remove the kernel modules. Try
+	# one more time and let it fail.
+	# Allow the transport module removal to fail for now; see the comment
+	# about the nvme-tcp module in nvmftestinit below.
+ modprobe -v -r nvme-$TEST_TRANSPORT || true
+ modprobe -v -r nvme-fabrics
+}
+
+function nvmftestinit() {
+ if [ -z $TEST_TRANSPORT ]; then
+ echo "transport not specified - use --transport= to specify"
+ return 1
+ fi
+ if [ "$TEST_MODE" == "iso" ]; then
+ $rootdir/scripts/setup.sh
+ if [ "$TEST_TRANSPORT" == "rdma" ]; then
+ rdma_device_init
+ fi
+ fi
+
+ NVMF_TRANSPORT_OPTS="-t $TEST_TRANSPORT"
+ if [ "$TEST_TRANSPORT" == "rdma" ]; then
+ RDMA_IP_LIST=$(get_available_rdma_ips)
+ NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+ NVMF_SECOND_TARGET_IP=$(echo "$RDMA_IP_LIST" | tail -n +2 | head -n 1)
+ if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+ fi
+ elif [ "$TEST_TRANSPORT" == "tcp" ]; then
+ NVMF_FIRST_TARGET_IP=127.0.0.1
+ NVMF_TRANSPORT_OPTS="$NVMF_TRANSPORT_OPTS -o"
+ fi
+
+	# Currently we run the host/perf test for TCP even on systems without kernel nvme-tcp
+	# support; that's fine since the host/perf test uses the SPDK initiator.
+	# We may enforce that modprobe succeeds later, once we have systems in the test pool
+	# with nvme-tcp kernel support - but until then let this pass so we can still run the
+	# host/perf test with the tcp transport.
+ modprobe nvme-$TEST_TRANSPORT || true
+}
+
+function nvmfappstart() {
+ timing_enter start_nvmf_tgt
+ "${NVMF_APP[@]}" "$@" &
+ nvmfpid=$!
+ trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $nvmfpid
+ timing_exit start_nvmf_tgt
+}
+
+function nvmftestfini() {
+ nvmfcleanup || :
+ if [ -n "$nvmfpid" ]; then
+ killprocess $nvmfpid
+ fi
+ if [ "$TEST_MODE" == "iso" ]; then
+ $rootdir/scripts/setup.sh reset
+ if [ "$TEST_TRANSPORT" == "rdma" ]; then
+ rdma_device_init
+ fi
+ fi
+}
+
+function rdma_device_init() {
+ load_ib_rdma_modules
+ detect_rdma_nics
+ allocate_nic_ips
+}
+
+function revert_soft_roce() {
+ rxe_cfg stop
+}
+
+function check_ip_is_soft_roce() {
+ rxe_cfg status rxe | grep -wq "$1"
+}
+
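+# Wrap `nvme connect` and wait up to 10 seconds for a new controller to show up in
+# `nvme list` before returning; returns non-zero if none appears.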
+function nvme_connect() {
+ local init_count
+ init_count=$(nvme list | wc -l)
+
+ if ! nvme connect "$@"; then return $?; fi
+
+ for i in $(seq 1 10); do
+ if [ $(nvme list | wc -l) -gt $init_count ]; then
+ return 0
+ else
+ sleep 1s
+ fi
+ done
+ return 1
+}
+
+function get_nvme_devs() {
+ local dev rest
+
+ nvmes=()
+ while read -r dev rest; do
+ if [[ $dev == /dev/nvme* ]]; then
+ nvmes+=("$dev")
+ fi
+ if [[ $1 == print ]]; then
+ echo "$dev $rest"
+ fi
+ done < <(nvme list)
+ ((${#nvmes[@]})) || return 1
+ echo "${#nvmes[@]}" >&2
+}
+
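+# Emit a bdev-subsystem JSON config that attaches one NVMe-oF controller per subsystem
+# number passed in (default: a single "1", i.e. Nvme1 -> cnode1). Callers typically feed
+# the output straight to a bdev application, e.g. (illustrative):
+#   bdevperf --json <(gen_nvmf_target_json) -q 128 -o 4096 -w verify -t 1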
+function gen_nvmf_target_json() {
+ local subsystem config=()
+
+ for subsystem in "${@:-1}"; do
+ config+=(
+ "$(
+ cat <<- EOF
+ {
+ "params": {
+ "name": "Nvme$subsystem",
+ "trtype": "$TEST_TRANSPORT",
+ "traddr": "$NVMF_FIRST_TARGET_IP",
+ "adrfam": "ipv4",
+ "trsvcid": "$NVMF_PORT",
+ "subnqn": "nqn.2016-06.io.spdk:cnode$subsystem"
+ },
+ "method": "bdev_nvme_attach_controller"
+ }
+ EOF
+ )"
+ )
+ done
+ jq . <<- JSON
+ {
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ $(
+ IFS=","
+ printf '%s\n' "${config[*]}"
+ )
+ ]
+ }
+ ]
+ }
+ JSON
+}
diff --git a/src/spdk/test/nvmf/host/aer.sh b/src/spdk/test/nvmf/host/aer.sh
new file mode 100755
index 000000000..1c438c686
--- /dev/null
+++ b/src/spdk/test/nvmf/host/aer.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+$rpc_py bdev_malloc_create 64 512 --name Malloc0
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 -m 2
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+$rpc_py nvmf_get_subsystems
+
+AER_TOUCH_FILE=/tmp/aer_touch_file
+rm -f $AER_TOUCH_FILE
+
+# Namespace Attribute Notice Tests
+$rootdir/test/nvme/aer/aer -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2016-06.io.spdk:cnode1" -n 2 -t $AER_TOUCH_FILE &
+aerpid=$!
+
+# Wait for the aer application to start up
+waitforfile $AER_TOUCH_FILE
+
+# Add a new namespace
+$rpc_py bdev_malloc_create 64 4096 --name Malloc1
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 2
+$rpc_py nvmf_get_subsystems
+
+wait $aerpid
+
+$rpc_py bdev_malloc_delete Malloc0
+$rpc_py bdev_malloc_delete Malloc1
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/host/bdevperf.sh b/src/spdk/test/nvmf/host/bdevperf.sh
new file mode 100755
index 000000000..776550c4d
--- /dev/null
+++ b/src/spdk/test/nvmf/host/bdevperf.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+function tgt_init() {
+ nvmfappstart -m 0xF
+
+ $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+ $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+}
+
+nvmftestinit
+# There is an intermittent error relating to this test and Soft-RoCE. For now, just
+# skip this test if we are using rxe. TODO: get to the bottom of GitHub issue #1165.
+if [ $TEST_TRANSPORT == "rdma" ] && check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
+ echo "Using software RDMA, skipping the host bdevperf tests."
+ exit 0
+fi
+
+tgt_init
+
+"$rootdir/test/bdev/bdevperf/bdevperf" --json <(gen_nvmf_target_json) -q 128 -o 4096 -w verify -t 1
+
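+# Verify that bdevperf survives a target restart: run a longer job in the background,
+# kill the target while I/O is in flight, bring it back up with the same configuration,
+# and wait for the background job to finish.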
+"$rootdir/test/bdev/bdevperf/bdevperf" --json <(gen_nvmf_target_json) -q 128 -o 4096 -w verify -t 15 -f &
+bdevperfpid=$!
+
+sleep 3
+kill -9 $nvmfpid
+
+sleep 3
+tgt_init
+
+wait $bdevperfpid
+sync
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/host/fio.sh b/src/spdk/test/nvmf/host/fio.sh
new file mode 100755
index 000000000..85f9a00f1
--- /dev/null
+++ b/src/spdk/test/nvmf/host/fio.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/scripts/common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+if [[ $CONFIG_FIO_PLUGIN != y ]]; then
+ echo "FIO not available"
+ exit 1
+fi
+
+timing_enter start_nvmf_tgt
+
+"${NVMF_APP[@]}" -m 0xF &
+nvmfpid=$!
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+timing_exit start_nvmf_tgt
+
+$rpc_py bdev_malloc_create 64 512 -b Malloc1
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
+
+# Test fio_plugin as host with malloc backend
+fio_nvme $PLUGIN_DIR/example_config.fio --filename="trtype=$TEST_TRANSPORT adrfam=IPv4 \
+traddr=$NVMF_FIRST_TARGET_IP trsvcid=$NVMF_PORT ns=1"
+
+# second test mocking multiple SGL elements
+fio_nvme $PLUGIN_DIR/mock_sgl_config.fio --filename="trtype=$TEST_TRANSPORT adrfam=IPv4 \
+traddr=$NVMF_FIRST_TARGET_IP trsvcid=$NVMF_PORT ns=1"
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ # Test fio_plugin as host with nvme lvol backend
+ bdfs=$(get_nvme_bdfs)
+ $rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a $(echo $bdfs | awk '{ print $1 }') -i $NVMF_FIRST_TARGET_IP
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore -c 1073741824 Nvme0n1 lvs_0)
+ get_lvs_free_mb $ls_guid
+ $rpc_py bdev_lvol_create -l lvs_0 lbd_0 $free_mb
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode2 -a -s SPDK00000000000001
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode2 lvs_0/lbd_0
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode2 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ fio_nvme $PLUGIN_DIR/example_config.fio --filename="trtype=$TEST_TRANSPORT adrfam=IPv4 \
+ traddr=$NVMF_FIRST_TARGET_IP trsvcid=$NVMF_PORT ns=1"
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode2
+
+ # Test fio_plugin as host with nvme lvol nested backend
+ ls_nested_guid=$($rpc_py bdev_lvol_create_lvstore --clear-method none lvs_0/lbd_0 lvs_n_0)
+ get_lvs_free_mb $ls_nested_guid
+ $rpc_py bdev_lvol_create -l lvs_n_0 lbd_nest_0 $free_mb
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode3 -a -s SPDK00000000000001
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode3 lvs_n_0/lbd_nest_0
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode3 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ fio_nvme $PLUGIN_DIR/example_config.fio --filename="trtype=$TEST_TRANSPORT adrfam=IPv4 \
+ traddr=$NVMF_FIRST_TARGET_IP trsvcid=$NVMF_PORT ns=1"
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode3
+
+ sync
+ # Delete lvol_bdev and destroy lvol_store.
+ $rpc_py bdev_lvol_delete lvs_n_0/lbd_nest_0
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_n_0
+ $rpc_py bdev_lvol_delete lvs_0/lbd_0
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_0
+ $rpc_py bdev_nvme_detach_controller Nvme0
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+rm -f ./local-test-0-verify.state
+nvmftestfini
diff --git a/src/spdk/test/nvmf/host/identify.sh b/src/spdk/test/nvmf/host/identify.sh
new file mode 100755
index 000000000..412626388
--- /dev/null
+++ b/src/spdk/test/nvmf/host/identify.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+timing_enter start_nvmf_tgt
+
+"${NVMF_APP[@]}" -m 0xF &
+nvmfpid=$!
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+timing_exit start_nvmf_tgt
+
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+# NOTE: This will assign the same NGUID and EUI64 to all bdevs,
+# but currently we only have one (see above), so this is OK.
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0 \
+ --nguid "ABCDEF0123456789ABCDEF0123456789" \
+ --eui64 "ABCDEF0123456789"
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+$rpc_py nvmf_get_subsystems
+
+$SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2014-08.org.nvmexpress.discovery" -L all
+$SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2016-06.io.spdk:cnode1" -L all
+sync
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/host/identify_kernel_nvmf.sh b/src/spdk/test/nvmf/host/identify_kernel_nvmf.sh
new file mode 100755
index 000000000..d6dd2916b
--- /dev/null
+++ b/src/spdk/test/nvmf/host/identify_kernel_nvmf.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+nvmftestinit
+
+subsystemname=nqn.2016-06.io.spdk:testnqn
+
+modprobe null_blk nr_devices=1
+modprobe nvmet
+modprobe nvmet-rdma
+modprobe nvmet-fc
+modprobe lpfc
+
+if [ ! -d /sys/kernel/config/nvmet/subsystems/$subsystemname ]; then
+ mkdir /sys/kernel/config/nvmet/subsystems/$subsystemname
+fi
+echo 1 > /sys/kernel/config/nvmet/subsystems/$subsystemname/attr_allow_any_host
+
+if [ ! -d /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1 ]; then
+ mkdir /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1
+fi
+
+echo -n /dev/nullb0 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/device_path
+echo 1 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/enable
+
+if [ ! -d /sys/kernel/config/nvmet/ports/1 ]; then
+ mkdir /sys/kernel/config/nvmet/ports/1
+fi
+
+echo -n rdma > /sys/kernel/config/nvmet/ports/1/addr_trtype
+echo -n ipv4 > /sys/kernel/config/nvmet/ports/1/addr_adrfam
+echo -n $NVMF_FIRST_TARGET_IP > /sys/kernel/config/nvmet/ports/1/addr_traddr
+echo -n $NVMF_PORT > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
+
+ln -s /sys/kernel/config/nvmet/subsystems/$subsystemname /sys/kernel/config/nvmet/ports/1/subsystems/$subsystemname
+
+sleep 4
+
+$SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2014-08.org.nvmexpress.discovery" -t all
+$SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:$subsystemname"
+
+rm -rf /sys/kernel/config/nvmet/ports/1/subsystems/$subsystemname
+
+echo 0 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/enable
+echo -n 0 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/device_path
+
+rmdir --ignore-fail-on-non-empty /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1
+rmdir --ignore-fail-on-non-empty /sys/kernel/config/nvmet/subsystems/$subsystemname
+rmdir --ignore-fail-on-non-empty /sys/kernel/config/nvmet/ports/1
+
+rmmod lpfc
+rmmod nvmet_fc
+rmmod nvmet-rdma
+rmmod null_blk
+rmmod nvmet
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/host/perf.sh b/src/spdk/test/nvmf/host/perf.sh
new file mode 100755
index 000000000..69fa28f0b
--- /dev/null
+++ b/src/spdk/test/nvmf/host/perf.sh
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
+
+local_nvme_trid="trtype:PCIe traddr:"$($rpc_py framework_get_config bdev | jq -r '.[].params | select(.name=="Nvme0").traddr')
+bdevs="$bdevs $($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+if [ -n "$local_nvme_trid" ]; then
+ bdevs="$bdevs Nvme0n1"
+fi
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Test multi-process access to local NVMe device
+if [ -n "$local_nvme_trid" ]; then
+ if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
+ perf_app="sudo -u $(logname) $SPDK_EXAMPLE_DIR/perf"
+ else
+ perf_app="$SPDK_EXAMPLE_DIR/perf"
+ fi
+ $perf_app -i $NVMF_APP_SHM_ID -q 32 -o 4096 -w randrw -M 50 -t 1 -r "$local_nvme_trid"
+fi
+
+$SPDK_EXAMPLE_DIR/perf -q 1 -o 4096 -w randrw -M 50 -t 1 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+$SPDK_EXAMPLE_DIR/perf -q 32 -o 4096 -w randrw -M 50 -t 1 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+$SPDK_EXAMPLE_DIR/perf -q 128 -o 262144 -w randrw -M 50 -t 2 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+sync
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ # Configure nvme devices with nvmf lvol_bdev backend
+ if [ -n "$local_nvme_trid" ]; then
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore Nvme0n1 lvs_0)
+ get_lvs_free_mb $ls_guid
+ # We don't need to create an lvol larger than 20G for this test.
+		# Decreasing the size of the nested lvol reduces setup time before running I/O.
+ if [ $free_mb -gt 20480 ]; then
+ free_mb=20480
+ fi
+ lb_guid=$($rpc_py bdev_lvol_create -u $ls_guid lbd_0 $free_mb)
+
+ # Create lvol bdev for nested lvol stores
+ ls_nested_guid=$($rpc_py bdev_lvol_create_lvstore $lb_guid lvs_n_0)
+ get_lvs_free_mb $ls_nested_guid
+ if [ $free_mb -gt 20480 ]; then
+ free_mb=20480
+ fi
+ lb_nested_guid=$($rpc_py bdev_lvol_create -u $ls_nested_guid lbd_nest_0 $free_mb)
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+ for bdev in $lb_nested_guid; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+ done
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ # Test perf as host with different io_size and qd_depth in nightly
+ qd_depth=("1" "32" "128")
+ io_size=("512" "131072")
+ for qd in "${qd_depth[@]}"; do
+ for o in "${io_size[@]}"; do
+ $SPDK_EXAMPLE_DIR/perf -q $qd -o $o -w randrw -M 50 -t 10 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+ done
+ done
+
+ # Delete subsystems, lvol_bdev and destroy lvol_store.
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+ $rpc_py bdev_lvol_delete "$lb_nested_guid"
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_n_0
+ $rpc_py bdev_lvol_delete "$lb_guid"
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_0
+ fi
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/host/target_disconnect.sh b/src/spdk/test/nvmf/host/target_disconnect.sh
new file mode 100755
index 000000000..82521196b
--- /dev/null
+++ b/src/spdk/test/nvmf/host/target_disconnect.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+function disconnect_init() {
+ nvmfappstart -m 0xF0
+
+ $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+
+ $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $1 -s $NVMF_PORT
+}
+
+# Test to make sure we don't segfault or access null pointers when we try to connect to
+# a discovery controller that doesn't exist yet.
+function nvmf_target_disconnect_tc1() {
+ set +e
+ $SPDK_EXAMPLE_DIR/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
+ -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+ # If the program crashes, the high bit of $? will be set so we will get a value in the hundreds.
+ # But if the reconnect code detects errors and exits normally it will return 1.
+ if [ $? != 1 ]; then
+ set -e
+ exit 1
+ fi
+ set -e
+}
+
+function nvmf_target_disconnect_tc2() {
+ disconnect_init $NVMF_FIRST_TARGET_IP
+
+ # If perf doesn't shut down, this test will time out.
+ $SPDK_EXAMPLE_DIR/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
+ -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" &
+ reconnectpid=$!
+
+ sleep 2
+ kill -9 $nvmfpid
+
+ sleep 2
+ disconnect_init $NVMF_FIRST_TARGET_IP
+
+ wait $reconnectpid
+ sync
+}
+
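+# Same flow as tc2, but the reconnect app is also given alt_traddr, so after the first
+# target is killed it should fail over to the second target IP.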
+function nvmf_target_disconnect_tc3() {
+ $SPDK_EXAMPLE_DIR/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
+ -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT alt_traddr:$NVMF_SECOND_TARGET_IP" &
+ reconnectpid=$!
+
+ sleep 2
+ kill -9 $nvmfpid
+
+ sleep 2
+ disconnect_init $NVMF_SECOND_TARGET_IP
+
+ wait $reconnectpid
+ sync
+}
+
+nvmftestinit
+# There is an intermittent error relating to this test and Soft-RoCE. For now, just
+# skip this test if we are using rxe. TODO: get to the bottom of GitHub issue #1043.
+if [ $TEST_TRANSPORT == "rdma" ] && check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
+ echo "Using software RDMA, skipping the target disconnect tests."
+else
+ run_test "nvmf_target_disconnect_tc1" nvmf_target_disconnect_tc1
+ run_test "nvmf_target_disconnect_tc2" nvmf_target_disconnect_tc2
+ if [ -n "$NVMF_SECOND_TARGET_IP" ]; then
+ run_test "nvmf_target_disconnect_tc3" nvmf_target_disconnect_tc3
+ fi
+fi
+
+trap - SIGINT SIGTERM EXIT
+nvmftestfini
diff --git a/src/spdk/test/nvmf/nvmf.sh b/src/spdk/test/nvmf/nvmf.sh
new file mode 100755
index 000000000..b33d55cff
--- /dev/null
+++ b/src/spdk/test/nvmf/nvmf.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+if [ ! $(uname -s) = Linux ]; then
+ exit 0
+fi
+
+source $rootdir/test/nvmf/common.sh
+
+trap "exit 1" SIGINT SIGTERM EXIT
+
+TEST_ARGS=("$@")
+
+run_test "nvmf_example" test/nvmf/target/nvmf_example.sh "${TEST_ARGS[@]}"
+run_test "nvmf_filesystem" test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}"
+run_test "nvmf_discovery" test/nvmf/target/discovery.sh "${TEST_ARGS[@]}"
+run_test "nvmf_connect_disconnect" test/nvmf/target/connect_disconnect.sh "${TEST_ARGS[@]}"
+if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then
+ run_test "nvmf_nvme_cli" test/nvmf/target/nvme_cli.sh "${TEST_ARGS[@]}"
+fi
+run_test "nvmf_lvol" test/nvmf/target/nvmf_lvol.sh "${TEST_ARGS[@]}"
+#TODO: disabled due to intermittent failures. Need to triage.
+# run_test "nvmf_srq_overwhelm" test/nvmf/target/srq_overwhelm.sh $TEST_ARGS
+run_test "nvmf_vhost" test/nvmf/target/nvmf_vhost.sh "${TEST_ARGS[@]}"
+run_test "nvmf_bdev_io_wait" test/nvmf/target/bdev_io_wait.sh "${TEST_ARGS[@]}"
+run_test "nvmf_create_transport." test/nvmf/target/create_transport.sh "${TEST_ARGS[@]}"
+run_test "nvmf_multitarget" test/nvmf/target/multitarget.sh "${TEST_ARGS[@]}"
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ run_test "nvmf_fuzz" test/nvmf/target/fuzz.sh "${TEST_ARGS[@]}"
+ run_test "nvmf_multiconnection" test/nvmf/target/multiconnection.sh "${TEST_ARGS[@]}"
+ run_test "nvmf_initiator_timeout" test/nvmf/target/initiator_timeout.sh "${TEST_ARGS[@]}"
+fi
+
+run_test "nvmf_nmic" test/nvmf/target/nmic.sh "${TEST_ARGS[@]}"
+run_test "nvmf_rpc" test/nvmf/target/rpc.sh "${TEST_ARGS[@]}"
+run_test "nvmf_fio" test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
+run_test "nvmf_shutdown" test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}"
+run_test "nvmf_bdevio" test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"
+run_test "nvmf_invalid" test/nvmf/target/invalid.sh "${TEST_ARGS[@]}"
+run_test "nvmf_abort" test/nvmf/target/abort.sh "${TEST_ARGS[@]}"
+
+timing_enter host
+
+run_test "nvmf_bdevperf" test/nvmf/host/bdevperf.sh "${TEST_ARGS[@]}"
+run_test "nvmf_identify" test/nvmf/host/identify.sh "${TEST_ARGS[@]}"
+run_test "nvmf_perf" test/nvmf/host/perf.sh "${TEST_ARGS[@]}"
+
+# TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
+#run_test test/nvmf/host/identify_kernel_nvmf.sh $TEST_ARGS
+run_test "nvmf_aer" test/nvmf/host/aer.sh "${TEST_ARGS[@]}"
+run_test "nvmf_fio" test/nvmf/host/fio.sh "${TEST_ARGS[@]}"
+run_test "nvmf_target_disconnect" test/nvmf/host/target_disconnect.sh "${TEST_ARGS[@]}"
+
+timing_exit host
+
+trap - SIGINT SIGTERM EXIT
+revert_soft_roce
diff --git a/src/spdk/test/nvmf/target/abort.sh b/src/spdk/test/nvmf/target/abort.sh
new file mode 100755
index 000000000..913c17e19
--- /dev/null
+++ b/src/spdk/test/nvmf/target/abort.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=4096
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xE
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+# Construct a delay bdev on top of a malloc bdev with a constant 1 second (1000000 us) delay for all read and write I/Os
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py bdev_delay_create -b Malloc0 -d Delay0 -r 1000000 -t 1000000 -w 1000000 -n 1000000
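+# The large constant delay keeps I/O outstanding long enough for the abort example
+# below to have commands in flight to abort.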
+
+# Create an NVMe-oF subsystem and add the delay bdev as a namespace
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -a -s SPDK0
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 Delay0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Run abort application
+$SPDK_EXAMPLE_DIR/abort -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" -c 0x1
+
+# Clean up
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode0
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/bdev_io_wait.sh b/src/spdk/test/nvmf/target/bdev_io_wait.sh
new file mode 100755
index 000000000..e57ffc36d
--- /dev/null
+++ b/src/spdk/test/nvmf/target/bdev_io_wait.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF --wait-for-rpc
+
+# Use a minimal bdev_io pool size (5) and cache size (1)
+$rpc_py bdev_set_options -p 5 -c 1
+$rpc_py framework_start_init
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
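+# Run four bdevperf workloads (write, read, flush, unmap) concurrently; with the tiny
+# bdev_io pool configured above, this is intended to exercise the target's bdev_io_wait
+# handling when bdev_io allocations temporarily fail.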
+"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x10 -i 1 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w write -t 1 &
+WRITE_PID=$!
+"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x20 -i 2 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w read -t 1 &
+READ_PID=$!
+"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x40 -i 3 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w flush -t 1 &
+FLUSH_PID=$!
+"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x80 -i 4 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w unmap -t 1 &
+UNMAP_PID=$!
+sync
+
+wait $WRITE_PID
+wait $READ_PID
+wait $FLUSH_PID
+wait $UNMAP_PID
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/bdevio.sh b/src/spdk/test/nvmf/target/bdevio.sh
new file mode 100755
index 000000000..f4d7eb1b5
--- /dev/null
+++ b/src/spdk/test/nvmf/target/bdevio.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+# Don't use cores 0 - 2 to avoid overlap with bdevio.
+nvmfappstart -m 0x78
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+$rootdir/test/bdev/bdevio/bdevio --json <(gen_nvmf_target_json)
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/connect_disconnect.sh b/src/spdk/test/nvmf/target/connect_disconnect.sh
new file mode 100755
index 000000000..b74394123
--- /dev/null
+++ b/src/spdk/test/nvmf/target/connect_disconnect.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+# The connect/disconnect test is geared towards ensuring that we properly free resources after disconnecting qpairs.
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -c 0
+
+bdev="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ num_iterations=200
+ IO_QUEUES="-i 8"
+else
+ num_iterations=10
+fi
+
+set +x
+for i in $(seq 1 $num_iterations); do
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" $IO_QUEUES
+ waitforserial "$NVMF_SERIAL"
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
+ waitforserial_disconnect "$NVMF_SERIAL"
+done
+set -x
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/create_transport.sh b/src/spdk/test/nvmf/target/create_transport.sh
new file mode 100755
index 000000000..e2766467b
--- /dev/null
+++ b/src/spdk/test/nvmf/target/create_transport.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+NULL_BDEV_SIZE=102400
+NULL_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+if ! hash nvme; then
+ echo "nvme command not found; skipping create transport test"
+ exit 0
+fi
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+# Use nvmf_create_transport call to create transport
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+null_bdevs="$($rpc_py bdev_null_create Null0 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE) "
+null_bdevs+="$($rpc_py bdev_null_create Null1 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE)"
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for null_bdev in $null_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $null_bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+echo "Perform nvmf subsystem discovery via RPC"
+$rpc_py nvmf_get_subsystems
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+for null_bdev in $null_bdevs; do
+ $rpc_py bdev_null_delete $null_bdev
+done
+
+check_bdevs=$($rpc_py bdev_get_bdevs | jq -r '.[].name')
+if [ -n "$check_bdevs" ]; then
+ echo $check_bdevs
+ exit 1
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/discovery.sh b/src/spdk/test/nvmf/target/discovery.sh
new file mode 100755
index 000000000..ad5a6ce96
--- /dev/null
+++ b/src/spdk/test/nvmf/target/discovery.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+NULL_BDEV_SIZE=102400
+NULL_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+if ! hash nvme; then
+ echo "nvme command not found; skipping discovery test"
+ exit 0
+fi
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+# Use at least 4 subsystems so they spill over to a second discovery log page
+for i in $(seq 1 4); do
+ $rpc_py bdev_null_create Null$i $NULL_BDEV_SIZE $NULL_BLOCK_SIZE
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK0000000000000$i
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Null$i
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+done
+
+nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+echo "Perform nvmf subsystem discovery via RPC"
+$rpc_py nvmf_get_subsystems
+
+for i in $(seq 1 4); do
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode$i
+ $rpc_py bdev_null_delete Null$i
+done
+
+check_bdevs=$($rpc_py bdev_get_bdevs | jq -r '.[].name')
+if [ -n "$check_bdevs" ]; then
+ echo $check_bdevs
+ exit 1
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/filesystem.sh b/src/spdk/test/nvmf/target/filesystem.sh
new file mode 100755
index 000000000..ff819fdb6
--- /dev/null
+++ b/src/spdk/test/nvmf/target/filesystem.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+function nvmf_filesystem_create() {
+ fstype=$1
+ nvme_name=$2
+
+ make_filesystem ${fstype} /dev/${nvme_name}p1
+
+ mount /dev/${nvme_name}p1 /mnt/device
+ touch /mnt/device/aaa
+ sync
+ rm /mnt/device/aaa
+ sync
+
+ i=0
+ while ! umount /mnt/device; do
+ [ $i -lt 15 ] || break
+ i=$((i + 1))
+ sleep 1
+ done
+
+ # Make sure the target did not crash
+ kill -0 $nvmfpid
+
+ # Make sure the device is still present
+ lsblk -l -o NAME | grep -q -w "${nvme_name}"
+
+ # Make sure the partition is still present
+ lsblk -l -o NAME | grep -q -w "${nvme_name}p1"
+}
+
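+# Run the filesystem tests with the given in-capsule data size (forwarded to
+# nvmf_create_transport -c); 0 disables in-capsule data.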
+function nvmf_filesystem_part() {
+ incapsule=$1
+
+ nvmfappstart -m 0xF
+
+ $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -c $incapsule
+ $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+ waitforserial "$NVMF_SERIAL"
+ nvme_name=$(lsblk -l -o NAME,SERIAL | grep -oP "([\w]*)(?=\s+${NVMF_SERIAL})")
+
+ mkdir -p /mnt/device
+
+ parted -s /dev/${nvme_name} mklabel msdos mkpart primary '0%' '100%'
+ partprobe
+ sleep 1
+
+ if [ $incapsule -eq 0 ]; then
+ run_test "filesystem_ext4" nvmf_filesystem_create "ext4" ${nvme_name}
+ run_test "filesystem_btrfs" nvmf_filesystem_create "btrfs" ${nvme_name}
+ run_test "filesystem_xfs" nvmf_filesystem_create "xfs" ${nvme_name}
+ else
+ run_test "filesystem_incapsule_ext4" nvmf_filesystem_create "ext4" ${nvme_name}
+ run_test "filesystem_incapsule_btrfs" nvmf_filesystem_create "btrfs" ${nvme_name}
+ run_test "filesystem_incapsule_xfs" nvmf_filesystem_create "xfs" ${nvme_name}
+ fi
+
+ parted -s /dev/${nvme_name} rm 1
+
+ sync
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+ trap - SIGINT SIGTERM EXIT
+
+ killprocess $nvmfpid
+ nvmfpid=
+}
+
+run_test "nvmf_filesystem_no_incapsule" nvmf_filesystem_part 0
+run_test "nvmf_filesystem_incapsule" nvmf_filesystem_part 4096
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/fio.sh b/src/spdk/test/nvmf/target/fio.sh
new file mode 100755
index 000000000..4e98d7083
--- /dev/null
+++ b/src/spdk/test/nvmf/target/fio.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+# Create a RAID-0 bdev from two malloc bdevs
+raid_malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+raid_malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+$rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$raid_malloc_bdevs"
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Append the raid0 bdev into subsystem
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 raid0
+
+nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+waitforserial $NVMF_SERIAL 3
+
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 1 -v
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t randwrite -r 1 -v
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t write -r 1 -v
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t randwrite -r 1 -v
+
+sync
+
+#start hotplug test case
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t read -r 10 &
+fio_pid=$!
+
+sleep 3
+
+$rpc_py bdev_raid_delete "raid0"
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py bdev_malloc_delete "$malloc_bdev"
+done
+
+fio_status=0
+wait $fio_pid || fio_status=$?
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+if [ $fio_status -eq 0 ]; then
+ echo "nvmf hotplug test: fio successful - expected failure"
+ nvmftestfini
+ exit 1
+else
+ echo "nvmf hotplug test: fio failed as expected"
+fi
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+rm -f ./local-job0-0-verify.state
+rm -f ./local-job1-1-verify.state
+rm -f ./local-job2-2-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/fuzz.sh b/src/spdk/test/nvmf/target/fuzz.sh
new file mode 100755
index 000000000..5a18be856
--- /dev/null
+++ b/src/spdk/test/nvmf/target/fuzz.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+"${NVMF_APP[@]}" -m 0xF > $output_dir/nvmf_fuzz_tgt_output.txt 2>&1 &
+nvmfpid=$!
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; rm -f $testdir/nvmf_fuzz.conf; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+$rpc_py bdev_malloc_create -b Malloc0 64 512
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
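+# Write a minimal legacy INI-style config describing the target's transport ID; the
+# nvme_fuzz app consumes it via -C below.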
+echo "[Nvme]" > $testdir/nvmf_fuzz.conf
+echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT\" Nvme0" >> $testdir/nvmf_fuzz.conf
+
+# Note that we chose a fixed seed so that this test is reproducible across nightly builds.
+$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t 30 -S 123456 -C $testdir/nvmf_fuzz.conf -N -a 2> $output_dir/nvmf_fuzz_logs1.txt
+# We don't specify a seed for this test. Instead we run a static list of commands from example.json.
+$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -C $testdir/nvmf_fuzz.conf -j $rootdir/test/app/fuzz/nvme_fuzz/example.json -a 2> $output_dir/nvmf_fuzz_logs2.txt
+
+rm -f $testdir/nvmf_fuzz.conf
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $nvmfpid
+nvmfpid=
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/identify_passthru.sh b/src/spdk/test/nvmf/target/identify_passthru.sh
new file mode 100755
index 000000000..2ce52fe38
--- /dev/null
+++ b/src/spdk/test/nvmf/target/identify_passthru.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+source $rootdir/scripts/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+timing_enter nvme_identify
+
+bdf=$(get_first_nvme_bdf)
+if [ -z "${bdf}" ]; then
+ echo "No NVMe drive found but test requires it. Failing the test."
+ exit 1
+fi
+
+# Expected values
+nvme_serial_number=$($SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:${bdf}" -i 0 | grep "Serial Number:" | awk '{print $3}')
+nvme_model_number=$($SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:${bdf}" -i 0 | grep "Model Number:" | awk '{print $3}')
+
+timing_exit nvme_identify
+
+timing_enter start_nvmf_tgt
+
+"${NVMF_APP[@]}" -m 0xF --wait-for-rpc &
+nvmfpid=$!
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
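+# Enable identify passthrough so the subsystem reports the underlying PCIe controller's
+# identify data; the serial and model numbers seen over the fabric are compared against
+# the local values below.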
+$rpc_py -v nvmf_set_config --passthru-identify-ctrlr
+$rpc_py -v framework_start_init
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+timing_exit start_nvmf_tgt
+
+$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a ${bdf}
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+$rpc_py nvmf_get_subsystems
+
+# Discovered values
+nvmf_serial_number=$($SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2016-06.io.spdk:cnode1" | grep "Serial Number:" | awk '{print $3}')
+
+nvmf_model_number=$($SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2016-06.io.spdk:cnode1" | grep "Model Number:" | awk '{print $3}')
+
+if [ ${nvme_serial_number} != ${nvmf_serial_number} ]; then
+ echo "Serial number doesn't match"
+ exit 1
+fi
+
+if [ ${nvme_model_number} != ${nvmf_model_number} ]; then
+ echo "Model number doesn't match"
+ exit 1
+fi
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/initiator_timeout.sh b/src/spdk/test/nvmf/target/initiator_timeout.sh
new file mode 100755
index 000000000..199983be5
--- /dev/null
+++ b/src/spdk/test/nvmf/target/initiator_timeout.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
+
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+
+# We cannot configure the bdev with an incredibly high latency up front because connect will not work properly.
+$rpc_py bdev_delay_create -b Malloc0 -d Delay0 -r 30 -t 30 -w 30 -n 30
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Delay0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+waitforserial "$NVMF_SERIAL"
+
+# Once our timed-out I/O completes, we will still have roughly 10 seconds of I/O left in the fio run.
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 60 -v &
+fio_pid=$!
+
+sleep 3
+
+# The kernel initiator has a default timeout of 30 seconds. Delay for 31 seconds to trigger an initiator reconnect.
+$rpc_py bdev_delay_update_latency Delay0 avg_read 31000000
+$rpc_py bdev_delay_update_latency Delay0 avg_write 31000000
+$rpc_py bdev_delay_update_latency Delay0 p99_read 31000000
+$rpc_py bdev_delay_update_latency Delay0 p99_write 31000000
+
+sleep 3
+
+# Reset these values so that subsequent I/O will complete in a timely manner.
+$rpc_py bdev_delay_update_latency Delay0 avg_read 30
+$rpc_py bdev_delay_update_latency Delay0 avg_write 30
+$rpc_py bdev_delay_update_latency Delay0 p99_read 30
+$rpc_py bdev_delay_update_latency Delay0 p99_write 30
+
+fio_status=0
+wait $fio_pid || fio_status=$?
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+if [ $fio_status -eq 0 ]; then
+ echo "nvmf hotplug test: fio successful as expected"
+else
+ echo "nvmf hotplug test: fio failed, expected success"
+ nvmftestfini
+ exit 1
+fi
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+rm -f ./local-job0-0-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/invalid.sh b/src/spdk/test/nvmf/target/invalid.sh
new file mode 100755
index 000000000..98246efeb
--- /dev/null
+++ b/src/spdk/test/nvmf/target/invalid.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f "$(dirname "$0")")
+rootdir=$(readlink -f "$testdir/../../..")
+source "$rootdir/test/common/autotest_common.sh"
+source "$rootdir/test/nvmf/common.sh"
+
+multi_target_rpc=$rootdir/test/nvmf/target/multitarget_rpc.py
+rpc=$rootdir/scripts/rpc.py
+nqn=nqn.2016-06.io.spdk:cnode
+target=foobar
+# Pre-seed the RNG so it generates reproducible values across different test runs
+RANDOM=0
+
+gen_random_s() {
+ local length=$1 ll
+	# Generate characters from the ASCII range that nvme supports
+ local chars=({32..127})
+ local string
+
+ for ((ll = 0; ll < length; ll++)); do
+ string+="$(echo -e "\x$(printf '%x' "${chars[RANDOM % ${#chars[@]}]}")")"
+ done
+ # Be nice to rpc.py's arg parser and escape `-` in case it's a first character
+ if [[ ${string::1} == "-" ]]; then
+ string=${string/-/\\-}
+ fi
+ echo "$string"
+}
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
+
+# Attempt to create subsystem with non-existing target
+out=$("$rpc" nvmf_create_subsystem -t "$target" "$nqn$RANDOM" 2>&1) && false
+[[ $out == *"Unable to find target"* ]]
+
+# Attempt to create subsystem with invalid serial number - inject ASCII char that's
+# not in the range (0x20-0x7e) of these supported by the nvme spec.
+out=$("$rpc" nvmf_create_subsystem -s "$NVMF_SERIAL$(echo -e "\x1f")" "$nqn$RANDOM" 2>&1) && false
+[[ $out == *"Invalid SN"* ]]
+
+# Attempt to create subsystem with invalid model - inject ASCII char that's not in the
+# range (0x20-0x7e) of these supported by the nvme spec.
+out=$("$rpc" nvmf_create_subsystem -d "SPDK_Controller$(echo -e "\x1f")" "$nqn$RANDOM" 2>&1) && false
+[[ $out == *"Invalid MN"* ]]
+
+# Attempt to create subsystem with invalid serial number - exceed SPDK_NVME_CTRLR_SN_LEN (20)
+out=$("$rpc" nvmf_create_subsystem -s "$(gen_random_s 21)" "$nqn$RANDOM" 2>&1) && false
+[[ $out == *"Invalid SN"* ]]
+
+# Attempt to create subsystem with invalid model - exceed SPDK_NVME_CTRLR_MN_LEN (40)
+out=$("$rpc" nvmf_create_subsystem -d "$(gen_random_s 41)" "$nqn$RANDOM" 2>&1) && false
+[[ $out == *"Invalid MN"* ]]
+
+# Attempt to delete non-existing target
+out=$("$multi_target_rpc" nvmf_delete_target --name "$target" 2>&1) && false
+[[ $out == *"The specified target doesn't exist, cannot delete it."* ]]
+
+trap - SIGINT SIGTERM EXIT
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/multiconnection.sh b/src/spdk/test/nvmf/target/multiconnection.sh
new file mode 100755
index 000000000..d7e490861
--- /dev/null
+++ b/src/spdk/test/nvmf/target/multiconnection.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+NVMF_SUBSYS=11
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+# Soft-RoCE does not have enough queues available for
+# multiconnection tests. Detect if we're using software RDMA.
+# If so, lower the number of subsystems for the test.
+if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
+ echo "Using software RDMA, lowering number of NVMeOF subsystems."
+ NVMF_SUBSYS=1
+fi
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+for i in $(seq 1 $NVMF_SUBSYS); do
+ $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+done
+
+for i in $(seq 1 $NVMF_SUBSYS); do
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+ waitforserial SPDK$i
+done
+
+$rootdir/scripts/fio.py -p nvmf -i 262144 -d 64 -t read -r 10
+$rootdir/scripts/fio.py -p nvmf -i 262144 -d 64 -t randwrite -r 10
+
+sync
+for i in $(seq 1 $NVMF_SUBSYS); do
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode${i}
+done
+
+rm -f ./local-job0-0-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/multitarget.sh b/src/spdk/test/nvmf/target/multitarget.sh
new file mode 100755
index 000000000..4c3ece7c0
--- /dev/null
+++ b/src/spdk/test/nvmf/target/multitarget.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+# For the time being this script is just meant to confirm the basic functionality of the
+# multitarget RPCs, as the in-tree applications don't support multi-target functionality.
+rpc_py="$rootdir/test/nvmf/target/multitarget_rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
+
+# Target application should start with a single target.
+if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then
+ echo "SPDK application did not start with the proper number of targets." && false
+fi
+
+$rpc_py nvmf_create_target -n nvmf_tgt_1 -s 32
+$rpc_py nvmf_create_target -n nvmf_tgt_2 -s 32
+
+if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "3" ]; then
+ echo "nvmf_create_target RPC didn't properly create targets." && false
+fi
+
+$rpc_py nvmf_delete_target -n nvmf_tgt_1
+$rpc_py nvmf_delete_target -n nvmf_tgt_2
+
+if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then
+ echo "nvmf_delete_target RPC didn't properly destroy targets." && false
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/multitarget_rpc.py b/src/spdk/test/nvmf/target/multitarget_rpc.py
new file mode 100755
index 000000000..c5ccbcece
--- /dev/null
+++ b/src/spdk/test/nvmf/target/multitarget_rpc.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+
+# Not for use in production. Please see the changelog for v19.10.
+
+from rpc.client import print_dict, JSONRPCException
+
+import logging
+import argparse
+import rpc
+import sys
+import shlex
+
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description='NVMe-oF RPC command line interface. NOTE: spdk/scripts/ is expected in PYTHONPATH')
+ parser.add_argument('-s', dest='server_addr',
+ help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock')
+ parser.add_argument('-p', dest='port',
+ help='RPC port number (if server_addr is IP address)',
+ default=5260, type=int)
+ parser.add_argument('-t', dest='timeout',
+ help='Timeout as a floating point number expressed in seconds waiting for response. Default: 60.0',
+ default=60.0, type=float)
+ parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
+ help='Set verbose mode to INFO', default="ERROR")
+ parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
+ help="""Set verbose level. """)
+ subparsers = parser.add_subparsers(help='RPC methods')
+
+ def nvmf_create_target(args):
+ print_dict(rpc.nvmf.nvmf_create_target(args.client,
+ name=args.name,
+ max_subsystems=args.max_subsystems))
+
+ p = subparsers.add_parser('nvmf_create_target', help='Create a new NVMe-oF target')
+ p.add_argument('-n', '--name', help='Target name (unique to application)', type=str, required=True)
+ p.add_argument('-s', '--max-subsystems', help='Max number of NVMf subsystems defaults to SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS',
+ type=int, required=False)
+ p.set_defaults(func=nvmf_create_target)
+
+ def nvmf_delete_target(args):
+ print_dict(rpc.nvmf.nvmf_delete_target(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser('nvmf_delete_target', help='Destroy the given NVMe-oF Target')
+ p.add_argument('-n', '--name', help='Target name (unique to application)', type=str, required=True)
+ p.set_defaults(func=nvmf_delete_target)
+
+ def nvmf_get_targets(args):
+ print_dict(rpc.nvmf.nvmf_get_targets(args.client))
+
+ p = subparsers.add_parser('nvmf_get_targets', help='Get the list of NVMe-oF Targets')
+ p.set_defaults(func=nvmf_get_targets)
+
+ def call_rpc_func(args):
+ try:
+ args.func(args)
+ except JSONRPCException as ex:
+ print(ex.message)
+ exit(1)
+
+ def execute_script(parser, client, fd):
+ for rpc_call in map(str.rstrip, fd):
+ if not rpc_call.strip():
+ continue
+ args = parser.parse_args(shlex.split(rpc_call))
+ args.client = client
+ call_rpc_func(args)
+
+ args = parser.parse_args()
+ args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper()))
+ if hasattr(args, 'func'):
+ call_rpc_func(args)
+ elif sys.stdin.isatty():
+ # No arguments and no data piped through stdin
+ parser.print_help()
+ exit(1)
+ else:
+ execute_script(parser, args.client, sys.stdin)
diff --git a/src/spdk/test/nvmf/target/nmic.sh b/src/spdk/test/nvmf/target/nmic.sh
new file mode 100755
index 000000000..6a967dc08
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nmic.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+NVMF_SECOND_TARGET_IP=$(echo "$RDMA_IP_LIST" | sed -n 2p)
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+# Create subsystems
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
+
+echo "test case1: single bdev can't be used in multiple subsystems"
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode2 -a -s SPDK2
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode2 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
+nmic_status=0
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode2 Malloc0 || nmic_status=$?
+
+if [ $nmic_status -eq 0 ]; then
+ echo " Adding namespace passed - failure expected."
+ nvmftestfini
+ exit 1
+else
+ echo " Adding namespace failed - expected result."
+fi
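+# (NMIC refers to the Namespace Multi-path I/O and Namespace Sharing Capabilities
+# field of the NVMe spec; the check above confirms that a single bdev cannot be
+# exposed through two subsystems at once, which is why the second add must fail.)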
+
+echo "test case2: host connect to nvmf target in multiple paths"
+if [ -n "$NVMF_SECOND_TARGET_IP" ]; then
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_SECOND_TARGET_IP -s $NVMF_PORT
+
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_SECOND_TARGET_IP" -s "$NVMF_PORT"
+
+ waitforserial "$NVMF_SERIAL"
+
+ $rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 1 -v
+fi
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/nvme_cli.sh b/src/spdk/test/nvmf/target/nvme_cli.sh
new file mode 100755
index 000000000..29359689b
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nvme_cli.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+if [ -z "${DEPENDENCY_DIR}" ]; then
+ echo DEPENDENCY_DIR not defined!
+ exit 1
+fi
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL -d SPDK_Controller1
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+waitforserial $NVMF_SERIAL 2
+if ! get_nvme_devs print 2> /dev/null; then
+ echo "Could not find any nvme devices to work with, aborting the test" >&2
+ exit 1
+fi
+
+for ctrl in "${nvmes[@]}"; do
+ nvme id-ctrl $ctrl
+ nvme smart-log $ctrl
+ nvme_model=$(nvme id-ctrl $ctrl | grep -w mn | sed 's/^.*: //' | sed 's/ *$//')
+ if [ "$nvme_model" != "SPDK_Controller1" ]; then
+ echo "Wrong model number for controller" $nvme_model
+ exit 1
+ fi
+done
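+# (The grep/sed pipeline above extracts the model string from an `nvme id-ctrl`
+# line that looks roughly like "mn : SPDK_Controller1".)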
+
+for ns in "${nvmes[@]}"; do
+ nvme id-ns $ns
+done
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
+
+# Test spdk/nvme-cli NVMe-oF commands: discover, connect and disconnect
+nvme_cli_build
+pushd "${DEPENDENCY_DIR}/nvme-cli"
+
+sed -i 's/shm_id=.*/shm_id=-1/g' spdk.conf
+./nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
+nvme_num_before_connection=$(get_nvme_devs 2>&1 || echo 0)
+./nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+sleep 1
+nvme_num=$(get_nvme_devs 2>&1)
+./nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
+if [ $nvme_num -le $nvme_num_before_connection ]; then
+ echo "spdk/nvme-cli connect target devices failed"
+ exit 1
+fi
+popd
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/nvmf_example.sh b/src/spdk/test/nvmf/target/nvmf_example.sh
new file mode 100755
index 000000000..28045bc49
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nvmf_example.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+function build_nvmf_example_args() {
+ if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
+ echo "sudo -u $(logname) $SPDK_EXAMPLE_DIR/nvmf -i $NVMF_APP_SHM_ID" -g 10000
+ else
+ echo "$SPDK_EXAMPLE_DIR/nvmf -i $NVMF_APP_SHM_ID" -g 10000
+ fi
+}
+
+NVMF_EXAMPLE="$(build_nvmf_example_args)"
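+# NVMF_EXAMPLE ends up holding something like (illustrative):
+#   <SPDK_EXAMPLE_DIR>/nvmf -i <shm_id> -g 10000
+# optionally prefixed with "sudo -u <user>" when running as non-root.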
+
+function nvmfexamplestart() {
+ timing_enter start_nvmf_example
+ $NVMF_EXAMPLE $1 &
+ nvmfpid=$!
+ trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $nvmfpid
+ timing_exit start_nvmf_example
+}
+
+timing_enter nvmf_example_test
+nvmftestinit
+nvmfexamplestart "-m 0xF"
+
+#create transport
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+#create malloc bdev
+malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+#create subsystem
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+
+#add ns to subsystem
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
+done
+
+#add listener to subsystem
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+perf="$SPDK_EXAMPLE_DIR/perf"
+
+$perf -q 64 -o 4096 -w randrw -M 30 -t 10 \
+ -r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
+subnqn:nqn.2016-06.io.spdk:cnode1"
+
+trap - SIGINT SIGTERM EXIT
+nvmftestfini
+timing_exit nvmf_example_test
diff --git a/src/spdk/test/nvmf/target/nvmf_lvol.sh b/src/spdk/test/nvmf/target/nvmf_lvol.sh
new file mode 100755
index 000000000..d44bc9332
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nvmf_lvol.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+LVOL_BDEV_INIT_SIZE=20
+LVOL_BDEV_FINAL_SIZE=30
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0x7
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+# Construct a RAID volume for the logical volume store
+base_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+base_bdevs+=$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)
+$rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$base_bdevs"
+
+# Create the logical volume store on the RAID volume
+lvs=$($rpc_py bdev_lvol_create_lvstore raid0 lvs)
+
+# Create a logical volume on the logical volume store
+lvol=$($rpc_py bdev_lvol_create -u $lvs lvol $LVOL_BDEV_INIT_SIZE)
+
+# Create an NVMe-oF subsystem and add the logical volume as a namespace
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -a -s SPDK0
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 $lvol
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Start random writes in the background
+$SPDK_EXAMPLE_DIR/perf -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" -o 4096 -q 128 -s 512 -w randwrite -t 10 -c 0x18 &
+perf_pid=$!
+
+sleep 1
+
+# Perform some operations on the logical volume
+snapshot=$($rpc_py bdev_lvol_snapshot $lvol "MY_SNAPSHOT")
+$rpc_py bdev_lvol_resize $lvol $LVOL_BDEV_FINAL_SIZE
+clone=$($rpc_py bdev_lvol_clone $snapshot "MY_CLONE")
+$rpc_py bdev_lvol_inflate $clone
+
+# Wait for I/O to complete
+wait $perf_pid
+
+# Clean up
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode0
+$rpc_py bdev_lvol_delete $lvol
+$rpc_py bdev_lvol_delete_lvstore -u $lvs
+
+rm -f ./local-job*
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/nvmf_vhost.sh b/src/spdk/test/nvmf/target/nvmf_vhost.sh
new file mode 100755
index 000000000..48e78d6d2
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nvmf_vhost.sh
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$testdir/../../..
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+if [ ! -d "/usr/local/qemu/spdk-3.0.0" ]; then
+ echo "Qemu not installed on this machine. It may be a VM. Skipping nvmf_vhost test."
+ exit 0
+fi
+
+source $rootdir/test/vhost/common.sh
+
+MALLOC_BDEV_SIZE=128
+MALLOC_BLOCK_SIZE=512
+NVMF_SOCK="/tmp/nvmf_rpc.sock"
+NVMF_RPC="$rootdir/scripts/rpc.py -s $NVMF_SOCK"
+
+VHOST_SOCK="/tmp/vhost_rpc.sock"
+VHOST_APP+=(-p 0 -r "$VHOST_SOCK" -u)
+VHOST_RPC="$rootdir/scripts/rpc.py -s $VHOST_SOCK"
+
+nvmftestinit
+
+# Start Apps
+"${NVMF_APP[@]}" -r $NVMF_SOCK &
+nvmfpid=$!
+waitforlisten $nvmfpid $NVMF_SOCK
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+mkdir -p "$(get_vhost_dir 3)"
+
+"${VHOST_APP[@]}" -S "$(get_vhost_dir 3)" &
+vhostpid=$!
+waitforlisten $vhostpid $VHOST_SOCK
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; killprocess $vhostpid; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+# Configure NVMF tgt on host machine
+malloc_bdev="$($NVMF_RPC bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+$NVMF_RPC nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -p 4
+$NVMF_RPC nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$NVMF_RPC nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
+$NVMF_RPC nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Configure VHost on host machine
+$VHOST_RPC bdev_nvme_attach_controller -b Nvme0 -t $TEST_TRANSPORT -f ipv4 -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
+$VHOST_RPC vhost_create_scsi_controller naa.VhostScsi0.3
+$VHOST_RPC vhost_scsi_controller_add_target naa.VhostScsi0.3 0 "Nvme0n1"
+
+# start qemu based VM.
+vm_setup --os="$VM_IMAGE" --disk-type=spdk_vhost_scsi --disks="VhostScsi0" --force=3 --vhost-name=3
+
+vm_run 3
+
+vm_wait_for_boot 300 3
+
+# Run the fio workload remotely
+vm_scp 3 $testdir/nvmf_vhost_fio.job 127.0.0.1:/root/nvmf_vhost_fio.job
+vm_exec 3 "fio /root/nvmf_vhost_fio.job"
+vm_shutdown_all
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $vhostpid
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/nvmf_vhost_fio.job b/src/spdk/test/nvmf/target/nvmf_vhost_fio.job
new file mode 100644
index 000000000..350aa895e
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nvmf_vhost_fio.job
@@ -0,0 +1,19 @@
+[global]
+blocksize=4k-512k
+iodepth=128
+ioengine=libaio
+filename=/dev/sdb
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_fatal=1
+verify_dump=1
+verify_backlog=8
+
+[randwrite]
+rw=randwrite
+runtime=15
+time_based
diff --git a/src/spdk/test/nvmf/target/rpc.sh b/src/spdk/test/nvmf/target/rpc.sh
new file mode 100755
index 000000000..d715e4b4f
--- /dev/null
+++ b/src/spdk/test/nvmf/target/rpc.sh
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+function jcount() {
+ local filter=$1
+ jq "$filter" | wc -l
+}
+
+function jsum() {
+ local filter=$1
+ jq "$filter" | awk '{s+=$1}END{print s}'
+}
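+# For example, `jsum .poll_groups[].io_qpairs <<< "$stats"` sums the io_qpairs
+# counter across all poll groups, while `jcount .poll_groups[].name` counts them.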
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+stats=$($rpc_py nvmf_get_stats)
+# Expect 4 poll groups (from CPU mask) and no transports yet
+[ "4" -eq $(jcount .poll_groups[].name <<< "$stats") ]
+[ "null" == $(jq .poll_groups[0].transports[0] <<< "$stats") ]
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+stats=$($rpc_py nvmf_get_stats)
+# Expect no QPs
+[ "0" -eq $(jsum .poll_groups[].admin_qpairs <<< "$stats") ]
+[ "0" -eq $(jsum .poll_groups[].io_qpairs <<< "$stats") ]
+# Transport statistics are currently implemented for RDMA only
+if [ 'rdma' == $TEST_TRANSPORT ]; then
+ # Expect RDMA transport and some devices
+ [ "1" -eq $(jcount .poll_groups[0].transports[].trtype <<< "$stats") ]
+ transport_type=$(jq -r .poll_groups[0].transports[0].trtype <<< "$stats")
+ [ "${transport_type,,}" == "${TEST_TRANSPORT,,}" ]
+ [ "0" -lt $(jcount .poll_groups[0].transports[0].devices[].name <<< "$stats") ]
+fi
+
+# set times for subsystem construct/delete
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ times=50
+else
+ times=3
+fi
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
+
+# Disallow host NQN and make sure connect fails
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
+$rpc_py nvmf_subsystem_allow_any_host -d nqn.2016-06.io.spdk:cnode1
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# This connect should fail - the host NQN is not allowed
+! nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+# Add the host NQN and verify that the connect succeeds
+$rpc_py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1
+nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+waitforserial "$NVMF_SERIAL"
+nvme disconnect -n nqn.2016-06.io.spdk:cnode1
+
+# Remove the host and verify that the connect fails
+$rpc_py nvmf_subsystem_remove_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1
+! nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+# Allow any host and verify that the connect succeeds
+$rpc_py nvmf_subsystem_allow_any_host -e nqn.2016-06.io.spdk:cnode1
+nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+waitforserial "$NVMF_SERIAL"
+nvme disconnect -n nqn.2016-06.io.spdk:cnode1
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+# Do frequent add/delete of namespaces with a non-default NSID.
+for i in $(seq 1 $times); do
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 5
+ $rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1
+ nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+ waitforserial "$NVMF_SERIAL"
+
+ nvme disconnect -n nqn.2016-06.io.spdk:cnode1
+
+ $rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 5
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+done
+
+# Do frequent add/delete of subsystems.
+for i in $(seq 1 $times); do
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
+ $rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1
+
+ $rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 1
+
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+done
+
+stats=$($rpc_py nvmf_get_stats)
+# Expect some admin and IO qpairs
+[ "0" -lt $(jsum .poll_groups[].admin_qpairs <<< "$stats") ]
+[ "0" -lt $(jsum .poll_groups[].io_qpairs <<< "$stats") ]
+# Transport statistics are currently implemented for RDMA only
+if [ 'rdma' == $TEST_TRANSPORT ]; then
+ # Expect non-zero completions and request latencies accumulated
+ [ "0" -lt $(jsum .poll_groups[].transports[].devices[].completions <<< "$stats") ]
+ [ "0" -lt $(jsum .poll_groups[].transports[].devices[].request_latency <<< "$stats") ]
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/shutdown.sh b/src/spdk/test/nvmf/target/shutdown.sh
new file mode 100755
index 000000000..8ad73bd6f
--- /dev/null
+++ b/src/spdk/test/nvmf/target/shutdown.sh
@@ -0,0 +1,155 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+function starttarget() {
+ # Start the target
+ nvmfappstart -m 0x1E
+
+ $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+ num_subsystems=({1..10})
+ # SoftRoce does not have enough queues available for
+ # this test. Detect if we're using software RDMA.
+ # If so, only use two subsystems.
+ if check_ip_is_soft_roce "$NVMF_FIRST_TARGET_IP"; then
+ num_subsystems=({1..2})
+ fi
+
+ timing_enter create_subsystems
+ # Create subsystems
+ rm -rf $testdir/rpcs.txt
+ for i in "${num_subsystems[@]}"; do
+ cat <<- EOL >> $testdir/rpcs.txt
+ bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i
+ nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
+ nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i
+ nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ EOL
+ done
+ $rpc_py < $testdir/rpcs.txt
+ timing_exit create_subsystems
+
+}
+
+function stoptarget() {
+ rm -f ./local-job0-0-verify.state
+ rm -rf $testdir/bdevperf.conf
+ rm -rf $testdir/rpcs.txt
+
+ nvmftestfini
+}
+
+function waitforio() {
+ # $1 = RPC socket
+ if [ -z "$1" ]; then
+ exit 1
+ fi
+ # $2 = bdev name
+ if [ -z "$2" ]; then
+ exit 1
+ fi
+ local ret=1
+ local i
+ for ((i = 10; i != 0; i--)); do
+ read_io_count=$($rpc_py -s $1 bdev_get_iostat -b $2 | jq -r '.bdevs[0].num_read_ops')
+ # A few I/O will happen during initial examine. So wait until at least 100 I/O
+ # have completed to know that bdevperf is really generating the I/O.
+ if [ $read_io_count -ge 100 ]; then
+ ret=0
+ break
+ fi
+ sleep 0.25
+ done
+ return $ret
+}
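+# Example: `waitforio /var/tmp/bdevperf.sock Nvme1n1` waits (up to roughly 2.5s)
+# until the Nvme1n1 bdev has serviced at least 100 reads.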
+
+# Test 1: Kill the initiator unexpectedly with no I/O outstanding
+function nvmf_shutdown_tc1() {
+ starttarget
+
+ # Run bdev_svc, which connects but does not issue I/O
+ $rootdir/test/app/bdev_svc/bdev_svc -m 0x1 -i 1 -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") &
+ perfpid=$!
+ waitforlisten $perfpid /var/tmp/bdevperf.sock
+ $rpc_py -s /var/tmp/bdevperf.sock framework_wait_init
+
+ # Kill bdev_svc
+ kill -9 $perfpid || true
+ rm -f /var/run/spdk_bdev1
+
+ # Verify the target stays up
+ sleep 1
+ kill -0 $nvmfpid
+
+ # Connect with bdevperf and confirm it works
+ $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 1
+
+ stoptarget
+}
+
+# Test 2: Kill initiator unexpectedly with I/O outstanding
+function nvmf_shutdown_tc2() {
+ starttarget
+
+ # Run bdevperf
+ $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 10 &
+ perfpid=$!
+ waitforlisten $perfpid /var/tmp/bdevperf.sock
+ $rpc_py -s /var/tmp/bdevperf.sock framework_wait_init
+
+ waitforio /var/tmp/bdevperf.sock Nvme1n1
+
+ # Kill bdevperf half way through
+ killprocess $perfpid
+
+ # Verify the target stays up
+ sleep 1
+ kill -0 $nvmfpid
+
+ stoptarget
+}
+
+# Test 3: Kill the target unexpectedly with I/O outstanding
+function nvmf_shutdown_tc3() {
+ starttarget
+
+ # Run bdevperf
+ $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 10 &
+ perfpid=$!
+ waitforlisten $perfpid /var/tmp/bdevperf.sock
+ $rpc_py -s /var/tmp/bdevperf.sock framework_wait_init
+
+ # Expand the trap to clean up bdevperf if something goes wrong
+ trap 'process_shm --id $NVMF_APP_SHM_ID; kill -9 $perfpid || true; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+ waitforio /var/tmp/bdevperf.sock Nvme1n1
+
+ # Kill the target half way through
+ killprocess $nvmfpid
+ nvmfpid=
+
+ # Verify bdevperf exits successfully
+ sleep 1
+ # TODO: Right now the NVMe-oF initiator will not correctly detect broken connections
+ # and so it will never shut down. Just kill it.
+ kill -9 $perfpid || true
+
+ stoptarget
+}
+
+nvmftestinit
+
+run_test "nvmf_shutdown_tc1" nvmf_shutdown_tc1
+run_test "nvmf_shutdown_tc2" nvmf_shutdown_tc2
+run_test "nvmf_shutdown_tc3" nvmf_shutdown_tc3
+
+trap - SIGINT SIGTERM EXIT
diff --git a/src/spdk/test/nvmf/target/srq_overwhelm.sh b/src/spdk/test/nvmf/target/srq_overwhelm.sh
new file mode 100755
index 000000000..fe4dd7d29
--- /dev/null
+++ b/src/spdk/test/nvmf/target/srq_overwhelm.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
+ echo "Using software RDMA, Likely not enough memory to run this test. aborting."
+ exit 0
+fi
+
+nvmfappstart -m 0xF
+
+# create the rdma transport with an intentionally small SRQ depth
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -s 1024
+
+for i in $(seq 0 5); do
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK00000000000001
+ $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" -i 16
+ waitforblk "nvme${i}n1"
+done
+
+# By running 6 different fio jobs, each with 13 subjobs, we end up with 78 fio threads issuing I/O to
+# the target at once. This completely overwhelms the target SRQ, but allows us to verify that rnr_retry is
+# working even at very high queue depths, because the rdma qpair doesn't fail.
+# It is normal to see the initiator time out and reconnect while waiting for completions from an overwhelmed
+# target, but the connection should come up and fio should complete without errors.
+$rootdir/scripts/fio.py -p nvmf -i 1048576 -d 128 -t read -r 10 -n 13
+
+sync
+
+for i in $(seq 0 5); do
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}"
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode$i
+done
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/ocf/common.sh b/src/spdk/test/ocf/common.sh
new file mode 100644
index 000000000..6c196ab97
--- /dev/null
+++ b/src/spdk/test/ocf/common.sh
@@ -0,0 +1,27 @@
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+function nvme_cfg() {
+ if [ -z "$ocf_nvme_cfg" ]; then
+ ocf_nvme_cfg=$($rootdir/scripts/gen_nvme.sh)
+ fi
+ echo "$ocf_nvme_cfg"
+}
+
+function clear_nvme() {
+ mapfile -t bdf < <(get_first_nvme_bdf)
+
+ # Clear metadata on NVMe device
+ $rootdir/scripts/setup.sh reset
+ sleep 5
+
+ name=$(get_nvme_name_from_bdf "${bdf[0]}")
+ mountpoints=$(lsblk /dev/$name --output MOUNTPOINT -n | wc -w)
+ if [ "$mountpoints" != "0" ]; then
+ exit 1
+ fi
+ dd if=/dev/zero of=/dev/$name bs=1M count=1000 oflag=direct
+ $rootdir/scripts/setup.sh
+}
diff --git a/src/spdk/test/ocf/integrity/bdevperf-iotypes.sh b/src/spdk/test/ocf/integrity/bdevperf-iotypes.sh
new file mode 100755
index 000000000..2effa21bd
--- /dev/null
+++ b/src/spdk/test/ocf/integrity/bdevperf-iotypes.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+
+bdevperf=$rootdir/test/bdev/bdevperf/bdevperf
+rpc_py="$rootdir/scripts/rpc.py"
+
+source "$curdir/mallocs.conf"
+$bdevperf --json <(gen_malloc_ocf_json) -q 128 -o 4096 -t 4 -w flush
+$bdevperf --json <(gen_malloc_ocf_json) -q 128 -o 4096 -t 4 -w unmap
+$bdevperf --json <(gen_malloc_ocf_json) -q 128 -o 4096 -t 4 -w write
diff --git a/src/spdk/test/ocf/integrity/fio-modes.sh b/src/spdk/test/ocf/integrity/fio-modes.sh
new file mode 100755
index 000000000..0c90f999f
--- /dev/null
+++ b/src/spdk/test/ocf/integrity/fio-modes.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+
+source $rootdir/test/ocf/common.sh
+
+function fio_verify() {
+ fio_bdev $curdir/test.fio --aux-path=/tmp/ --ioengine=spdk_bdev "$@"
+}
+
+function cleanup() {
+ rm -f $curdir/modes.conf
+}
+
+# Clear nvme device which we will use in test
+clear_nvme
+
+trap "cleanup; exit 1" SIGINT SIGTERM EXIT
+
+# Building config is not backtrace worthy ...
+xtrace_disable
+
+config=() ocf_names=() ocf_modes=()
+
+ocf_names[1]=PT_Nvme ocf_modes[1]=pt
+ocf_names[2]=WT_Nvme ocf_modes[2]=wt
+ocf_names[3]=WB_Nvme0 ocf_modes[3]=wb
+ocf_names[4]=WB_Nvme1 ocf_modes[4]=wb
+
+mapfile -t config < <("$rootdir/scripts/gen_nvme.sh" --json)
+
+# Drop anything from last closing ] so we can inject our own config pieces ...
+config=("${config[@]::${#config[@]}-2}")
+# ... and now convert entire array to a single string item
+config=("${config[*]}")
+
+config+=(
+ "$(
+ cat <<- JSON
+ {
+ "method": "bdev_split_create",
+ "params": {
+ "base_bdev": "Nvme0n1",
+ "split_count": 8,
+ "split_size_mb": 101
+ }
+ }
+ JSON
+ )"
+)
+
+for ((d = 0, c = 1; d <= ${#ocf_names[@]} + 2; d += 2, c++)); do
+ config+=(
+ "$(
+ cat <<- JSON
+ {
+ "method": "bdev_ocf_create",
+ "params": {
+ "name": "${ocf_names[c]}",
+ "mode": "${ocf_modes[c]}",
+ "cache_bdev_name": "Nvme0n1p$d",
+ "core_bdev_name": "Nvme0n1p$((d + 1))"
+ }
+ }
+ JSON
+ )"
+ )
+done
+
+# First ']}' closes our config and bdev subsystem blocks
+cat <<- CONFIG > "$curdir/modes.conf"
+ {"subsystems":[
+ $(
+ IFS=","
+ printf '%s\n' "${config[*]}"
+ )
+ ]}]}
+CONFIG
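+# The generated modes.conf has roughly this shape (illustrative):
+#   {"subsystems":[{"subsystem":"bdev","config":[ <nvme bdevs from gen_nvme.sh>,
+#    <bdev_split_create>, <4x bdev_ocf_create> ]}]}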
+
+# Format the config nicely and dump it to stdout for everyone to marvel at it ...
+jq . "$curdir/modes.conf"
+
+# ... and now back to our regularly scheduled program
+xtrace_restore
+
+fio_verify --filename=PT_Nvme:WT_Nvme:WB_Nvme0:WB_Nvme1 --spdk_json_conf="$curdir/modes.conf" --thread=1
+
+trap - SIGINT SIGTERM EXIT
+cleanup
diff --git a/src/spdk/test/ocf/integrity/mallocs.conf b/src/spdk/test/ocf/integrity/mallocs.conf
new file mode 100644
index 000000000..245dd23cf
--- /dev/null
+++ b/src/spdk/test/ocf/integrity/mallocs.conf
@@ -0,0 +1,59 @@
+gen_malloc_ocf_json () {
+ local size=300 # MB
+ local block_size=512
+ local config
+
+ local malloc malloc_devs=3
+ for (( malloc = 0; malloc < malloc_devs; malloc++ )); do
+ config+=(
+ "$(
+ cat <<-JSON
+ {
+ "method": "bdev_malloc_create",
+ "params": {
+ "name": "Malloc$malloc",
+ "num_blocks": $(( (size << 20) / block_size )),
+ "block_size": 512
+ }
+ }
+ JSON
+ )"
+ )
+ done
+
+ local ocfs ocf ocf_mode ocf_cache ocf_core
+ ocfs=(1 2)
+ ocf_mode[1]=wt ocf_cache[1]=Malloc0 ocf_core[1]=Malloc1
+ ocf_mode[2]=pt ocf_cache[2]=Malloc0 ocf_core[2]=Malloc2
+
+ for ocf in "${ocfs[@]}"; do
+ config+=(
+ "$(
+ cat <<-JSON
+ {
+ "method": "bdev_ocf_create",
+ "params": {
+ "name": "MalCache$ocf",
+ "mode": "${ocf_mode[ocf]}",
+ "cache_bdev_name": "${ocf_cache[ocf]}",
+ "core_bdev_name": "${ocf_core[ocf]}"
+ }
+ }
+ JSON
+ )"
+ )
+ done
+
+ jq . <<-JSON
+ {
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ $(IFS=","; printf '%s\n' "${config[*]}")
+ ]
+ }
+ ]
+ }
+ JSON
+}
diff --git a/src/spdk/test/ocf/integrity/stats.sh b/src/spdk/test/ocf/integrity/stats.sh
new file mode 100755
index 000000000..c82f89d89
--- /dev/null
+++ b/src/spdk/test/ocf/integrity/stats.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+
+bdevperf=$rootdir/test/bdev/bdevperf/bdevperf
+rpc_py="$rootdir/scripts/rpc.py"
+
+source "$curdir/mallocs.conf"
+$bdevperf --json <(gen_malloc_ocf_json) -q 128 -o 4096 -w write -t 120 -r /var/tmp/spdk.sock &
+bdev_perf_pid=$!
+waitforlisten $bdev_perf_pid
+sleep 1
+$rpc_py bdev_ocf_get_stats MalCache1
+kill -9 $bdev_perf_pid
+wait $bdev_perf_pid || true
diff --git a/src/spdk/test/ocf/integrity/test.fio b/src/spdk/test/ocf/integrity/test.fio
new file mode 100644
index 000000000..e56895c28
--- /dev/null
+++ b/src/spdk/test/ocf/integrity/test.fio
@@ -0,0 +1,39 @@
+[global]
+thread=1
+group_reporting=1
+direct=1
+serialize_overlap=1
+time_based=1
+do_verify=1
+verify=md5
+verify_backlog=1024
+iodepth=128
+bs=4K
+runtime=10
+
+size=20%
+
+[job_1]
+offset=0
+rw=randwrite
+name=randwrite
+
+[job_2]
+offset=20%
+rw=randrw
+name=randrw
+
+[job_3]
+offset=40%
+rw=write
+name=write
+
+[job_4]
+offset=60%
+rw=rw
+name=rw
+
+[job_5]
+offset=80%
+rw=randwrite
+name=randwrite
diff --git a/src/spdk/test/ocf/management/create-destruct.sh b/src/spdk/test/ocf/management/create-destruct.sh
new file mode 100755
index 000000000..162f7a679
--- /dev/null
+++ b/src/spdk/test/ocf/management/create-destruct.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+function bdev_check_claimed() {
+ if [ "$($rpc_py get_bdevs -b "$@" | jq '.[0].claimed')" = "true" ]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+$SPDK_BIN_DIR/iscsi_tgt &
+spdk_pid=$!
+
+trap 'killprocess $spdk_pid; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $spdk_pid
+
+$rpc_py bdev_malloc_create 101 512 -b Malloc0
+$rpc_py bdev_malloc_create 101 512 -b Malloc1
+
+$rpc_py bdev_ocf_create PartCache wt Malloc0 NonExisting
+
+$rpc_py bdev_ocf_get_bdevs PartCache | jq -e \
+ '.[0] | .started == false and .cache.attached and .core.attached == false'
+
+$rpc_py bdev_ocf_get_bdevs NonExisting | jq -e \
+ '.[0] | .name == "PartCache"'
+
+if ! bdev_check_claimed Malloc0; then
+ echo >&2 "Base device expected to be claimed now"
+ exit 1
+fi
+
+$rpc_py bdev_ocf_delete PartCache
+if bdev_check_claimed Malloc0; then
+ echo >&2 "Base device is not expected to be claimed now"
+ exit 1
+fi
+
+$rpc_py bdev_ocf_create FullCache wt Malloc0 Malloc1
+
+$rpc_py bdev_ocf_get_bdevs FullCache | jq -e \
+ '.[0] | .started and .cache.attached and .core.attached'
+
+if ! (bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1); then
+ echo >&2 "Base devices expected to be claimed now"
+ exit 1
+fi
+
+$rpc_py bdev_ocf_delete FullCache
+if bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1; then
+ echo >&2 "Base devices are not expected to be claimed now"
+ exit 1
+fi
+
+$rpc_py bdev_ocf_create HotCache wt Malloc0 Malloc1
+
+if ! (bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1); then
+ echo >&2 "Base devices expected to be claimed now"
+ exit 1
+fi
+
+$rpc_py bdev_malloc_delete Malloc0
+
+if bdev_check_claimed Malloc1; then
+ echo >&2 "Base device is not expected to be claimed now"
+ exit 1
+fi
+
+status=$($rpc_py get_bdevs)
+gone=$(echo $status | jq 'map(select(.name == "HotCache")) == []')
+if [[ $gone == false ]]; then
+ echo >&2 "OCF bdev is expected to unregister"
+ exit 1
+fi
+
+# Check that shutdown of a running CAS bdev is handled correctly
+$rpc_py bdev_ocf_create PartCache wt NonExisting Malloc1
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $spdk_pid
diff --git a/src/spdk/test/ocf/management/multicore.sh b/src/spdk/test/ocf/management/multicore.sh
new file mode 100755
index 000000000..8c4f89e1b
--- /dev/null
+++ b/src/spdk/test/ocf/management/multicore.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+spdk_pid='?'
+function start_spdk() {
+ $SPDK_BIN_DIR/iscsi_tgt &
+ spdk_pid=$!
+ trap 'killprocess $spdk_pid; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $spdk_pid
+}
+function stop_spdk() {
+ killprocess $spdk_pid
+ trap - SIGINT SIGTERM EXIT
+}
+
+start_spdk
+
+# Hotplug case
+
+$rpc_py bdev_malloc_create 1 512 -b Core0
+$rpc_py bdev_malloc_create 1 512 -b Core1
+
+$rpc_py bdev_ocf_create C1 wt Cache Core0
+$rpc_py bdev_ocf_create C2 wt Cache Core1
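+# Both OCF bdevs reference a cache bdev named "Cache" that does not exist yet;
+# they should register but remain not started until it is hot-added below.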
+
+$rpc_py bdev_ocf_get_bdevs | jq -e \
+ 'any(select(.started)) == false'
+
+$rpc_py bdev_malloc_create 101 512 -b Cache
+
+$rpc_py bdev_ocf_get_bdevs | jq -e \
+ 'all(select(.started)) == true'
+
+# Make sure we will not fail the delete because examine is still in progress
+waitforbdev C2
+
+# Detaching cores
+
+$rpc_py bdev_ocf_delete C2
+
+$rpc_py bdev_ocf_get_bdevs C1 | jq -e \
+ '.[0] | .started'
+
+$rpc_py bdev_ocf_create C2 wt Cache Core1
+
+$rpc_py bdev_ocf_get_bdevs C2 | jq -e \
+ '.[0] | .started'
+
+# Normal shutdown
+
+stop_spdk
+
+# Hotremove case
+start_spdk
+
+$rpc_py bdev_malloc_create 101 512 -b Cache
+$rpc_py bdev_malloc_create 101 512 -b Malloc
+$rpc_py bdev_malloc_create 1 512 -b Core
+
+$rpc_py bdev_ocf_create C1 wt Cache Malloc
+$rpc_py bdev_ocf_create C2 wt Cache Core
+
+$rpc_py bdev_ocf_get_bdevs Cache | jq \
+ 'length == 2'
+
+$rpc_py bdev_malloc_delete Cache
+
+$rpc_py bdev_ocf_get_bdevs | jq -e \
+ '. == []'
+
+# Not fully initialized shutdown
+
+$rpc_py bdev_ocf_create C1 wt Malloc NonExisting
+$rpc_py bdev_ocf_create C2 wt Malloc NonExisting
+$rpc_py bdev_ocf_create C3 wt Malloc Core
+
+stop_spdk
diff --git a/src/spdk/test/ocf/management/persistent-metadata.sh b/src/spdk/test/ocf/management/persistent-metadata.sh
new file mode 100755
index 000000000..cbfcab341
--- /dev/null
+++ b/src/spdk/test/ocf/management/persistent-metadata.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+source $rootdir/test/ocf/common.sh
+
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+$rootdir/scripts/setup.sh
+
+mapfile -t config < <("$rootdir/scripts/gen_nvme.sh" --json)
+# Drop anything from last closing ] so we can inject our own config pieces ...
+config=("${config[@]::${#config[@]}-2}")
+# ... and now convert entire array to a single string item
+config=("${config[*]}")
+
+config+=(
+ "$(
+ cat <<- JSON
+ {
+ "method": "bdev_split_create",
+ "params": {
+ "base_bdev": "Nvme0n1",
+ "split_count": 7,
+ "split_size_mb": 128
+ }
+ }
+ JSON
+ )"
+)
+
+# First ']}' closes our config and bdev subsystem blocks
+jq . <<- CONFIG > "$curdir/config"
+ {"subsystems":[
+ $(
+ IFS=","
+ printf '%s\n' "${config[*]}"
+ )
+ ]}]}
+CONFIG
+
+# Clear nvme device which we will use in test
+clear_nvme
+
+"$SPDK_BIN_DIR/iscsi_tgt" --json "$curdir/config" &
+spdk_pid=$!
+
+waitforlisten $spdk_pid
+
+# Create ocf on persistent storage
+
+$rpc_py bdev_ocf_create ocfWT wt Nvme0n1p0 Nvme0n1p1
+$rpc_py bdev_ocf_create ocfPT pt Nvme0n1p2 Nvme0n1p3
+$rpc_py bdev_ocf_create ocfWB0 wb Nvme0n1p4 Nvme0n1p5
+$rpc_py bdev_ocf_create ocfWB1 wb Nvme0n1p4 Nvme0n1p6
+
+# Sort the bdevs because we don't guarantee that they are going to be
+# in the same order after shutdown
+($rpc_py bdev_ocf_get_bdevs | jq '(.. | arrays) |= sort') > ./ocf_bdevs
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $spdk_pid
+
+# Check for OCF persistence after restart
+"$SPDK_BIN_DIR/iscsi_tgt" --json "$curdir/config" &
+spdk_pid=$!
+
+trap 'killprocess $spdk_pid; rm -f $curdir/config ocf_bdevs ocf_bdevs_verify; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $spdk_pid
+sleep 5
+
+# OCF should be loaded now as well
+
+($rpc_py bdev_ocf_get_bdevs | jq '(.. | arrays) |= sort') > ./ocf_bdevs_verify
+
+diff ocf_bdevs ocf_bdevs_verify
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $spdk_pid
+rm -f $curdir/config ocf_bdevs ocf_bdevs_verify
+
+clear_nvme $bdf
diff --git a/src/spdk/test/ocf/management/remove.sh b/src/spdk/test/ocf/management/remove.sh
new file mode 100755
index 000000000..1302f16cd
--- /dev/null
+++ b/src/spdk/test/ocf/management/remove.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+rm -f aio*
+truncate -s 128M aio0
+truncate -s 128M aio1
+
+jq . <<- JSON > "$curdir/config"
+ {
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "method": "bdev_aio_create",
+ "params": {
+ "name": "ai0",
+ "block_size": 512,
+ "filename": "./aio0"
+ }
+ },
+ {
+ "method": "bdev_aio_create",
+ "params": {
+ "name": "aio1",
+ "block_size": 512,
+ "filename": "./aio1"
+ }
+ }
+ ]
+ }
+ ]
+ }
+JSON
+
+"$SPDK_BIN_DIR/iscsi_tgt" --json "$curdir/config" &
+spdk_pid=$!
+
+waitforlisten $spdk_pid
+
+# Create ocf on persistent storage
+
+$rpc_py bdev_ocf_create ocfWT wt aio0 aio1
+
+# Check that ocfWT was created properly
+
+$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw ocfWT
+
+# Remove ocfWT; after deletion via RPC, the OCF bdev should not load on the next app start
+
+$rpc_py bdev_ocf_delete ocfWT
+
+# Check that ocfWT was deleted properly
+
+! $rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw ocfWT
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $spdk_pid
+
+# Check that ocfWT was deleted permanently
+"$SPDK_BIN_DIR/iscsi_tgt" --json "$curdir/config" &
+spdk_pid=$!
+
+trap 'killprocess $spdk_pid; rm -f aio* $curdir/config ocf_bdevs ocf_bdevs_verify; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $spdk_pid
+
+# Check that ocfWT was not loaded on app start
+
+! $rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw ocfWT
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $spdk_pid
+rm -f aio* $curdir/config ocf_bdevs ocf_bdevs_verify
diff --git a/src/spdk/test/ocf/ocf.sh b/src/spdk/test/ocf/ocf.sh
new file mode 100755
index 000000000..415befc67
--- /dev/null
+++ b/src/spdk/test/ocf/ocf.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+
+source $rootdir/test/common/autotest_common.sh
+
+run_test "ocf_fio_modes" "$testdir/integrity/fio-modes.sh"
+run_test "ocf_bdevperf_iotypes" "$testdir/integrity/bdevperf-iotypes.sh"
+run_test "ocf_stats" "$testdir/integrity/stats.sh"
+run_test "ocf_create_destruct" "$testdir/management/create-destruct.sh"
+run_test "ocf_multicore" "$testdir/management/multicore.sh"
+run_test "ocf_persistent_metadata" "$testdir/management/persistent-metadata.sh"
+run_test "ocf_remove" "$testdir/management/remove.sh"
diff --git a/src/spdk/test/openstack/install_devstack.sh b/src/spdk/test/openstack/install_devstack.sh
new file mode 100755
index 000000000..cb9c9acf8
--- /dev/null
+++ b/src/spdk/test/openstack/install_devstack.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Devstack installation script"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo "--branch=BRANCH Define which version of openstack"
+ echo " should be installed. Default is master."
+ echo "-h, --help Print help and exit"
+
+ exit 0
+}
+
+branch="master"
+while getopts 'h-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ branch=*) branch="${OPTARG#*=}" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+cd /opt/stack/devstack
+su -c "./unstack.sh" -s /bin/bash stack
+
+cd /opt/stack
+rm -rf cinder devstack glance keystone heat horizon neutron nova placement requirements tacker tacker-horizon tempest
+
+if [[ $branch == "master" ]]; then
+ su -c "git clone https://opendev.org/openstack-dev/devstack" -s /bin/bash stack
+else
+ su -c "git clone https://opendev.org/openstack-dev/devstack -b stable/$branch" -s /bin/bash stack
+fi
+cp $rootdir/scripts/vagrant/local.conf /opt/stack/devstack/local.conf
+
+cd /opt/stack/devstack
+sudo sed -i "s|http://download.cirros-cloud.net|https://download.cirros-cloud.net|g" stackrc
+su -c "./stack.sh" -s /bin/bash stack
+source openrc admin admin
+openstack volume type create SPDK --public
diff --git a/src/spdk/test/openstack/run_openstack_tests.sh b/src/spdk/test/openstack/run_openstack_tests.sh
new file mode 100755
index 000000000..3bbd57d29
--- /dev/null
+++ b/src/spdk/test/openstack/run_openstack_tests.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+rpc_py=$rootdir/scripts/rpc.py
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+TEST_TRANSPORT='rdma'
+
+nvmftestinit
+
+function finish_test() {
+ {
+ "$rpc_py" bdev_lvol_delete_lvstore -l lvs0
+ kill -9 $rpc_proxy_pid
+ rm "$testdir/conf.json"
+ } || :
+}
+
+cat <<- JSON > "$testdir/conf.json"
+ {"subsystems":[
+ $("$rootdir/scripts/gen_nvme.sh" --json)
+ ]}
+JSON
+
+nvmfappstart -m 0x3 -p 0 -s 1024 --json $testdir/conf.json
+
+trap 'finish_test; process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+$rpc_py bdev_nvme_set_hotplug -e
+timing_enter run_rpc_proxy
+$rootdir/scripts/rpc_http_proxy.py 127.0.0.1 3333 secret secret &
+rpc_proxy_pid=$!
+timing_exit run_rpc_proxy
+
+timing_enter configure_spdk
+$rpc_py bdev_get_bdevs
+$rpc_py bdev_lvol_delete_lvstore -l lvs0 || true
+$rpc_py bdev_lvol_create_lvstore Nvme0n1 lvs0
+$rpc_py bdev_get_bdevs
+timing_exit configure_spdk
+
+timing_enter restart_cinder
+sudo systemctl restart devstack@c-*
+sleep 10
+timing_exit restart_cinder
+
+# Start testing SPDK with OpenStack using Tempest (an OpenStack tool that allows testing of OpenStack functionality).
+# These tests check whether SPDK can correctly cooperate with the OpenStack SPDK driver.
+timing_enter tempest_tests
+current_dir=$(pwd)
+cd /opt/stack/tempest
+tox -e all -- tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume
+tox -e all -- tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments
+tox -e all -- tempest.api.compute.volumes.test_volume_snapshots.VolumesSnapshotsTestJSON.test_volume_snapshot_create_get_list_delete
+tox -e all -- tempest.api.compute.volumes.test_volumes_get.VolumesGetTestJSON.test_volume_create_get_delete
+tox -e all -- tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list
+tox -e all -- tempest.api.volume.test_versions.VersionsTest.test_list_versions
+tox -e all -- tempest.api.volume.test_volumes_extend.VolumesExtendTest.test_volume_extend
+tox -e all -- tempest.api.volume.test_volumes_extend.VolumesExtendTest.test_volume_extend_when_volume_has_snapshot
+tox -e all -- tempest.api.volume.test_volumes_get.VolumesSummaryTest.test_show_volume_summary
+tox -e all -- tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list
+tox -e all -- tempest.api.volume.test_volumes_snapshots.VolumesSnapshotTestJSON.test_snapshot_create_delete_with_volume_in_use
+tox -e all -- tempest.api.volume.test_volumes_snapshots.VolumesSnapshotTestJSON.test_snapshot_create_get_list_update_delete
+tox -e all -- tempest.api.volume.test_volumes_snapshots.VolumesSnapshotTestJSON.test_snapshot_create_offline_delete_online
+tox -e all -- tempest.api.volume.test_volumes_snapshots.VolumesSnapshotTestJSON.test_volume_from_snapshot
+tox -e all -- tempest.api.volume.test_volumes_snapshots.VolumesSnapshotTestJSON.test_volume_from_snapshot_no_size
+tox -e all -- tempest.api.volume.test_volumes_snapshots_list.VolumesSnapshotListTestJSON.test_snapshot_list_param_limit
+cd $current_dir
+timing_exit tempest_tests
+
+timing_enter test_cleanup
+finish_test
+
+trap - SIGINT SIGTERM EXIT
+nvmftestfini
+timing_exit test_cleanup
diff --git a/src/spdk/test/pmem/common.sh b/src/spdk/test/pmem/common.sh
new file mode 100644
index 000000000..844930df2
--- /dev/null
+++ b/src/spdk/test/pmem/common.sh
@@ -0,0 +1,91 @@
+# Prints an error message, closes the vhost app, removes the pmem pool file
+# and returns the error code
+# input: error message, error code
+function error() {
+ local error_code=${2:-1}
+ echo "==========="
+ echo -e "ERROR: $1"
+ echo "error code: $error_code"
+ echo "==========="
+ vhost_kill 0
+ pmem_clean_pool_file
+ return $error_code
+}
+
+# check if there is pool file & remove it
+# input: path to pool file
+# default: $default_pool_file
+function pmem_clean_pool_file() {
+ local pool_file=${1:-$default_pool_file}
+
+ if [ -f $pool_file ]; then
+ echo "Deleting old pool_file"
+ rm $pool_file
+ fi
+}
+
+# Create a new pmem pool file
+# input: path to pool file, size in MB, block_size
+# default: $default_pool_file 32 512
+function pmem_create_pool_file() {
+ local pool_file=${1:-$default_pool_file}
+ local size=${2:-32}
+ local block_size=${3:-512}
+
+ pmem_clean_pool_file $pool_file
+ echo "Creating new pool file"
+ if ! $rpc_py bdev_pmem_create_pool $pool_file $size $block_size; then
+ error "Creating pool_file failed!"
+ fi
+
+ if [ ! -f $pool_file ]; then
+ error "Creating pool_file failed!"
+ fi
+}
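+# Example: `pmem_create_pool_file "$testdir/pool_file" 64 4096` creates a 64 MB
+# pool with a 4096-byte block size (all arguments are optional, see defaults above).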
+
+function pmem_unmount_ramspace() {
+ if [ -d "$testdir/ramspace" ]; then
+ if mount | grep -q "$testdir/ramspace"; then
+ umount $testdir/ramspace
+ fi
+
+ rm -rf $testdir/ramspace
+ fi
+}
+
+function pmem_print_tc_name() {
+ echo ""
+ echo "==============================================================="
+ echo "Now running: $1"
+ echo "==============================================================="
+}
+
+function vhost_start() {
+ local vhost_pid
+
+ $SPDK_BIN_DIR/vhost &
+
+ vhost_pid=$!
+ echo $vhost_pid > $testdir/vhost.pid
+ waitforlisten $vhost_pid
+}
+
+function vhost_kill() {
+ local vhost_pid_file="$testdir/vhost.pid"
+ local vhost_pid
+ vhost_pid="$(cat $vhost_pid_file)"
+
+ if [[ ! -f $vhost_pid_file ]]; then
+ echo -e "ERROR: No vhost pid file found!"
+ return 1
+ fi
+
+ if ! kill -s INT $vhost_pid; then
+ echo -e "ERROR: Failed to exit vhost / invalid pid!"
+ rm $vhost_pid_file
+ return 1
+ fi
+
+ sleep 1
+ rm $vhost_pid_file
+}
diff --git a/src/spdk/test/pmem/pmem.sh b/src/spdk/test/pmem/pmem.sh
new file mode 100755
index 000000000..bca8b4db7
--- /dev/null
+++ b/src/spdk/test/pmem/pmem.sh
@@ -0,0 +1,683 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/pmem/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py "
+
+enable_script_debug=false
+test_info=false
+test_create=false
+test_delete=false
+test_construct_bdev=false
+test_delete_bdev=false
+test_all=true
+test_all_get=false
+default_pool_file="$testdir/pool_file"
+obj_pool_file="$testdir/obj_pool_file"
+bdev_name=pmem0
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for automated RPC tests for PMEM"
+ echo
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help Print help and exit"
+ echo "-x set -x for script debug"
+ echo " --info Run test cases for bdev_pmem_get_pool_info"
+ echo " --create Run test cases for bdev_pmem_create_pool"
+ echo " --delete Run test cases for bdev_pmem_delete_pool"
+ echo " --construct_bdev Run test cases for constructing pmem bdevs"
+ echo " --delete_bdev Run test cases for deleting pmem bdevs"
+ echo " --all Run all test cases (default)"
+ exit 0
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ info)
+ test_info=true
+ test_all=false
+ ;;
+ create)
+ test_create=true
+ test_all=false
+ ;;
+ delete)
+ test_delete=true
+ test_all=false
+ ;;
+ construct_bdev)
+ test_construct_bdev=true
+ test_all=false
+ ;;
+ delete_bdev)
+ test_delete_bdev=true
+ test_all=false
+ ;;
+ all) test_all_get=true ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x) enable_script_debug=true ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+if $test_all_get; then
+ test_all=true
+fi
+
+if [[ $EUID -ne 0 ]]; then
+ echo "Go away user come back as root"
+ exit 1
+fi
+
+#================================================
+# bdev_pmem_get_pool_info tests
+#================================================
+function bdev_pmem_get_pool_info_tc1() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+
+ if $rpc_py bdev_pmem_get_pool_info; then
+ error "bdev_pmem_get_pool_info passed with missing argument!"
+ fi
+
+ return 0
+}
+
+function bdev_pmem_get_pool_info_tc2() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+
+ if $rpc_py bdev_pmem_get_pool_info $rootdir/non/existing/path/non_existent_file; then
+ error "bdev_pmem_get_pool_info passed with invalid path!"
+ fi
+
+ return 0
+}
+
+function bdev_pmem_get_pool_info_tc3() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file $obj_pool_file
+
+ echo "Creating new type OBJ pool file"
+ if hash pmempool; then
+ pmempool create -s 32000000 obj $obj_pool_file
+ else
+ echo "Warning: pmempool package not found! Creating stub file."
+ truncate -s "32M" $obj_pool_file
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $obj_pool_file; then
+ pmem_clean_pool_file $obj_pool_file
+ error "Pmem_pool_info passed with invalid pool_file type!"
+ fi
+
+ pmem_clean_pool_file $obj_pool_file
+ return 0
+}
+
+function bdev_pmem_get_pool_info_tc4() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
+ pmem_create_pool_file
+ if ! $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ error "Failed to get bdev_pmem_get_pool_info!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+#================================================
+# bdev_pmem_create_pool tests
+#================================================
+function bdev_pmem_create_pool_tc1() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
+ if $rpc_py bdev_pmem_create_pool 32 512; then
+ error "Mem pool file created w/out given path!"
+ fi
+
+ if $rpc_py bdev_pmem_create_pool $default_pool_file; then
+ error "Mem pool file created w/out size & block size arguments!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ error "bdev_pmem_create_pool created invalid pool file!"
+ fi
+
+ if $rpc_py bdev_pmem_create_pool $default_pool_file 32; then
+ error "Mem pool file created w/out block size argument!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ error "bdev_pmem_create_pool created invalid pool file!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function bdev_pmem_create_pool_tc2() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
+ if $rpc_py bdev_pmem_create_pool $rootdir/non/existing/path/non_existent_file 32 512; then
+ error "Mem pool file created with incorrect path!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $rootdir/non/existing/path/non_existent_file; then
+ error "bdev_pmem_create_pool created invalid pool file!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function bdev_pmem_create_pool_tc3() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
+ if ! $rpc_py bdev_pmem_create_pool $default_pool_file 256 512; then
+ error "Failed to create pmem pool!"
+ fi
+
+ if ! $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ error "Failed to get pmem info"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete_pool $default_pool_file; then
+ error "Failed to delete pool file!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ error "Got pmem file info but file should be deleted"
+ fi
+
+ if [ -f $default_pool_file ]; then
+ error "Failed to delete pmem file!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function bdev_pmem_create_pool_tc4() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+
+ pmem_unmount_ramspace
+ mkdir $rootdir/test/pmem/ramspace
+ mount -t tmpfs -o size=300m tmpfs $rootdir/test/pmem/ramspace
+ if ! $rpc_py bdev_pmem_create_pool $rootdir/test/pmem/ramspace/pool_file 256 512; then
+ pmem_unmount_ramspace
+ error "Failed to create pmem pool!"
+ fi
+
+ if ! $rpc_py bdev_pmem_get_pool_info $rootdir/test/pmem/ramspace/pool_file; then
+ pmem_unmount_ramspace
+ error "Failed to get pmem info"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete_pool $rootdir/test/pmem/ramspace/pool_file; then
+ pmem_unmount_ramspace
+ error "Failed to delete pool file!"
+ fi
+
+ if [ -f $rootdir/test/pmem/ramspace/pool_file ]; then
+ pmem_unmount_ramspace
+ error "Failed to delete pmem file / file still exists!"
+ fi
+
+ pmem_unmount_ramspace
+ return 0
+}
+
+function bdev_pmem_create_pool_tc5() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+ local pmem_block_size
+ local pmem_num_block
+
+ if ! $rpc_py bdev_pmem_create_pool $default_pool_file 256 512; then
+ error "Failed to create pmem pool!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ pmem_block_size=$($rpc_py bdev_pmem_get_pool_info $default_pool_file | jq -r '.[] .block_size')
+ pmem_num_block=$($rpc_py bdev_pmem_get_pool_info $default_pool_file | jq -r '.[] .num_blocks')
+ else
+ error "Failed to get pmem info!"
+ fi
+
+ if $rpc_py bdev_pmem_create_pool $default_pool_file 512 4096; then
+ error "Pmem pool with already occupied path has been created!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ if [ $pmem_block_size != $($rpc_py bdev_pmem_get_pool_info $default_pool_file | jq -r '.[] .block_size') ]; then
+ error "Invalid block size of pmem pool!"
+ fi
+
+ if [ $pmem_num_block != $($rpc_py bdev_pmem_get_pool_info $default_pool_file | jq -r '.[] .num_blocks') ]; then
+ error "Invalid number of blocks of pmem pool!"
+ fi
+ else
+ error "Failed to get pmem info!"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete_pool $default_pool_file; then
+ error "Failed to delete pmem file!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function bdev_pmem_create_pool_tc6() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+ local created_pmem_block_size
+
+ for i in 511 512 1024 2048 4096 131072 262144; do
+ if ! $rpc_py bdev_pmem_create_pool $default_pool_file 256 $i; then
+ error "Failed to create pmem pool!"
+ fi
+
+ if ! created_pmem_block_size=$($rpc_py bdev_pmem_get_pool_info $default_pool_file | jq -r '.[] .block_size'); then
+ error "Failed to get pmem info!"
+ fi
+
+ if [ $i != $created_pmem_block_size ]; then
+ error "Invalid block size of pmem pool!"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete_pool $default_pool_file; then
+ error "Failed to delete pmem file!"
+ fi
+ done
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function bdev_pmem_create_pool_tc7() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
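+	# 15 MB is below the minimum pool size enforced by libpmemblk (16 MB), so creation must fail.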
+ if $rpc_py bdev_pmem_create_pool $default_pool_file 15 512; then
+ error "Created pmem pool with invalid size!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+		error "Pmem file shouldn't exist!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function bdev_pmem_create_pool_tc8() {
+	pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
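+	# With a 64 KiB block size a 32 MB pool cannot hold enough usable blocks, so creation is expected to fail.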
+ if $rpc_py bdev_pmem_create_pool $default_pool_file 32 65536; then
+ error "Created pmem pool with invalid block number!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+		error "Pmem file shouldn't exist!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function bdev_pmem_create_pool_tc9() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
+ if $rpc_py bdev_pmem_create_pool $default_pool_file 256 -1; then
+ error "Created pmem pool with negative block size number!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+		error "bdev_pmem_create_pool created invalid pool file!"
+ fi
+
+ if $rpc_py bdev_pmem_create_pool $default_pool_file -1 512; then
+ error "Created pmem pool with negative size number!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+		error "bdev_pmem_create_pool created invalid pool file!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+#================================================
+# bdev_pmem_delete_pool tests
+#================================================
+function bdev_pmem_delete_pool_tc1() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
+ if $rpc_py bdev_pmem_delete_pool $default_pool_file; then
+		error "bdev_pmem_delete_pool deleted nonexistent pool file!"
+ fi
+
+ return 0
+}
+
+function bdev_pmem_delete_pool_tc2() {
+	pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file $obj_pool_file
+
+ echo "Creating new type OBJ pool file"
+ if hash pmempool; then
+ pmempool create -s 32000000 obj $obj_pool_file
+ else
+ echo "Warning: pmempool package not found! Creating stub file."
+ truncate -s "32M" $obj_pool_file
+ fi
+
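+	# bdev_pmem RPCs operate only on pmemblk pools, so an OBJ-type pool must be rejected here.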
+ if $rpc_py bdev_pmem_delete_pool $obj_pool_file; then
+ pmem_clean_pool_file $obj_pool_file
+ error "bdev_pmem_delete_pool deleted invalid pmem pool type!"
+ fi
+
+ pmem_clean_pool_file $obj_pool_file
+ return 0
+}
+
+function bdev_pmem_delete_pool_tc3() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
+ pmem_create_pool_file
+ if ! $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ error "Failed to get info on pmem pool file!"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete_pool $default_pool_file; then
+ error "Failed to delete pmem pool file!"
+ fi
+
+ if $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+		error "Pmem pool file still exists after bdev_pmem_delete_pool!"
+ fi
+
+ return 0
+}
+
+function bdev_pmem_delete_pool_tc4() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+
+ bdev_pmem_delete_pool_tc3
+ if $rpc_py bdev_pmem_delete_pool $default_pool_file; then
+ error "Deleted pmem pool file that shouldn't exist!"
+ fi
+
+ return 0
+}
+
+#================================================
+# bdev_pmem_create tests
+#================================================
+function bdev_pmem_create_tc1() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
+ pmem_create_pool_file
+ if $rpc_py bdev_pmem_create; then
+ error "bdev_pmem_create passed with missing argument!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function bdev_pmem_create_tc2() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+
+ pmem_create_pool_file
+ if $rpc_py bdev_pmem_create -n $bdev_name $rootdir/non/existing/path/non_existent_file; then
+ error "Created pmem bdev w/out valid pool file!"
+ fi
+
+ if $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qi pmem; then
+ error "bdev_pmem_create passed with invalid argument!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function bdev_pmem_create_tc3() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+
+ truncate -s 32M $rootdir/test/pmem/random_file
+ if $rpc_py bdev_pmem_create -n $bdev_name $rootdir/test/pmem/random_file; then
+ error "Created pmem bdev from random file!"
+ fi
+
+ if [ -f $rootdir/test/pmem/random_file ]; then
+ echo "Deleting previously created random file"
+ rm $rootdir/test/pmem/random_file
+ fi
+
+ return 0
+}
+
+function bdev_pmem_create_tc4() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file $obj_pool_file
+
+ echo "Creating new type OBJ pool file"
+ if hash pmempool; then
+ pmempool create -s 32000000 obj $obj_pool_file
+ else
+ echo "Warning: pmempool package not found! Creating stub file."
+ truncate -s "32M" $obj_pool_file
+ fi
+
+ if $rpc_py bdev_pmem_create -n $bdev_name $obj_pool_file; then
+ pmem_clean_pool_file $obj_pool_file
+ error "Created pmem bdev from obj type pmem file!"
+ fi
+
+ pmem_clean_pool_file $obj_pool_file
+ return 0
+}
+
+function bdev_pmem_create_tc5() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+ pmem_create_pool_file
+ local pmem_bdev_name
+
+ if ! $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ error "Failed to get pmem info!"
+ fi
+
+ if ! pmem_bdev_name=$($rpc_py bdev_pmem_create -n $bdev_name $default_pool_file); then
+ error "Failed to create pmem bdev"
+ fi
+
+ if ! $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qi $pmem_bdev_name; then
+ error "Pmem bdev not found!"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete $pmem_bdev_name; then
+ error "Failed to delete pmem bdev!"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete_pool $default_pool_file; then
+ error "Failed to delete pmem pool file!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function bdev_pmem_create_tc6() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ local pmem_bdev_name
+ pmem_clean_pool_file
+
+ pmem_create_pool_file
+ if ! $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ error "Failed to get info on pmem pool file!"
+ fi
+
+ if ! pmem_bdev_name=$($rpc_py bdev_pmem_create -n $bdev_name $default_pool_file); then
+ error "Failed to create pmem bdev!"
+ fi
+
+ if ! $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qi $pmem_bdev_name; then
+ error "Pmem bdev not found!"
+ fi
+
+ if $rpc_py bdev_pmem_create -n $bdev_name $default_pool_file; then
+ error "Constructed pmem bdev with occupied path!"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete $pmem_bdev_name; then
+ error "Failed to delete pmem bdev!"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete_pool $default_pool_file; then
+ error "Failed to delete pmem pool file!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+#================================================
+# bdev_pmem_delete tests
+#================================================
+function delete_bdev_tc1() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ local pmem_bdev_name
+ local bdevs_names
+ pmem_clean_pool_file
+
+ pmem_create_pool_file $default_pool_file 256 512
+ if ! $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ error "Failed to get pmem info!"
+ fi
+
+ if ! pmem_bdev_name=$($rpc_py bdev_pmem_create -n $bdev_name $default_pool_file); then
+ error "Failed to create pmem bdev!"
+ fi
+
+ if ! $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qi $pmem_bdev_name; then
+ error "$pmem_bdev_name bdev not found!"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete $pmem_bdev_name; then
+ error "Failed to delete $pmem_bdev_name bdev!"
+ fi
+
+ bdevs_names=$($rpc_py bdev_get_bdevs | jq -r '.[] .name')
+ if echo $bdevs_names | grep -qi $pmem_bdev_name; then
+ error "$pmem_bdev_name bdev is not deleted!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+function delete_bdev_tc2() {
+ pmem_print_tc_name ${FUNCNAME[0]}
+ pmem_clean_pool_file
+ pmem_create_pool_file $default_pool_file 256 512
+ local pmem_bdev_name
+
+ if ! $rpc_py bdev_pmem_get_pool_info $default_pool_file; then
+ error "Failed to get pmem info!"
+ fi
+
+ if ! pmem_bdev_name=$($rpc_py bdev_pmem_create -n $bdev_name $default_pool_file); then
+ error "Failed to create pmem bdev"
+ fi
+
+ if ! $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qi $pmem_bdev_name; then
+ error "Pmem bdev not found!"
+ fi
+
+ if ! $rpc_py bdev_pmem_delete $pmem_bdev_name; then
+ error "Failed to delete pmem bdev!"
+ fi
+
+ if $rpc_py bdev_pmem_delete $pmem_bdev_name; then
+ error "bdev_pmem_delete deleted pmem bdev for second time!"
+ fi
+
+ pmem_clean_pool_file
+ return 0
+}
+
+vhost_start
+if ! $enable_script_debug; then
+ set +x
+fi
+
+if $test_info || $test_all; then
+ bdev_pmem_get_pool_info_tc1
+ bdev_pmem_get_pool_info_tc2
+ bdev_pmem_get_pool_info_tc3
+ bdev_pmem_get_pool_info_tc4
+fi
+
+if $test_create || $test_all; then
+ bdev_pmem_create_pool_tc1
+ bdev_pmem_create_pool_tc2
+ bdev_pmem_create_pool_tc3
+ bdev_pmem_create_pool_tc4
+ bdev_pmem_create_pool_tc5
+ bdev_pmem_create_pool_tc6
+ bdev_pmem_create_pool_tc7
+ bdev_pmem_create_pool_tc8
+ bdev_pmem_create_pool_tc9
+fi
+
+if $test_delete || $test_all; then
+ bdev_pmem_delete_pool_tc1
+ bdev_pmem_delete_pool_tc2
+ bdev_pmem_delete_pool_tc3
+ bdev_pmem_delete_pool_tc4
+fi
+
+if $test_construct_bdev || $test_all; then
+ bdev_pmem_create_tc1
+ bdev_pmem_create_tc2
+ bdev_pmem_create_tc3
+ bdev_pmem_create_tc4
+ bdev_pmem_create_tc5
+ bdev_pmem_create_tc6
+fi
+
+if $test_delete_bdev || $test_all; then
+ delete_bdev_tc1
+ delete_bdev_tc2
+fi
+
+pmem_clean_pool_file
+vhost_kill 0
diff --git a/src/spdk/test/rpc/rpc.sh b/src/spdk/test/rpc/rpc.sh
new file mode 100755
index 000000000..56da28cf5
--- /dev/null
+++ b/src/spdk/test/rpc/rpc.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+# Basic sanity check that RPC commands take effect on the SPDK target
+function rpc_integrity() {
+ time {
+ bdevs=$($rpc bdev_get_bdevs)
+ [ "$(jq length <<< "$bdevs")" == "0" ]
+
+ malloc=$($rpc bdev_malloc_create 8 512)
+ bdevs=$($rpc bdev_get_bdevs)
+ [ "$(jq length <<< "$bdevs")" == "1" ]
+
+ $rpc bdev_passthru_create -b "$malloc" -p Passthru0
+ bdevs=$($rpc bdev_get_bdevs)
+ [ "$(jq length <<< "$bdevs")" == "2" ]
+
+ $rpc bdev_passthru_delete Passthru0
+ $rpc bdev_malloc_delete $malloc
+ bdevs=$($rpc bdev_get_bdevs)
+ [ "$(jq length <<< "$bdevs")" == "0" ]
+ }
+}
+
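+# rpc.py loads the module named by --plugin from PYTHONPATH (exported to $testdir below)
+# and calls its spdk_rpc_plugin_initialize() to register the extra subcommands used here.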
+function rpc_plugins() {
+ time {
+ malloc=$($rpc --plugin rpc_plugin create_malloc)
+ bdevs=$($rpc bdev_get_bdevs)
+ [ "$(jq length <<< "$bdevs")" == "1" ]
+
+ $rpc --plugin rpc_plugin delete_malloc $malloc
+ bdevs=$($rpc bdev_get_bdevs)
+ [ "$(jq length <<< "$bdevs")" == "0" ]
+ }
+}
+
+$SPDK_BIN_DIR/spdk_tgt &
+spdk_pid=$!
+trap 'killprocess $spdk_pid; exit 1' SIGINT SIGTERM EXIT
+waitforlisten $spdk_pid
+
+export PYTHONPATH=$testdir
+
+# basic integrity test
+rpc="$rootdir/scripts/rpc.py"
+run_test "rpc_integrity" rpc_integrity
+run_test "rpc_plugins" rpc_plugins
+# same integrity test, but with rpc_cmd() instead
+rpc="rpc_cmd"
+run_test "rpc_daemon_integrity" rpc_integrity
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_pid
diff --git a/src/spdk/test/rpc/rpc_plugin.py b/src/spdk/test/rpc/rpc_plugin.py
new file mode 100644
index 000000000..18c2d7c51
--- /dev/null
+++ b/src/spdk/test/rpc/rpc_plugin.py
@@ -0,0 +1,24 @@
+from rpc.client import print_json
+
+
+def malloc_create(args):
+ params = {'num_blocks': 256, 'block_size': 4096}
+ return args.client.call('bdev_malloc_create', params)
+
+
+def malloc_delete(args):
+ params = {'name': args.name}
+ return args.client.call('bdev_malloc_delete', params)
+
+
+def create_malloc(args):
+ print_json(malloc_create(args))
+
+
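+# Loaded by scripts/rpc.py via "--plugin rpc_plugin"; the function below registers the
+# create_malloc/delete_malloc subcommands exercised by rpc.sh.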
+def spdk_rpc_plugin_initialize(subparsers):
+ p = subparsers.add_parser('create_malloc', help='Create malloc backend')
+ p.set_defaults(func=create_malloc)
+
+ p = subparsers.add_parser('delete_malloc', help='Delete malloc backend')
+ p.add_argument('name', help='malloc bdev name')
+ p.set_defaults(func=malloc_delete)
diff --git a/src/spdk/test/rpc_client/.gitignore b/src/spdk/test/rpc_client/.gitignore
new file mode 100644
index 000000000..e878ca3a4
--- /dev/null
+++ b/src/spdk/test/rpc_client/.gitignore
@@ -0,0 +1 @@
+rpc_client_test
diff --git a/src/spdk/test/rpc_client/Makefile b/src/spdk/test/rpc_client/Makefile
new file mode 100644
index 000000000..e26b17470
--- /dev/null
+++ b/src/spdk/test/rpc_client/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = rpc_client_test
+
+C_SRCS := rpc_client_test.c
+
+SPDK_LIB_LIST = jsonrpc json rpc log util
+
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
diff --git a/src/spdk/test/rpc_client/rpc_client.sh b/src/spdk/test/rpc_client/rpc_client.sh
new file mode 100755
index 000000000..2d20f5b7c
--- /dev/null
+++ b/src/spdk/test/rpc_client/rpc_client.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+$rootdir/test/rpc_client/rpc_client_test
+
+trap - SIGINT SIGTERM EXIT
diff --git a/src/spdk/test/rpc_client/rpc_client_test.c b/src/spdk/test/rpc_client/rpc_client_test.c
new file mode 100644
index 000000000..4b421f7bb
--- /dev/null
+++ b/src/spdk/test/rpc_client/rpc_client_test.c
@@ -0,0 +1,461 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/event.h"
+#include "spdk/jsonrpc.h"
+#include "spdk/util.h"
+#include "spdk/rpc.h"
+
+
+#define RPC_MAX_METHODS 200
+#define JOIN_TIMEOUT_S 1
+
+static const char *g_rpcsock_addr = SPDK_DEFAULT_RPC_ADDR;
+static int g_addr_family = AF_UNIX;
+
+struct get_jsonrpc_methods_resp {
+ char *method_names[RPC_MAX_METHODS];
+ size_t method_num;
+};
+
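+/* Poll the client until a complete response is available (rc > 0) or a fatal
+ * error other than -ENOTCONN is reported. */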
+static int
+_rpc_client_wait_for_response(struct spdk_jsonrpc_client *client)
+{
+ int rc;
+
+ do {
+ rc = spdk_jsonrpc_client_poll(client, 1);
+ } while (rc == 0 || rc == -ENOTCONN);
+
+ if (rc <= 0) {
+ SPDK_ERRLOG("Failed to get response: %d\n", rc);
+ }
+
+ return rc;
+}
+
+static int
+get_jsonrpc_method_json_parser(struct get_jsonrpc_methods_resp *resp,
+ const struct spdk_json_val *result)
+{
+ return spdk_json_decode_array(result, spdk_json_decode_string, resp->method_names,
+ RPC_MAX_METHODS, &resp->method_num, sizeof(char *));
+}
+
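+/* Send rpc_get_methods and verify that method_name appears in the returned list. */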
+static int
+spdk_jsonrpc_client_check_rpc_method(struct spdk_jsonrpc_client *client, char *method_name)
+{
+ int rc, i;
+ struct spdk_jsonrpc_client_response *json_resp = NULL;
+ struct get_jsonrpc_methods_resp resp = {};
+ struct spdk_json_write_ctx *w;
+ struct spdk_jsonrpc_client_request *request;
+
+ request = spdk_jsonrpc_client_create_request();
+ if (request == NULL) {
+ return -ENOMEM;
+ }
+
+ w = spdk_jsonrpc_begin_request(request, 1, "rpc_get_methods");
+ spdk_jsonrpc_end_request(request, w);
+ spdk_jsonrpc_client_send_request(client, request);
+
+ rc = _rpc_client_wait_for_response(client);
+ if (rc <= 0) {
+ goto out;
+ }
+
+ json_resp = spdk_jsonrpc_client_get_response(client);
+ if (json_resp == NULL) {
+ SPDK_ERRLOG("spdk_jsonrpc_client_get_response() failed\n");
+ rc = -1;
+ goto out;
+	}
+
+ /* Check for error response */
+ if (json_resp->error != NULL) {
+ SPDK_ERRLOG("Unexpected error response\n");
+ rc = -1;
+ goto out;
+ }
+
+ assert(json_resp->result);
+
+ rc = get_jsonrpc_method_json_parser(&resp, json_resp->result);
+ if (rc) {
+ SPDK_ERRLOG("get_jsonrpc_method_json_parser() failed\n");
+ goto out;
+ }
+
+ for (i = 0; i < (int)resp.method_num; i++) {
+ if (strcmp(method_name, resp.method_names[i]) == 0) {
+ rc = 0;
+ goto out;
+ }
+ }
+
+ rc = -1;
+ SPDK_ERRLOG("Method '%s' not found in response\n", method_name);
+
+out:
+ for (i = 0; i < (int)resp.method_num; i++) {
+ SPDK_NOTICELOG("%s\n", resp.method_names[i]);
+ free(resp.method_names[i]);
+ }
+
+ spdk_jsonrpc_client_free_response(json_resp);
+ return rc;
+}
+
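+/* Call test_null_params with an explicit "params": null and expect a boolean true result. */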
+static int
+spdk_jsonrpc_client_check_null_params_method(struct spdk_jsonrpc_client *client)
+{
+ int rc;
+ bool res = false;
+ struct spdk_jsonrpc_client_response *json_resp = NULL;
+ struct spdk_json_write_ctx *w;
+ struct spdk_jsonrpc_client_request *request;
+
+ request = spdk_jsonrpc_client_create_request();
+ if (request == NULL) {
+ return -ENOMEM;
+ }
+
+ w = spdk_jsonrpc_begin_request(request, 1, "test_null_params");
+ spdk_json_write_name(w, "params");
+ spdk_json_write_null(w);
+ spdk_jsonrpc_end_request(request, w);
+ spdk_jsonrpc_client_send_request(client, request);
+
+ rc = _rpc_client_wait_for_response(client);
+ if (rc <= 0) {
+ goto out;
+ }
+
+ json_resp = spdk_jsonrpc_client_get_response(client);
+ if (json_resp == NULL) {
+ SPDK_ERRLOG("spdk_jsonrpc_client_get_response() failed\n");
+ rc = -1;
+ goto out;
+	}
+
+ /* Check for error response */
+ if (json_resp->error != NULL) {
+ SPDK_ERRLOG("Unexpected error response\n");
+ rc = -1;
+ goto out;
+ }
+
+ assert(json_resp->result);
+
+ if (spdk_json_decode_bool(json_resp->result, &res) != 0 || res != true) {
+ SPDK_ERRLOG("Response is not a boolean or it is not 'true'\n");
+ rc = -EINVAL;
+ goto out;
+ } else {
+ rc = 0;
+ }
+
+out:
+ spdk_jsonrpc_client_free_response(json_resp);
+ return rc;
+}
+
+static void
+rpc_test_method_startup(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
+{
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+ "rpc_test_method_startup(): Method body not implemented");
+}
+SPDK_RPC_REGISTER("test_method_startup", rpc_test_method_startup, SPDK_RPC_STARTUP)
+
+static void
+rpc_test_method_runtime(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
+{
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+ "rpc_test_method_runtime(): Method body not implemented");
+}
+SPDK_RPC_REGISTER("test_method_runtime", rpc_test_method_runtime, SPDK_RPC_RUNTIME)
+
+static void
+rpc_test_method_null_params(struct spdk_jsonrpc_request *request,
+ const struct spdk_json_val *params)
+{
+ struct spdk_json_write_ctx *w;
+
+ if (params != NULL) {
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+ "rpc_test_method_null_params(): Parameters are not NULL");
+ return;
+ }
+ w = spdk_jsonrpc_begin_result(request);
+ assert(w != NULL);
+ spdk_json_write_bool(w, true);
+ spdk_jsonrpc_end_result(request, w);
+}
+SPDK_RPC_REGISTER("test_null_params", rpc_test_method_null_params, SPDK_RPC_RUNTIME)
+
+static bool g_conn_close_detected;
+
+static void
+rpc_test_conn_close_cb(struct spdk_jsonrpc_server_conn *conn, void *ctx)
+{
+ assert((intptr_t)ctx == 42);
+ g_conn_close_detected = true;
+}
+
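+/* Register a connection-close callback and verify the error codes for a duplicate
+ * registration (-EEXIST) and for exhausting the callback slots (-ENOSPC). */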
+static void
+rpc_hook_conn_close(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
+{
+ struct spdk_jsonrpc_server_conn *conn = spdk_jsonrpc_get_conn(request);
+ struct spdk_json_write_ctx *w;
+ int rc;
+
+ rc = spdk_jsonrpc_conn_add_close_cb(conn, rpc_test_conn_close_cb, (void *)(intptr_t)(42));
+	if (rc != 0) {
+ rc = spdk_jsonrpc_conn_add_close_cb(conn, rpc_test_conn_close_cb, (void *)(intptr_t)(42));
+ assert(rc == -ENOSPC);
+ }
+
+ rc = spdk_jsonrpc_conn_add_close_cb(conn, rpc_test_conn_close_cb, (void *)(intptr_t)(42));
+ if (rc != -EEXIST) {
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+						 "rpc_hook_conn_close(): rc != -EEXIST");
+ return;
+ }
+
+ rc = spdk_jsonrpc_conn_add_close_cb(conn, rpc_test_conn_close_cb, (void *)(intptr_t)(43));
+ if (rc != -ENOSPC) {
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+						 "rpc_hook_conn_close(): rc != -ENOSPC");
+ return;
+ }
+
+ w = spdk_jsonrpc_begin_result(request);
+ assert(w != NULL);
+ spdk_json_write_bool(w, true);
+ spdk_jsonrpc_end_result(request, w);
+}
+SPDK_RPC_REGISTER("hook_conn_close", rpc_hook_conn_close, SPDK_RPC_RUNTIME | SPDK_RPC_STARTUP)
+
+static int
+spdk_jsonrpc_client_hook_conn_close(struct spdk_jsonrpc_client *client)
+{
+ int rc;
+ bool res = false;
+ struct spdk_jsonrpc_client_response *json_resp = NULL;
+ struct spdk_json_write_ctx *w;
+ struct spdk_jsonrpc_client_request *request;
+
+ request = spdk_jsonrpc_client_create_request();
+ if (request == NULL) {
+ return -ENOMEM;
+ }
+
+ w = spdk_jsonrpc_begin_request(request, 1, "hook_conn_close");
+ spdk_jsonrpc_end_request(request, w);
+ spdk_jsonrpc_client_send_request(client, request);
+
+ rc = _rpc_client_wait_for_response(client);
+ if (rc <= 0) {
+ goto out;
+ }
+
+ json_resp = spdk_jsonrpc_client_get_response(client);
+ if (json_resp == NULL) {
+ SPDK_ERRLOG("spdk_jsonrpc_client_get_response() failed\n");
+ rc = -errno;
+ goto out;
+	}
+
+ /* Check for error response */
+ if (json_resp->error != NULL) {
+ SPDK_ERRLOG("Unexpected error response: %.*s\n", json_resp->error->len,
+ (char *)json_resp->error->start);
+ rc = -EIO;
+ goto out;
+ }
+
+ assert(json_resp->result);
+ if (spdk_json_decode_bool(json_resp->result, &res) != 0 || res != true) {
+		SPDK_ERRLOG("Response is not a boolean or it is not 'true'\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = 0;
+out:
+ spdk_jsonrpc_client_free_response(json_resp);
+ return rc;
+}
+
+volatile int g_rpc_server_th_stop;
+static sem_t g_rpc_server_th_listening;
+
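+/* Server thread: listen on the RPC socket, signal readiness via
+ * g_rpc_server_th_listening, then poll for connections until asked to stop. */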
+static void *
+rpc_server_th(void *arg)
+{
+ int rc;
+
+ rc = spdk_rpc_listen(g_rpcsock_addr);
+ if (rc) {
+ fprintf(stderr, "spdk_rpc_listen() failed: %d\n", rc);
+ sem_post(&g_rpc_server_th_listening);
+ goto out;
+ }
+
+ sem_post(&g_rpc_server_th_listening);
+
+ while (!g_rpc_server_th_stop) {
+ spdk_rpc_accept();
+ usleep(50);
+ }
+
+ spdk_rpc_close();
+out:
+ return (void *)(intptr_t)rc;
+}
+
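+/* Client thread: wait for the server to start listening, connect, and run the
+ * three client-side checks defined above. */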
+static void *
+rpc_client_th(void *arg)
+{
+ struct spdk_jsonrpc_client *client = NULL;
+ char *method_name = "rpc_get_methods";
+ int rc;
+
+ rc = sem_wait(&g_rpc_server_th_listening);
+ if (rc == -1) {
+		fprintf(stderr, "Failed waiting for server thread to start listening: rc=%d errno=%d\n", rc,
+ errno);
+ goto out;
+ }
+
+ client = spdk_jsonrpc_client_connect(g_rpcsock_addr, g_addr_family);
+ if (!client) {
+ fprintf(stderr, "spdk_jsonrpc_client_connect() failed: %d\n", errno);
+ rc = -1;
+ goto out;
+ }
+
+ rc = spdk_jsonrpc_client_check_rpc_method(client, method_name);
+ if (rc) {
+ fprintf(stderr, "spdk_jsonrpc_client_check_rpc_method() failed: rc=%d errno=%d\n", rc, errno);
+ goto out;
+ }
+
+ rc = spdk_jsonrpc_client_check_null_params_method(client);
+ if (rc) {
+		fprintf(stderr, "spdk_jsonrpc_client_check_null_params_method() failed: rc=%d errno=%d\n", rc, errno);
+ goto out;
+ }
+
+ rc = spdk_jsonrpc_client_hook_conn_close(client);
+ if (rc) {
+ fprintf(stderr, "spdk_jsonrpc_client_hook_conn_close() failed: rc=%d errno=%d\n", rc, errno);
+ goto out;
+ }
+
+out:
+ if (client) {
+ spdk_jsonrpc_client_close(client);
+ }
+
+ return (void *)(intptr_t)rc;
+}
+
+int main(int argc, char **argv)
+{
+ pthread_t srv_tid, client_tid;
+ int srv_tid_valid;
+ int client_tid_valid = -1;
+ intptr_t th_rc = INTPTR_MIN;
+ int rc = 0, err_cnt = 0;
+
+ sem_init(&g_rpc_server_th_listening, 0, 0);
+
+ srv_tid_valid = pthread_create(&srv_tid, NULL, rpc_server_th, NULL);
+ if (srv_tid_valid != 0) {
+ fprintf(stderr, "pthread_create() failed to create server thread: %d\n", srv_tid_valid);
+ goto out;
+ }
+
+ client_tid_valid = pthread_create(&client_tid, NULL, rpc_client_th, NULL);
+ if (client_tid_valid != 0) {
+ fprintf(stderr, "pthread_create(): failed to create client thread: %d\n", client_tid_valid);
+ goto out;
+ }
+
+out:
+ if (client_tid_valid == 0) {
+ rc = pthread_join(client_tid, (void **)&th_rc);
+ if (rc) {
+ fprintf(stderr, "pthread_join() on client thread failed (rc: %d)\n", rc);
+ err_cnt++;
+ } else if (th_rc) {
+			fprintf(stderr, "client thread reported failure (thread rc: %d)\n", (int)th_rc);
+ err_cnt++;
+ }
+ }
+
+ g_rpc_server_th_stop = 1;
+
+ if (srv_tid_valid == 0) {
+ rc = pthread_join(srv_tid, (void **)&th_rc);
+ if (rc) {
+ fprintf(stderr, "pthread_join() on server thread failed (rc: %d)\n", rc);
+ err_cnt++;
+ } else if (th_rc) {
+			fprintf(stderr, "server thread reported failure (thread rc: %d)\n", (int)th_rc);
+ err_cnt++;
+ }
+ }
+
+ if (g_conn_close_detected == false) {
+ fprintf(stderr, "Connection close not detected\n");
+ err_cnt++;
+ }
+
+ sem_destroy(&g_rpc_server_th_listening);
+
+ fprintf(stderr, "%s\n", err_cnt == 0 ? "OK" : "FAILED");
+ return err_cnt ? EXIT_FAILURE : 0;
+}
diff --git a/src/spdk/test/spdk_cunit.h b/src/spdk/test/spdk_cunit.h
new file mode 100644
index 000000000..6696bff35
--- /dev/null
+++ b/src/spdk/test/spdk_cunit.h
@@ -0,0 +1,56 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SPDK_CUNIT_H
+#define SPDK_CUNIT_H
+
+#include "spdk/stdinc.h"
+
+#include <CUnit/Basic.h>
+
+/*
+ * CU_ASSERT_FATAL calls a function that does a longjmp() internally, but only for fatal asserts,
+ * so the function itself is not marked as noreturn. Add an abort() after the assert to help
+ * static analyzers figure out that it really doesn't return.
+ * The abort() will never actually execute.
+ */
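+/*
+ * Illustrative usage (hypothetical test code):
+ *
+ *     buf = calloc(1, size);
+ *     SPDK_CU_ASSERT_FATAL(buf != NULL);
+ */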
+#define SPDK_CU_ASSERT_FATAL(cond) \
+ do { \
+ int result_ = !!(cond); \
+ CU_ASSERT_FATAL(result_); \
+ if (!result_) { \
+ abort(); \
+ } \
+ } while (0)
+
+#endif /* SPDK_CUNIT_H */
diff --git a/src/spdk/test/spdkcli/common.sh b/src/spdk/test/spdkcli/common.sh
new file mode 100644
index 000000000..fec6d0497
--- /dev/null
+++ b/src/spdk/test/spdkcli/common.sh
@@ -0,0 +1,45 @@
+spdkcli_job="$rootdir/test/spdkcli/spdkcli_job.py"
+spdk_clear_config_py="$rootdir/test/json_config/clear_config.py"
+
+function on_error_exit() {
+ set +e
+ if [ -n "$spdk_tgt_pid" ]; then
+ killprocess $spdk_tgt_pid
+ fi
+ if [ -n "$nvmf_tgt_pid" ]; then
+ killprocess $nvmf_tgt_pid
+ fi
+ if [ -n "$iscsi_tgt_pid" ]; then
+ killprocess $iscsi_tgt_pid
+ fi
+ if [ -n "$vhost_tgt_pid" ]; then
+ killprocess $vhost_tgt_pid
+ fi
+ rm -f $testdir/${MATCH_FILE} $testdir/match_files/spdkcli_details_vhost.test /tmp/sample_aio /tmp/sample_pmem
+ print_backtrace
+ exit 1
+}
+
+function run_spdk_tgt() {
+ $SPDK_BIN_DIR/spdk_tgt -m 0x3 -p 0 -s 4096 &
+ spdk_tgt_pid=$!
+ waitforlisten $spdk_tgt_pid
+}
+
+function run_nvmf_tgt() {
+ $SPDK_BIN_DIR/nvmf_tgt -m 0x3 -p 0 -s 4096 &
+ nvmf_tgt_pid=$!
+ waitforlisten $nvmf_tgt_pid
+}
+
+function run_vhost_tgt() {
+ $SPDK_BIN_DIR/vhost -m 0x3 -p 0 -s 4096 &
+ vhost_tgt_pid=$!
+ waitforlisten $vhost_tgt_pid
+}
+
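+# Dump "spdkcli.py ll" output for $SPDKCLI_BRANCH and compare it against the
+# corresponding ${MATCH_FILE}.match template using the match tool.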
+function check_match() {
+ $rootdir/scripts/spdkcli.py ll $SPDKCLI_BRANCH > $testdir/match_files/${MATCH_FILE}
+ $rootdir/test/app/match/match $testdir/match_files/${MATCH_FILE}.match
+ rm -f $testdir/match_files/${MATCH_FILE}
+}
diff --git a/src/spdk/test/spdkcli/iscsi.sh b/src/spdk/test/spdkcli/iscsi.sh
new file mode 100755
index 000000000..ff892ab36
--- /dev/null
+++ b/src/spdk/test/spdkcli/iscsi.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/spdkcli/common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+MATCH_FILE="spdkcli_iscsi.test"
+SPDKCLI_BRANCH="/iscsi"
+
+trap 'on_error_exit;' ERR
+
+timing_enter run_iscsi_tgt
+
+# Run the iSCSI target with --wait-for-rpc; framework_start_init must then be called explicitly below.
+$SPDK_BIN_DIR/iscsi_tgt -m 0x3 -p 0 -s 4096 --wait-for-rpc &
+iscsi_tgt_pid=$!
+waitforlisten $iscsi_tgt_pid
+$rootdir/scripts/rpc.py framework_start_init
+
+timing_exit run_iscsi_tgt
+
+timing_enter spdkcli_create_iscsi_config
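+# Each entry passed to spdkcli_job.py is: 'spdkcli command' 'expected output substring' <flag indicating whether to verify the output>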
+$spdkcli_job "'/bdevs/malloc create 32 512 Malloc0' 'Malloc0' True
+'/bdevs/malloc create 32 512 Malloc1' 'Malloc1' True
+'/bdevs/malloc create 32 512 Malloc2' 'Malloc2' True
+'/bdevs/malloc create 32 512 Malloc3' 'Malloc3' True
+'/iscsi/portal_groups create 1 \"127.0.0.1:3261 127.0.0.1:3263@0x1\"' 'host=127.0.0.1, port=3261' True
+'/iscsi/portal_groups create 2 127.0.0.1:3262' 'host=127.0.0.1, port=3262' True
+'/iscsi/initiator_groups create 2 ANY 10.0.2.15/32' 'hostname=ANY, netmask=10.0.2.15/32' True
+'/iscsi/initiator_groups create 3 ANZ 10.0.2.15/32' 'hostname=ANZ, netmask=10.0.2.15/32' True
+'/iscsi/initiator_groups add_initiator 2 ANW 10.0.2.16/32' 'hostname=ANW, netmask=10.0.2.16' True
+'/iscsi/target_nodes create Target0 Target0_alias \"Malloc0:0 Malloc1:1\" 1:2 64 g=1' 'Target0' True
+'/iscsi/target_nodes create Target1 Target1_alias Malloc2:0 1:2 64 g=1' 'Target1' True
+'/iscsi/target_nodes/iqn.2016-06.io.spdk:Target0 iscsi_target_node_add_pg_ig_maps \"1:3 2:2\"' 'portal_group1 - initiator_group3' True
+'/iscsi/target_nodes add_lun iqn.2016-06.io.spdk:Target1 Malloc3 2' 'Malloc3' True
+'/iscsi/auth_groups create 1 \"user:test1 secret:test1 muser:mutual_test1 msecret:mutual_test1,\
+user:test3 secret:test3 muser:mutual_test3 msecret:mutual_test3\"' 'user=test3' True
+'/iscsi/auth_groups add_secret 1 user=test2 secret=test2 muser=mutual_test2 msecret=mutual_test2' 'user=test2' True
+'/iscsi/auth_groups create 2 \"user:test4 secret:test4 muser:mutual_test4 msecret:mutual_test4\"' 'user=test4' True
+'/iscsi/target_nodes/iqn.2016-06.io.spdk:Target0 set_auth g=1 d=true' 'disable_chap: True' True
+'/iscsi/global_params set_auth g=1 d=true r=false' 'disable_chap: True' True
+'/iscsi ls' 'Malloc' True
+"
+timing_exit spdkcli_create_iscsi_config
+
+timing_enter spdkcli_check_match
+check_match
+timing_exit spdkcli_check_match
+
+timing_enter spdkcli_clear_iscsi_config
+$spdkcli_job "'/iscsi/auth_groups delete_secret 1 test2' 'user=test2'
+'/iscsi/auth_groups delete_secret_all 1' 'user=test1'
+'/iscsi/auth_groups delete 1' 'user=test1'
+'/iscsi/auth_groups delete_all' 'user=test4'
+'/iscsi/target_nodes/iqn.2016-06.io.spdk:Target0 iscsi_target_node_remove_pg_ig_maps \"1:3 2:2\"' 'portal_group1 - initiator_group3'
+'/iscsi/target_nodes delete iqn.2016-06.io.spdk:Target1' 'Target1'
+'/iscsi/target_nodes delete_all' 'Target0'
+'/iscsi/initiator_groups delete_initiator 2 ANW 10.0.2.16/32' 'ANW'
+'/iscsi/initiator_groups delete 3' 'ANZ'
+'/iscsi/initiator_groups delete_all' 'ANY'
+'/iscsi/portal_groups delete 1' '127.0.0.1:3261'
+'/iscsi/portal_groups delete_all' '127.0.0.1:3262'
+'/bdevs/malloc delete Malloc3' 'Malloc3'
+'/bdevs/malloc delete Malloc2' 'Malloc2'
+'/bdevs/malloc delete Malloc1' 'Malloc1'
+'/bdevs/malloc delete Malloc0' 'Malloc0'
+"
+timing_exit spdkcli_clear_iscsi_config
+
+killprocess $iscsi_tgt_pid
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_details_lvs.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_details_lvs.test.match
new file mode 100644
index 000000000..efb34b9e7
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_details_lvs.test.match
@@ -0,0 +1,9 @@
+{
+ "base_bdev": "Malloc0",
+ "block_size": 512,
+ "cluster_size": 4194304,
+ "free_clusters": 1,
+ "name": "lvs0",
+ "total_data_clusters": 9,
+ "uuid": "$(S)"
+}
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_details_vhost.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_details_vhost.test.match
new file mode 100644
index 000000000..afab7ba56
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_details_vhost.test.match
@@ -0,0 +1,32 @@
+{
+ "aliases": [],
+ "assigned_rate_limits": {
+ "r_mbytes_per_sec": $(N),
+ "rw_ios_per_sec": $(N),
+ "rw_mbytes_per_sec": $(N),
+ "w_mbytes_per_sec": $(N)
+ },
+ "block_size": $(N),
+ "claimed": false,
+ "driver_specific": {
+ "split": {
+ "base_bdev": "Nvme0n1",
+ "offset_blocks": $(N)
+ }
+ },
+ "name": "Nvme0n1p0",
+ "num_blocks": $(N),
+ "product_name": "Split Disk",
+ "supported_io_types": {
+ "flush": $(S),
+ "nvme_admin": $(S),
+ "nvme_io": $(S),
+ "read": $(S),
+ "reset": $(S),
+ "unmap": $(S),
+ "write": $(S),
+ "write_zeroes": $(S)
+ },
+ "uuid": "$(S)",
+ "zoned": false
+}
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_details_vhost_ctrl.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_details_vhost_ctrl.test.match
new file mode 100644
index 000000000..7e045fb7d
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_details_vhost_ctrl.test.match
@@ -0,0 +1,22 @@
+{
+ "backend_specific": {
+ "scsi": [
+ {
+ "id": 0,
+ "luns": [
+ {
+ "bdev_name": "Malloc2",
+ "id": 0
+ }
+ ],
+ "scsi_dev_num": 0,
+ "target_name": "Target 0"
+ }
+ ]
+ },
+ "cpumask": "0x3",
+ "ctrlr": "vhost_scsi1",
+ "delay_base_us": 20,
+ "iops_threshold": 1000000,
+ "socket": "$(S)/vhost_scsi1"
+}
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_details_vhost_target.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_details_vhost_target.test.match
new file mode 100644
index 000000000..8a31e49f0
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_details_vhost_target.test.match
@@ -0,0 +1,11 @@
+{
+ "id": 0,
+ "luns": [
+ {
+ "bdev_name": "Malloc2",
+ "id": 0
+ }
+ ],
+ "scsi_dev_num": 0,
+ "target_name": "Target 0"
+}
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_iscsi.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_iscsi.test.match
new file mode 100644
index 000000000..ffb314921
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_iscsi.test.match
@@ -0,0 +1,55 @@
+o- iscsi ..................................................................................................................... [...]
+ o- auth_groups ....................................................................................................... [Groups: 2]
+ | o- group1 ......................................................................................................... [Secrets: 3]
+ | | o- user=test1, secret=test1, muser=mutual_test1, msecret=mutual_test1 .................................................. [...]
+ | | o- user=test2, secret=test2, muser=mutual_test2, msecret=mutual_test2 .................................................. [...]
+ | | o- user=test3, secret=test3, muser=mutual_test3, msecret=mutual_test3 .................................................. [...]
+ | o- group2 ......................................................................................................... [Secrets: 1]
+ | o- user=test4, secret=test4, muser=mutual_test4, msecret=mutual_test4 .................................................. [...]
+ o- global_params ........................................................................................................... [...]
+ | o- allow_duplicated_isid: False .......................................................................................... [...]
+ | o- chap_group: 1 ......................................................................................................... [...]
+ | o- default_time2retain: 20 ............................................................................................... [...]
+ | o- default_time2wait: 2 .................................................................................................. [...]
+ | o- disable_chap: True .................................................................................................... [...]
+ | o- error_recovery_level: 0 ............................................................................................... [...]
+ | o- first_burst_length: 8192 .............................................................................................. [...]
+ | o- immediate_data: True .................................................................................................. [...]
+ | o- max_connections_per_session: 2 ........................................................................................ [...]
+ | o- max_queue_depth: 64 ................................................................................................... [...]
+ | o- max_sessions: 128 ..................................................................................................... [...]
+ | o- mutual_chap: False .................................................................................................... [...]
+ | o- node_base: iqn.2016-06.io.spdk ........................................................................................ [...]
+ | o- nop_in_interval: 30 ................................................................................................... [...]
+ | o- nop_timeout: 60 ....................................................................................................... [...]
+ | o- require_chap: False ................................................................................................... [...]
+ o- initiator_groups ........................................................................................ [Initiator groups: 2]
+ | o- initiator_group2 ............................................................................................ [Initiators: 2]
+ | | o- hostname=ANW, netmask=$(N).$(N).$(N).$(N)/32 $(S) [...]
+ | | o- hostname=ANY, netmask=$(N).$(N).$(N).$(N)/32 $(S) [...]
+ | o- initiator_group3 ............................................................................................ [Initiators: 1]
+ | o- hostname=ANZ, netmask=$(N).$(N).$(N).$(N)/32 $(S) [...]
+ o- iscsi_connections ............................................................................................ [Connections: 0]
+ o- portal_groups .............................................................................................. [Portal groups: 2]
+ | o- portal_group1 .................................................................................................. [Portals: 2]
+ | | o- host=127.0.0.1, port=3261 ........................................................................................... [...]
+ | | o- host=127.0.0.1, port=3263 ........................................................................................... [...]
+ | o- portal_group2 .................................................................................................. [Portals: 1]
+ | o- host=127.0.0.1, port=3262 ........................................................................................... [...]
+ o- target_nodes ................................................................................................ [Target nodes: 2]
+ o- iqn.2016-06.io.spdk:Target0 ......................................................................... [Id: 0, QueueDepth: 64]
+ | o- auths ........................................ [disable_chap: True, require_chap: False, mutual_chap: False, chap_group: 1]
+ | o- luns ............................................................................................................ [Luns: 2]
+ | | o- lun 0 ......................................................................................................... [Malloc0]
+ | | o- lun 1 ......................................................................................................... [Malloc1]
+ | o- pg_ig_maps ................................................................................................ [Pg_ig_maps: 3]
+ | o- portal_group1 - initiator_group2 .................................................................................. [...]
+ | o- portal_group1 - initiator_group3 .................................................................................. [...]
+ | o- portal_group2 - initiator_group2 .................................................................................. [...]
+ o- iqn.2016-06.io.spdk:Target1 ......................................................................... [Id: 1, QueueDepth: 64]
+ o- auths ....................................... [disable_chap: False, require_chap: False, mutual_chap: False, chap_group: 1]
+ o- luns ............................................................................................................ [Luns: 2]
+ | o- lun 0 ......................................................................................................... [Malloc2]
+ | o- lun 2 ......................................................................................................... [Malloc3]
+ o- pg_ig_maps ................................................................................................ [Pg_ig_maps: 1]
+ o- portal_group1 - initiator_group2 .................................................................................. [...]
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_nvmf.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_nvmf.test.match
new file mode 100644
index 000000000..736c53090
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_nvmf.test.match
@@ -0,0 +1,34 @@
+o- nvmf ...................................................................................................................... [...]
+ o- subsystem ..................................................................................................... [Subsystems: 4]
+ | o- nqn.2014-08.org.nvmexpress.discovery ......................................................... [st=Discovery, Allow any host]
+ | | o- hosts .......................................................................................................... [Hosts: 0]
+ | | o- listen_addresses ........................................................................................... [Addresses: 0]
+ | o- nqn.2014-08.org.spdk:cnode1 ...................................................... [sn=$(S), st=NVMe, Allow any host]
+ | | o- hosts .......................................................................................................... [Hosts: 1]
+ | | | o- nqn.2014-08.org.spdk:cnode2 ....................................................................................... [...]
+ | | o- listen_addresses ........................................................................................... [Addresses: 3]
+ | | | o- $(N).$(N).$(N).$(N):4260 $(S) [RDMA]
+ | | | o- $(N).$(N).$(N).$(N):4261 $(S) [RDMA]
+ | | | o- $(N).$(N).$(N).$(N):4262 $(S) [RDMA]
+ | | o- namespaces ................................................................................................ [Namespaces: 4]
+ | | o- Malloc3 .................................................................................................... [Malloc3, 1]
+ | | o- Malloc4 .................................................................................................... [Malloc4, 2]
+ | | o- Malloc5 .................................................................................................... [Malloc5, 3]
+ | | o- Malloc6 .................................................................................................... [Malloc6, 4]
+ | o- nqn.2014-08.org.spdk:cnode2 ...................................................... [sn=$(S), st=NVMe, Allow any host]
+ | | o- hosts .......................................................................................................... [Hosts: 0]
+ | | o- listen_addresses ........................................................................................... [Addresses: 1]
+ | | | o- $(N).$(N).$(N).$(N):4260 $(S) [RDMA]
+ | | o- namespaces ................................................................................................ [Namespaces: 1]
+ | | o- Malloc2 .................................................................................................... [Malloc2, 1]
+ | o- nqn.2014-08.org.spdk:cnode3 ...................................................... [sn=$(S), st=NVMe, Allow any host]
+ | o- hosts .......................................................................................................... [Hosts: 2]
+ | | o- nqn.2014-08.org.spdk:cnode1 ....................................................................................... [...]
+ | | o- nqn.2014-08.org.spdk:cnode2 ....................................................................................... [...]
+ | o- listen_addresses ........................................................................................... [Addresses: 2]
+ | | o- $(N).$(N).$(N).$(N):4260 $(S) [RDMA]
+ | | o- $(N).$(N).$(N).$(N):4261 $(S) [RDMA]
+ | o- namespaces ................................................................................................ [Namespaces: 1]
+ | o- Malloc1 .................................................................................................... [Malloc1, 1]
+ o- transport ..................................................................................................... [Transports: 1]
+ o- RDMA .................................................................................................................. [...]
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_pmem.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_pmem.test.match
new file mode 100644
index 000000000..3948afd26
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_pmem.test.match
@@ -0,0 +1,3 @@
+o- pmemblk .............................................................................................................. [Bdevs: 2]
+ o- pmem_bdev0 .......................................................................................... [Size=31.6M, Not claimed]
+ o- pmem_bdev1 .......................................................................................... [Size=31.6M, Not claimed]
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_pmem_info.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_pmem_info.test.match
new file mode 100644
index 000000000..cdebc2ba1
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_pmem_info.test.match
@@ -0,0 +1,12 @@
+[
+ {
+ "num_blocks": 64700,
+ "block_size": 512
+ }
+]
+[
+ {
+ "num_blocks": 64700,
+ "block_size": 512
+ }
+]
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_raid.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_raid.test.match
new file mode 100644
index 000000000..906c1717f
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_raid.test.match
@@ -0,0 +1,17 @@
+o- bdevs ..................................................................................................................... [...]
+ o- aio ................................................................................................................ [Bdevs: 0]
+ o- error .............................................................................................................. [Bdevs: 0]
+ o- iscsi .............................................................................................................. [Bdevs: 0]
+ o- logical_volume ..................................................................................................... [Bdevs: 0]
+ o- malloc ............................................................................................................. [Bdevs: 2]
+ | o- Malloc1 ................................................................................................ [Size=8.0M, Claimed]
+ | o- Malloc2 ................................................................................................ [Size=8.0M, Claimed]
+ o- null ............................................................................................................... [Bdevs: 0]
+ o- nvme ............................................................................................................... [Bdevs: 0]
+ o- pmemblk ............................................................................................................ [Bdevs: 0]
+ o- raid_volume ........................................................................................................ [Bdevs: 1]
+ | o- testraid .......................................................................................... [Size=16.0M, Not claimed]
+ o- rbd ................................................................................................................ [Bdevs: 0]
+ o- split_disk ......................................................................................................... [Bdevs: 0]
+ o- virtioblk_disk ..................................................................................................... [Bdevs: 0]
+ o- virtioscsi_disk .................................................................................................... [Bdevs: 0]
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_rbd.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_rbd.test.match
new file mode 100644
index 000000000..7d1257493
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_rbd.test.match
@@ -0,0 +1,3 @@
+o- rbd .................................................................................................................. [Bdevs: 2]
+ o- Ceph0 ............................................................................................. [Size=1000.0M, Not claimed]
+ o- Ceph1 ............................................................................................. [Size=1000.0M, Not claimed]
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_vhost.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_vhost.test.match
new file mode 100644
index 000000000..2a8c3c738
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_vhost.test.match
@@ -0,0 +1,54 @@
+o- / ......................................................................................................................... [...]
+ o- bdevs ................................................................................................................... [...]
+ | o- aio .............................................................................................................. [Bdevs: 2]
+ | | o- sample0 .......................................................................................... [Size=$(FP)M, Not claimed]
+ | | o- sample1 .......................................................................................... [Size=$(FP)M, Not claimed]
+ | o- error ............................................................................................................ [Bdevs: 2]
+ | | o- EE_Malloc1 ...................................................................................... [Size=$(FP)M, Not claimed]
+ | | o- EE_Malloc4 ...................................................................................... [Size=$(FP)M, Not claimed]
+ | o- iscsi ............................................................................................................ [Bdevs: 0]
+ | o- logical_volume ................................................................................................... [Bdevs: 2]
+ | | o- $(X)-$(X)-$(X)-$(X)-$(X) ................................................ [lvs0/lvol$(FP), Size=$(FP)M, Not claimed]
+ | | o- $(X)-$(X)-$(X)-$(X)-$(X) ................................................ [lvs0/lvol$(FP), Size=$(FP)M, Not claimed]
+ | o- malloc ........................................................................................................... [Bdevs: 6]
+ | | o- Malloc0 ............................................................................................. [Size=$(FP)M, Claimed]
+ | | o- Malloc1 ............................................................................................. [Size=$(FP)M, Claimed]
+ | | o- Malloc2 ......................................................................................... [Size=$(FP)M, Not claimed]
+ | | o- Malloc3 ......................................................................................... [Size=$(FP)M, Not claimed]
+ | | o- Malloc4 ............................................................................................. [Size=$(FP)M, Claimed]
+ | | o- Malloc5 ............................................................................................. [Size=$(FP)M, Claimed]
+ | o- null ............................................................................................................. [Bdevs: 2]
+ | | o- null_bdev0 ...................................................................................... [Size=$(FP)M, Not claimed]
+ | | o- null_bdev1 ...................................................................................... [Size=$(FP)M, Not claimed]
+ | o- nvme ............................................................................................................. [Bdevs: 1]
+ | | o- Nvme0n1 $(S) [Size=$(S), Claimed]
+ | o- pmemblk .......................................................................................................... [Bdevs: 0]
+ | o- raid_volume ...................................................................................................... [Bdevs: 0]
+ | o- rbd .............................................................................................................. [Bdevs: 0]
+ | o- split_disk ....................................................................................................... [Bdevs: 4]
+ | | o- Nvme0n1p0 $(S) [Size=$(FP)G, Not claimed]
+ | | o- Nvme0n1p1 $(S) [Size=$(FP)G, Not claimed]
+ | | o- Nvme0n1p2 $(S) [Size=$(FP)G, Not claimed]
+ | | o- Nvme0n1p3 $(S) [Size=$(FP)G, Not claimed]
+ | o- virtioblk_disk ................................................................................................... [Bdevs: 0]
+ | o- virtioscsi_disk .................................................................................................. [Bdevs: 0]
+ o- lvol_stores .................................................................................................. [Lvol stores: 2]
+ | o- lvs0 $(S) [Size=$(FP)M, Free=$(FP)M]
+ | o- lvs1 $(S) [Size=$(FP)M, Free=$(FP)M]
+ o- vhost ................................................................................................................... [...]
+ o- block ................................................................................................................. [...]
+ | o- vhost_blk1 $(S) [$(S)]
+ | | o- Nvme0n1p0 ......................................................................................................... [...]
+ | o- vhost_blk2 $(S) [$(S), Readonly]
+ | o- Nvme0n1p1 ......................................................................................................... [...]
+ o- scsi .................................................................................................................. [...]
+ o- vhost_scsi1 $(S) [$(S)]
+ | o- Target_0 .......................................................................................... [LUNs: 1,TargetID: 0]
+ | o- Malloc2 ......................................................................................................... [...]
+ o- vhost_scsi2 $(S) [$(S)]
+ o- Target_0 .......................................................................................... [LUNs: 1,TargetID: 0]
+ | o- Malloc3 ......................................................................................................... [...]
+ o- Target_1 .......................................................................................... [LUNs: 1,TargetID: 1]
+ | o- Nvme0n1p2 ....................................................................................................... [...]
+ o- Target_2 .......................................................................................... [LUNs: 1,TargetID: 2]
+ o- Nvme0n1p3 ....................................................................................................... [...]
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_virtio_pci.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_virtio_pci.test.match
new file mode 100644
index 000000000..5caf84e56
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_virtio_pci.test.match
@@ -0,0 +1,19 @@
+o- bdevs ..................................................................................................................... [...]
+ o- aio ................................................................................................................ [Bdevs: 0]
+ o- error .............................................................................................................. [Bdevs: 0]
+ o- iscsi .............................................................................................................. [Bdevs: 0]
+ o- logical_volume ..................................................................................................... [Bdevs: 0]
+ o- malloc ............................................................................................................. [Bdevs: 2]
+ | o- Malloc0 ........................................................................................... [Size=32.0M, Not claimed]
+ | o- Malloc1 ........................................................................................... [Size=32.0M, Not claimed]
+ o- null ............................................................................................................... [Bdevs: 0]
+ o- nvme ............................................................................................................... [Bdevs: 0]
+ o- pmemblk ............................................................................................................ [Bdevs: 0]
+ o- raid_volume ........................................................................................................ [Bdevs: 0]
+ o- rbd ................................................................................................................ [Bdevs: 0]
+ o- split_disk ......................................................................................................... [Bdevs: 0]
+ o- virtioblk_disk ..................................................................................................... [Bdevs: 1]
+ | o- virtioblk_pci $(S) [Size=$(FP)M, Not claimed]
+ o- virtioscsi_disk .................................................................................................... [Bdevs: 1]
+ o- virtioscsi_pci ............................................................................................... [$(S)]
+ o- virtioscsi_pcit0 $(S) [Size=$(FP)M, Not claimed]
diff --git a/src/spdk/test/spdkcli/match_files/spdkcli_virtio_user.test.match b/src/spdk/test/spdkcli/match_files/spdkcli_virtio_user.test.match
new file mode 100644
index 000000000..157938e74
--- /dev/null
+++ b/src/spdk/test/spdkcli/match_files/spdkcli_virtio_user.test.match
@@ -0,0 +1,8 @@
+o- vhost ..................................................................................................................... [...]
+ o- block ................................................................................................................... [...]
+ | o- sample_block $(S) [$(S)]
+ | o- Malloc1 ............................................................................................................. [...]
+ o- scsi .................................................................................................................... [...]
+ o- sample_scsi $(S) [$(S)]
+ o- Target_0 ............................................................................................ [LUNs: 1,TargetID: 0]
+ o- Malloc0 ........................................................................................................... [...]
diff --git a/src/spdk/test/spdkcli/nvmf.sh b/src/spdk/test/spdkcli/nvmf.sh
new file mode 100755
index 000000000..1c2499d11
--- /dev/null
+++ b/src/spdk/test/spdkcli/nvmf.sh
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/spdkcli/common.sh
+source $rootdir/test/nvmf/common.sh
+
+MATCH_FILE="spdkcli_nvmf.test"
+SPDKCLI_BRANCH="/nvmf"
+
+trap 'on_error_exit; revert_soft_roce' ERR
+rdma_device_init
+
+timing_enter run_nvmf_tgt
+run_nvmf_tgt
+timing_exit run_nvmf_tgt
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+
+timing_enter spdkcli_create_nvmf_config
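+# Each line passed to $spdkcli_job is "'<spdkcli command>' '<expected element>' [True|False]":
+# the command is run and the element is then asserted to be present (True) or
+# absent (False, or flag omitted) in the listing of the command's top-level path.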
+$spdkcli_job "'/bdevs/malloc create 32 512 Malloc1' 'Malloc1' True
+'/bdevs/malloc create 32 512 Malloc2' 'Malloc2' True
+'/bdevs/malloc create 32 512 Malloc3' 'Malloc3' True
+'/bdevs/malloc create 32 512 Malloc4' 'Malloc4' True
+'/bdevs/malloc create 32 512 Malloc5' 'Malloc5' True
+'/bdevs/malloc create 32 512 Malloc6' 'Malloc6' True
+'nvmf/transport create RDMA max_io_qpairs_per_ctrlr=4 io_unit_size=8192' '' True
+'/nvmf/subsystem create nqn.2014-08.org.spdk:cnode1 N37SXV509SRW\
+ max_namespaces=4 allow_any_host=True' 'nqn.2014-08.org.spdk:cnode1' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/namespaces create Malloc3 1' 'Malloc3' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/namespaces create Malloc4 2' 'Malloc4' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/listen_addresses create \
+ RDMA $NVMF_TARGET_IP 4260 IPv4' '$NVMF_TARGET_IP:4260' True
+'/nvmf/subsystem create nqn.2014-08.org.spdk:cnode2 N37SXV509SRD\
+ max_namespaces=2 allow_any_host=True' 'nqn.2014-08.org.spdk:cnode2' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode2/namespaces create Malloc2' 'Malloc2' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode2/listen_addresses create \
+ RDMA $NVMF_TARGET_IP 4260 IPv4' '$NVMF_TARGET_IP:4260' True
+'/nvmf/subsystem create nqn.2014-08.org.spdk:cnode3 N37SXV509SRR\
+ max_namespaces=2 allow_any_host=True' 'nqn.2014-08.org.spdk:cnode3' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode3/namespaces create Malloc1' 'Malloc1' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode3/listen_addresses create \
+ RDMA $NVMF_TARGET_IP 4260 IPv4' '$NVMF_TARGET_IP:4260' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode3/listen_addresses create \
+ RDMA $NVMF_TARGET_IP 4261 IPv4' '$NVMF_TARGET_IP:4261' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode3/hosts create \
+ nqn.2014-08.org.spdk:cnode1' 'nqn.2014-08.org.spdk:cnode1' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode3/hosts create \
+ nqn.2014-08.org.spdk:cnode2' 'nqn.2014-08.org.spdk:cnode2' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1 allow_any_host True' 'Allow any host'
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1 allow_any_host False' 'Allow any host' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/listen_addresses create RDMA $NVMF_TARGET_IP 4261 IPv4' '$NVMF_TARGET_IP:4261' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/listen_addresses create RDMA $NVMF_TARGET_IP 4262 IPv4' '$NVMF_TARGET_IP:4262' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/hosts create nqn.2014-08.org.spdk:cnode2' 'nqn.2014-08.org.spdk:cnode2' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/namespaces create Malloc5' 'Malloc5' True
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/namespaces create Malloc6' 'Malloc6' True
+"
+timing_exit spdkcli_create_nvmf_config
+
+timing_enter spdkcli_check_match
+check_match
+timing_exit spdkcli_check_match
+
+timing_enter spdkcli_clear_nvmf_config
+$spdkcli_job "'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/namespaces delete nsid=1' 'Malloc3'
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/namespaces delete_all' 'Malloc4'
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/hosts delete nqn.2014-08.org.spdk:cnode2' 'nqn.2014-08.org.spdk:cnode2'
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode3/hosts delete_all' 'nqn.2014-08.org.spdk:cnode1'
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/listen_addresses delete RDMA $NVMF_TARGET_IP 4262' '$NVMF_TARGET_IP:4262'
+'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/listen_addresses delete_all' '$NVMF_TARGET_IP:4261'
+'/nvmf/subsystem delete nqn.2014-08.org.spdk:cnode3' 'nqn.2014-08.org.spdk:cnode3'
+'/nvmf/subsystem delete_all' 'nqn.2014-08.org.spdk:cnode2'
+'/bdevs/malloc delete Malloc6' 'Malloc6'
+'/bdevs/malloc delete Malloc5' 'Malloc5'
+'/bdevs/malloc delete Malloc4' 'Malloc4'
+'/bdevs/malloc delete Malloc3' 'Malloc3'
+'/bdevs/malloc delete Malloc2' 'Malloc2'
+'/bdevs/malloc delete Malloc1' 'Malloc1'
+"
+timing_exit spdkcli_clear_nvmf_config
+
+killprocess $nvmf_tgt_pid
+#revert_soft_roce
diff --git a/src/spdk/test/spdkcli/pmem.sh b/src/spdk/test/spdkcli/pmem.sh
new file mode 100755
index 000000000..abecd73ca
--- /dev/null
+++ b/src/spdk/test/spdkcli/pmem.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/spdkcli/common.sh
+
+MATCH_FILE="spdkcli_pmem.test"
+SPDKCLI_BRANCH="/bdevs/pmemblk"
+
+trap 'rm -f $testdir/match_files/spdkcli_pmem_info.test; on_error_exit;' ERR
+
+timing_enter run_spdk_tgt
+run_spdk_tgt
+timing_exit run_spdk_tgt
+
+timing_enter spdkcli_create_pmem_config
+$spdkcli_job "'/bdevs/pmemblk bdev_pmem_create_pool /tmp/sample_pmem0 32 512' '' True
+'/bdevs/pmemblk bdev_pmem_create_pool /tmp/sample_pmem1 32 512' '' True
+"
+
+# Save the pmem pool info before the pools get claimed by '/bdevs/pmemblk create'
+$rootdir/scripts/spdkcli.py /bdevs/pmemblk bdev_pmem_get_pool_info /tmp/sample_pmem0 >> $testdir/match_files/spdkcli_pmem_info.test
+$rootdir/scripts/spdkcli.py /bdevs/pmemblk bdev_pmem_get_pool_info /tmp/sample_pmem1 >> $testdir/match_files/spdkcli_pmem_info.test
+
+$spdkcli_job "'/bdevs/pmemblk create /tmp/sample_pmem0 pmem_bdev0' 'pmem_bdev0' True
+'/bdevs/pmemblk create /tmp/sample_pmem1 pmem_bdev1' 'pmem_bdev1' True
+"
+
+timing_exit spdkcli_create_pmem_config
+
+timing_enter spdkcli_check_match
+check_match
+$rootdir/test/app/match/match -v $testdir/match_files/spdkcli_pmem_info.test.match
+timing_exit spdkcli_check_match
+
+timing_enter spdkcli_clear_pmem_config
+$spdkcli_job "'/bdevs/pmemblk delete pmem_bdev0' 'pmem_bdev0'
+'/bdevs/pmemblk bdev_pmem_delete_pool /tmp/sample_pmem0' ''
+'/bdevs/pmemblk delete_all' 'pmem_bdev1'
+'/bdevs/pmemblk bdev_pmem_delete_pool /tmp/sample_pmem1' ''
+"
+rm -f /tmp/sample_pmem0 /tmp/sample_pmem1
+rm -f $testdir/match_files/spdkcli_pmem_info.test
+timing_exit spdkcli_clear_pmem_config
+
+killprocess $spdk_tgt_pid
diff --git a/src/spdk/test/spdkcli/raid.sh b/src/spdk/test/spdkcli/raid.sh
new file mode 100755
index 000000000..9bbde4719
--- /dev/null
+++ b/src/spdk/test/spdkcli/raid.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/spdkcli/common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+MATCH_FILE="spdkcli_raid.test"
+SPDKCLI_BRANCH="/bdevs"
+
+trap 'on_error_exit;' ERR
+
+timing_enter run_spdk_tgt
+run_spdk_tgt
+timing_exit run_spdk_tgt
+
+timing_enter spdkcli_create_malloc
+$spdkcli_job "'/bdevs/malloc create 8 512 Malloc1' 'Malloc1' True
+'/bdevs/malloc create 8 512 Malloc2' 'Malloc2' True
+"
+timing_exit spdkcli_create_malloc
+
+timing_enter spdkcli_create_raid
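+# raid_volume create arguments: name, RAID level (0), quoted list of base bdevs,
+# and strip size in KiB.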
+$spdkcli_job "'/bdevs/raid_volume create testraid 0 \"Malloc1 Malloc2\" 4' 'testraid' True
+"
+timing_exit spdkcli_create_raid
+
+timing_enter spdkcli_check_match
+check_match
+timing_exit spdkcli_check_match
+
+timing_enter spdkcli_delete_raid
+$spdkcli_job "'/bdevs/raid_volume delete testraid' '' True
+"
+timing_exit spdkcli_delete_raid
+
+timing_enter spdkcli_delete_malloc
+$spdkcli_job "'/bdevs/malloc delete Malloc1' '' True
+'/bdevs/malloc delete Malloc2' '' True
+"
+timing_exit spdkcli_delete_malloc
+
+killprocess $spdk_tgt_pid
diff --git a/src/spdk/test/spdkcli/rbd.sh b/src/spdk/test/spdkcli/rbd.sh
new file mode 100755
index 000000000..bc6c798f6
--- /dev/null
+++ b/src/spdk/test/spdkcli/rbd.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/spdkcli/common.sh
+
+MATCH_FILE="spdkcli_rbd.test"
+SPDKCLI_BRANCH="/bdevs/rbd"
+
+trap 'on_error_exit' ERR
+timing_enter run_spdk_tgt
+run_spdk_tgt
+timing_exit run_spdk_tgt
+
+timing_enter spdkcli_create_rbd_config
+trap 'rbd_cleanup; on_error_exit' ERR
+rbd_cleanup
+rbd_setup 127.0.0.1
+$spdkcli_job "\"/bdevs/rbd create rbd foo 512' 'Ceph0' True "\"/bdevs/rbd create rbd foo 512 'Ceph1' True"
+timing_exit spdkcli_create_rbd_config
+
+timing_enter spdkcli_check_match
+check_match
+timing_exit spdkcli_check_match
+
+timing_enter spdkcli_clear_rbd_config
+$spdkcli_job "\"/bdevs/rbd delete Ceph0 'Ceph0' "\"/bdevs/rbd delete_all' 'Ceph1' "
+rbd_cleanup
+timing_exit spdkcli_clear_rbd_config
+
+killprocess $spdk_tgt_pid
diff --git a/src/spdk/test/spdkcli/spdkcli_job.py b/src/spdk/test/spdkcli/spdkcli_job.py
new file mode 100755
index 000000000..ce6530a90
--- /dev/null
+++ b/src/spdk/test/spdkcli/spdkcli_job.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+import pexpect
+import os
+import sys
+import re
+
+
+def execute_command(cmd, element=None, element_exists=False):
+ child.sendline(cmd)
+ child.expect("/>")
+ if "error response" in child.before.decode():
+ print("Error in cmd: %s" % cmd)
+ exit(1)
+ ls_tree = cmd.split(" ")[0]
+ if ls_tree and element:
+ child.sendline("ls %s" % ls_tree)
+ child.expect("/>")
+ if element_exists:
+ if element not in child.before.decode():
+ print("Element %s not in list:\n%s" % (element, child.before.decode()))
+ exit(1)
+ else:
+ if element in child.before.decode():
+ print("Element %s is in list:\n%s" % (element, child.before.decode()))
+ exit(1)
+
+
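+# The job description (argv[1]) is a newline-separated list of entries of the form
+#   '<spdkcli command>' '<expected element>' [True|False]
+# e.g. '/bdevs/malloc create 32 512 Malloc0' 'Malloc0' True
+# After running the command, the element is expected to be present (True) or absent
+# (False, or flag omitted) in "ls <first path component of the command>".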
+if __name__ == "__main__":
+ socket = "/var/tmp/spdk.sock"
+ port = None
+ if len(sys.argv) == 3:
+ socket = sys.argv[2]
+ elif len(sys.argv) == 4:
+ port = sys.argv[3]
+ testdir = os.path.dirname(os.path.realpath(sys.argv[0]))
+
+ if port is None:
+ child = pexpect.spawn(os.path.join(testdir, "../../scripts/spdkcli.py") + " -s %s" % socket)
+ else:
+ child = pexpect.spawn(os.path.join(testdir, "../../scripts/spdkcli.py") + " -s %s -p %s" % (socket, port))
+ child.expect(">")
+ child.sendline("cd /")
+ child.expect("/>")
+
+ cmd_lines = sys.argv[1].strip().split("\n")
+ for line in cmd_lines:
+ data = line.strip()
+ p = re.compile('\'(.*?)\'')
+ cmd = p.findall(data)
+ if data[-1] != "\'":
+ cmd.append(data.rsplit(" ", 1)[1].strip())
+ if cmd[-1] == "False":
+ cmd[-1] = False
+ else:
+ cmd[-1] = True
+ else:
+ cmd.append(False)
+ print("Executing command: %s" % cmd)
+ execute_command(*cmd[0:3])
diff --git a/src/spdk/test/spdkcli/tcp.sh b/src/spdk/test/spdkcli/tcp.sh
new file mode 100755
index 000000000..d0d4dafba
--- /dev/null
+++ b/src/spdk/test/spdkcli/tcp.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/spdkcli/common.sh
+
+function err_cleanup() {
+ if [ -n "$socat_pid" ]; then
+ killprocess $socat_pid || true
+ fi
+ killprocess $spdk_tgt_pid
+}
+
+IP_ADDRESS="127.0.0.1"
+PORT="9998"
+
+trap 'err_cleanup; exit 1' SIGINT SIGTERM EXIT
+
+timing_enter run_spdk_tgt_tcp
+$SPDK_BIN_DIR/spdk_tgt -m 0x3 -p 0 -s 2048 &
+spdk_tgt_pid=$!
+
+waitforlisten $spdk_tgt_pid
+
+# socat will terminate automatically after the connection is closed
+socat TCP-LISTEN:$PORT UNIX-CONNECT:$DEFAULT_RPC_ADDR &
+socat_pid=$!
+
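+# Sanity-check the TCP transport: issue an RPC through the socat bridge and expect
+# the method listing to come back without an error.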
+$rootdir/scripts/rpc.py -r 100 -t 2 -s $IP_ADDRESS -p $PORT rpc_get_methods
+
+timing_exit run_spdk_tgt_tcp
+
+trap - SIGINT SIGTERM EXIT
+killprocess $spdk_tgt_pid
diff --git a/src/spdk/test/spdkcli/vhost.sh b/src/spdk/test/spdkcli/vhost.sh
new file mode 100755
index 000000000..809a322f8
--- /dev/null
+++ b/src/spdk/test/spdkcli/vhost.sh
@@ -0,0 +1,147 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/spdkcli/common.sh
+
+MATCH_FILE="spdkcli_vhost.test"
+SPDKCLI_BRANCH="/"
+
+sample_aio=$SPDK_TEST_STORAGE/sample_aio
+sample_aio2=$SPDK_TEST_STORAGE/sample_aio2
+
+trap 'on_error_exit' ERR
+timing_enter run_vhost_tgt
+run_vhost_tgt
+timing_exit run_vhost_tgt
+
+timing_enter spdkcli_create_bdevs_config
+$spdkcli_job "'/bdevs/malloc create 40 512 Malloc0' 'Malloc0' True
+'/bdevs/malloc create 32 512 Malloc1' 'Malloc1' True
+'/bdevs/malloc create 32 512 Malloc2' 'Malloc2' True
+'/bdevs/malloc create 32 4096 Malloc3' 'Malloc3' True
+'/bdevs/malloc create 32 4096 Malloc4' 'Malloc4' True
+'/bdevs/malloc create 32 4096 Malloc5' 'Malloc5' True
+'/bdevs/error create Malloc1' 'EE_Malloc1' True
+'/bdevs/error create Malloc4' 'EE_Malloc4' True
+'/bdevs/null create null_bdev0 32 512' 'null_bdev0' True
+'/bdevs/null create null_bdev1 32 512' 'null_bdev1' True
+"
+dd if=/dev/zero of="$sample_aio" bs=2048 count=5000
+dd if=/dev/zero of="$sample_aio2" bs=2048 count=5000
+$spdkcli_job "'/bdevs/aio create sample0 $sample_aio 512' 'sample0' True
+'/bdevs/aio create sample1 $sample_aio2 512' 'sample1' True
+"
+trtype=$($rootdir/scripts/gen_nvme.sh --json | jq -r '.config[].params | select(.name=="Nvme0").trtype')
+traddr=$($rootdir/scripts/gen_nvme.sh --json | jq -r '.config[].params | select(.name=="Nvme0").traddr')
+$spdkcli_job "'/bdevs/nvme create Nvme0 $trtype $traddr' 'Nvme0' True
+'/bdevs/split_disk bdev_split_create Nvme0n1 4' 'Nvme0n1p0' True
+"
+timing_exit spdkcli_create_bdevs_config
+
+timing_enter spdkcli_create_lvols_config
+$spdkcli_job "'/lvol_stores create lvs0 Malloc0' 'lvs0' True
+'/lvol_stores create lvs1 Malloc5' 'lvs1' True
+'/bdevs/logical_volume create lvol0 16 lvs0' 'lvs0/lvol0' True
+'/bdevs/logical_volume create lvol1 16 lvs0' 'lvs0/lvol1' True
+"
+timing_exit spdkcli_create_lvols_config
+
+timing_enter spdkcli_check_match_details
+$rootdir/scripts/spdkcli.py /lvol_stores/lvs0 show_details | jq -r -S '.' > $testdir/match_files/spdkcli_details_lvs.test
+$rootdir/test/app/match/match $testdir/match_files/spdkcli_details_lvs.test.match
+rm -f $testdir/match_files/spdkcli_details_lvs.test
+timing_exit spdkcli_check_match_details
+
+timing_enter spdkcli_create_vhosts_config
+$spdkcli_job "'vhost/block create vhost_blk1 Nvme0n1p0' 'Nvme0n1p0' True
+'vhost/block create vhost_blk2 Nvme0n1p1 0x1 readonly' 'Nvme0n1p1' True
+'vhost/scsi create vhost_scsi1' 'vhost_scsi1' True
+'vhost/scsi create vhost_scsi2' 'vhost_scsi2' True
+'vhost/scsi/vhost_scsi1 add_lun 0 Malloc2' 'Malloc2' True
+'vhost/scsi/vhost_scsi2 add_lun 0 Malloc3' 'Malloc3' True
+'vhost/scsi/vhost_scsi2 add_lun 1 Nvme0n1p2' 'Nvme0n1p2' True
+'vhost/scsi/vhost_scsi2 add_lun 2 Nvme0n1p3' 'Nvme0n1p3' True
+'vhost/scsi/vhost_scsi1 set_coalescing 20 1000000' '' True
+"
+timing_exit spdkcli_create_vhosts_config
+
+timing_enter spdkcli_check_match
+check_match
+timing_exit spdkcli_check_match
+
+timing_enter spdkcli_save_config
+$spdkcli_job "'save_config $testdir/config.json'
+'save_subsystem_config $testdir/config_bdev.json bdev'
+'save_subsystem_config $testdir/config_vhost.json vhost'
+"
+timing_exit spdkcli_save_config
+
+timing_enter spdkcli_check_match_details
+$rootdir/scripts/spdkcli.py vhost/scsi/vhost_scsi1/Target_0 show_details | jq -r -S '.' > $testdir/match_files/spdkcli_details_vhost_target.test
+$rootdir/test/app/match/match $testdir/match_files/spdkcli_details_vhost_target.test.match
+rm -f $testdir/match_files/spdkcli_details_vhost_target.test
+
+$rootdir/scripts/spdkcli.py bdevs/split_disk/Nvme0n1p0 show_details | jq -r -S '.' > $testdir/match_files/spdkcli_details_vhost.test
+$rootdir/test/app/match/match $testdir/match_files/spdkcli_details_vhost.test.match
+rm -f $testdir/match_files/spdkcli_details_vhost.test
+
+$rootdir/scripts/spdkcli.py vhost/scsi/vhost_scsi1 show_details | jq -r -S '.' > $testdir/match_files/spdkcli_details_vhost_ctrl.test
+$rootdir/test/app/match/match $testdir/match_files/spdkcli_details_vhost_ctrl.test.match
+rm -f $testdir/match_files/spdkcli_details_vhost_ctrl.test
+timing_exit spdkcli_check_match_details
+
+timing_enter spdkcli_clear_config
+$spdkcli_job "'vhost/scsi/vhost_scsi2 remove_target 2' 'Nvme0n1p3'
+'vhost/scsi/vhost_scsi2 remove_target 1' 'Nvme0n1p2'
+'vhost/scsi/vhost_scsi2 remove_target 0' 'Malloc3'
+'vhost/scsi/vhost_scsi1 remove_target 0' 'Malloc2'
+'vhost/scsi delete vhost_scsi2' 'vhost_scsi2'
+'vhost/scsi delete vhost_scsi1' 'vhost_scsi1'
+'vhost/block delete vhost_blk2' 'vhost_blk2'
+'vhost/block delete vhost_blk1' 'vhost_blk1'
+'/bdevs/split_disk bdev_split_delete Nvme0n1' 'Nvme0n1p0'
+'/bdevs/aio delete sample0' 'sample0'
+'/bdevs/aio delete_all' 'sample1'
+'/bdevs/nvme delete Nvme0' 'Nvme0'
+'/bdevs/null delete null_bdev0' 'null_bdev0'
+'/bdevs/null delete_all' 'null_bdev1'
+'/bdevs/logical_volume delete lvs0/lvol0' 'lvs0/lvol0'
+'/bdevs/logical_volume delete_all' 'lvs0/lvol1'
+'/lvol_stores delete lvs0' 'lvs0'
+'/lvol_stores delete_all' 'lvs1'
+'/bdevs/error delete EE_Malloc1' 'EE_Malloc1'
+'/bdevs/error delete_all' 'EE_Malloc4'
+'/bdevs/malloc delete Malloc0' 'Malloc0'
+'/bdevs/malloc delete_all' 'Malloc1'
+"
+timing_exit spdkcli_clear_config
+
+timing_enter spdkcli_load_config
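+# The Malloc bdevs recreated by load_config are empty, so any lvol stores and
+# volumes that lived on them are gone and have to be created again before matching.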
+$spdkcli_job "'load_config $testdir/config.json'
+'/lvol_stores create lvs0 Malloc0' 'lvs0' True
+'/lvol_stores create lvs1 Malloc5' 'lvs1' True
+'/bdevs/logical_volume create lvol0 16 lvs0' 'lvs0/lvol0' True
+'/bdevs/logical_volume create lvol1 16 lvs0' 'lvs0/lvol1' True
+"
+check_match
+$spdk_clear_config_py clear_config
+# FIXME: remove this sleep once the NVMe driver is fixed to wait for reset completion
+sleep 2
+$spdkcli_job "'load_subsystem_config $testdir/config_bdev.json'
+'load_subsystem_config $testdir/config_vhost.json'
+'/lvol_stores create lvs0 Malloc0' 'lvs0' True
+'/lvol_stores create lvs1 Malloc5' 'lvs1' True
+'/bdevs/logical_volume create lvol0 16 lvs0' 'lvs0/lvol0' True
+'/bdevs/logical_volume create lvol1 16 lvs0' 'lvs0/lvol1' True
+"
+check_match
+$spdk_clear_config_py clear_config
+rm -f $testdir/config.json
+rm -f $testdir/config_bdev.json
+rm -f $testdir/config_vhost.json
+rm -f "$sample_aio" "$sample_aio2"
+timing_exit spdkcli_load_config
+
+killprocess $vhost_tgt_pid
diff --git a/src/spdk/test/spdkcli/virtio.sh b/src/spdk/test/spdkcli/virtio.sh
new file mode 100755
index 000000000..b483e0562
--- /dev/null
+++ b/src/spdk/test/spdkcli/virtio.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/spdkcli/common.sh
+
+trap 'killprocess $virtio_pid; on_error_exit' ERR
+
+timing_enter run_spdk_tgt
+run_spdk_tgt
+timing_exit run_spdk_tgt
+
+timing_enter run_spdk_virtio
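+# Start a second SPDK target (RPC socket /var/tmp/virtio.sock) that acts as the
+# virtio initiator, connecting to the vhost controllers exposed by the first target.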
+$SPDK_BIN_DIR/spdk_tgt -m 0x4 -p 0 -g -u -s 1024 -r /var/tmp/virtio.sock &
+virtio_pid=$!
+waitforlisten $virtio_pid /var/tmp/virtio.sock
+timing_exit run_spdk_virtio
+
+timing_enter spdkcli_create_virtio_pci_config
+$spdkcli_job "'/bdevs/malloc create 32 512 Malloc0' 'Malloc0' True
+'/bdevs/malloc create 32 512 Malloc1' 'Malloc1' True
+"
+pci_blk=$(lspci -nn -D | grep '1af4:1001' | head -1 | awk '{print $1;}')
+if [ -n "$pci_blk" ] && grep -Eq "DRIVER=(uio|vfio)" "/sys/bus/pci/devices/$pci_blk/uevent"; then
+ $spdkcli_job "'/bdevs/virtioblk_disk create virtioblk_pci pci $pci_blk' 'virtioblk_pci' True"
+fi
+pci_scsi=$(lspci -nn -D | grep '1af4:1004' | head -1 | awk '{print $1;}')
+if [ -n "$pci_scsi" ] && grep -Eq "DRIVER=(uio|vfio)" "/sys/bus/pci/devices/$pci_scsi/uevent"; then
+ $spdkcli_job "'/bdevs/virtioscsi_disk create virtioscsi_pci pci $pci_scsi' 'virtioscsi_pci' True"
+fi
+$spdkcli_job "'/vhost/scsi create sample_scsi' 'sample_scsi' True
+'/vhost/scsi/sample_scsi add_lun 0 Malloc0' 'Malloc0' True
+'/vhost/block create sample_block Malloc1' 'Malloc1' True
+"
+timing_exit spdkcli_create_virtio_pci_config
+
+timing_enter spdkcli_check_match
+if [ -n "$pci_blk" ] && [ -n "$pci_scsi" ]; then
+ MATCH_FILE="spdkcli_virtio_pci.test"
+ SPDKCLI_BRANCH="/bdevs"
+ check_match
+fi
+timing_exit spdkcli_check_match
+
+timing_enter spdkcli_create_virtio_user_config
+$spdkcli_job "'/bdevs/virtioblk_disk create virtioblk_user user $testdir/../../sample_block' 'virtioblk_user' True
+'/bdevs/virtioscsi_disk create virtioscsi_user user $testdir/../../sample_scsi' 'virtioscsi_user' True
+" /var/tmp/virtio.sock
+timing_exit spdkcli_create_virtio_user_config
+
+timing_enter spdkcli_check_match_user_config
+MATCH_FILE="spdkcli_virtio_user.test"
+SPDKCLI_BRANCH="/vhost"
+check_match
+timing_exit spdkcli_check_match_user_config
+
+timing_enter spdkcli_clear_virtio_config
+$spdkcli_job "'/bdevs/virtioscsi_disk delete virtioscsi_user' '' False
+'/bdevs/virtioblk_disk delete virtioblk_user' '' False
+" /var/tmp/virtio.sock
+$spdkcli_job "'/vhost/block delete sample_block' 'sample_block'
+'/vhost/scsi/sample_scsi remove_target 0' 'Malloc0'
+'/vhost/scsi delete sample_scsi' 'sample_scsi'
+"
+if [ -n "$pci_blk" ] && grep -Eq "DRIVER=(uio|vfio)" "/sys/bus/pci/devices/$pci_blk/uevent"; then
+ $spdkcli_job "'/bdevs/virtioblk_disk delete virtioblk_pci' 'virtioblk_pci'"
+fi
+if [ -n "$pci_scsi" ] && grep -Eq "DRIVER=(uio|vfio)" "/sys/bus/pci/devices/$pci_scsi/uevent"; then
+ $spdkcli_job "'/bdevs/virtioscsi_disk delete virtioscsi_pci' 'virtioscsi_pci'"
+fi
+$spdkcli_job "'/bdevs/malloc delete Malloc0' 'Malloc0'
+'/bdevs/malloc delete Malloc1' 'Malloc1'
+"
+timing_exit spdkcli_clear_virtio_config
+
+killprocess $virtio_pid
+killprocess $spdk_tgt_pid
diff --git a/src/spdk/test/unit/Makefile b/src/spdk/test/unit/Makefile
new file mode 100644
index 000000000..dbe663cbb
--- /dev/null
+++ b/src/spdk/test/unit/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = include lib
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/include/Makefile b/src/spdk/test/unit/include/Makefile
new file mode 100644
index 000000000..0ddc15242
--- /dev/null
+++ b/src/spdk/test/unit/include/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = spdk
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/include/spdk/Makefile b/src/spdk/test/unit/include/spdk/Makefile
new file mode 100644
index 000000000..d99c7d632
--- /dev/null
+++ b/src/spdk/test/unit/include/spdk/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = histogram_data.h
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/include/spdk/histogram_data.h/.gitignore b/src/spdk/test/unit/include/spdk/histogram_data.h/.gitignore
new file mode 100644
index 000000000..b2b36ff73
--- /dev/null
+++ b/src/spdk/test/unit/include/spdk/histogram_data.h/.gitignore
@@ -0,0 +1 @@
+histogram_ut
diff --git a/src/spdk/test/unit/include/spdk/histogram_data.h/Makefile b/src/spdk/test/unit/include/spdk/histogram_data.h/Makefile
new file mode 100644
index 000000000..54f7278ae
--- /dev/null
+++ b/src/spdk/test/unit/include/spdk/histogram_data.h/Makefile
@@ -0,0 +1,37 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = histogram_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/include/spdk/histogram_data.h/histogram_ut.c b/src/spdk/test/unit/include/spdk/histogram_data.h/histogram_ut.c
new file mode 100644
index 000000000..45b81d594
--- /dev/null
+++ b/src/spdk/test/unit/include/spdk/histogram_data.h/histogram_ut.c
@@ -0,0 +1,161 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/histogram_data.h"
+#include "spdk/util.h"
+
+uint64_t g_values[] = {
+ 1,
+ 10,
+ 1000,
+ 50000,
+ (1ULL << 63),
+ UINT64_MAX
+};
+
+uint64_t *g_values_end = &g_values[SPDK_COUNTOF(g_values)];
+uint64_t g_total;
+uint64_t g_number_of_merged_histograms;
+
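+/*
+ * Iteration callback: for every non-empty bucket, verify that each tallied value in
+ * g_values falls within [start, end) and that the running totals stay consistent.
+ */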
+static void
+check_values(void *ctx, uint64_t start, uint64_t end, uint64_t count,
+ uint64_t total, uint64_t so_far)
+{
+ uint64_t **values = ctx;
+
+ if (count == 0) {
+ return;
+ }
+
+ CU_ASSERT(so_far == (g_total + count));
+
+ /*
+ * The bucket for this iteration does not include end, but
+	 * subtract one anyway to account for the last bucket
+ * which will have end = 0x0 (UINT64_MAX + 1).
+ */
+ end--;
+
+ while (1) {
+ CU_ASSERT(**values >= start);
+ /*
+ * We subtracted one from end above, so it's OK here for
+ * **values to equal end.
+ */
+ CU_ASSERT(**values <= end);
+ g_total += g_number_of_merged_histograms;
+ count -= g_number_of_merged_histograms;
+ (*values)++;
+ if (*values == g_values_end || **values > end) {
+ break;
+ }
+ }
+ CU_ASSERT(count == 0);
+}
+
+static void
+histogram_test(void)
+{
+ struct spdk_histogram_data *h;
+ uint64_t *values = g_values;
+ uint32_t i;
+
+ h = spdk_histogram_data_alloc();
+
+ for (i = 0; i < SPDK_COUNTOF(g_values); i++) {
+ spdk_histogram_data_tally(h, g_values[i]);
+ }
+ g_total = 0;
+ g_number_of_merged_histograms = 1;
+ spdk_histogram_data_iterate(h, check_values, &values);
+
+ spdk_histogram_data_free(h);
+}
+
+static void
+histogram_merge(void)
+{
+ struct spdk_histogram_data *h1, *h2;
+ uint64_t *values = g_values;
+ uint32_t i;
+
+ h1 = spdk_histogram_data_alloc();
+ h2 = spdk_histogram_data_alloc();
+
+ for (i = 0; i < SPDK_COUNTOF(g_values); i++) {
+ spdk_histogram_data_tally(h1, g_values[i]);
+ spdk_histogram_data_tally(h2, g_values[i]);
+ }
+
+ spdk_histogram_data_merge(h1, h2);
+
+ g_total = 0;
+ g_number_of_merged_histograms = 2;
+ spdk_histogram_data_iterate(h1, check_values, &values);
+
+ spdk_histogram_data_free(h1);
+ spdk_histogram_data_free(h2);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("histogram", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "histogram_test", histogram_test) == NULL ||
+ CU_add_test(suite, "histogram_merge", histogram_merge) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/Makefile b/src/spdk/test/unit/lib/Makefile
new file mode 100644
index 000000000..aa2d707ab
--- /dev/null
+++ b/src/spdk/test/unit/lib/Makefile
@@ -0,0 +1,51 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev blob blobfs event ioat iscsi json jsonrpc log lvol
+DIRS-y += notify nvme nvmf scsi sock thread util
+DIRS-$(CONFIG_IDXD) += idxd
+DIRS-$(CONFIG_REDUCE) += reduce
+ifeq ($(OS),Linux)
+DIRS-$(CONFIG_VHOST) += vhost
+DIRS-y += ftl
+endif
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/Makefile b/src/spdk/test/unit/lib/bdev/Makefile
new file mode 100644
index 000000000..8120b1127
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/Makefile
@@ -0,0 +1,51 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev.c part.c scsi_nvme.c gpt vbdev_lvol.c mt raid bdev_zone.c vbdev_zone_block.c bdev_ocssd.c
+
+DIRS-$(CONFIG_CRYPTO) += crypto.c
+
+# enable once new mocks are added for compressdev
+DIRS-$(CONFIG_REDUCE) += compress.c
+
+DIRS-$(CONFIG_PMDK) += pmem
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore
new file mode 100644
index 000000000..a5a22d0d3
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore
@@ -0,0 +1 @@
+bdev_ut
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev.c/Makefile
new file mode 100644
index 000000000..eb73fafb3
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/Makefile
@@ -0,0 +1,37 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c b/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c
new file mode 100644
index 000000000..36916f4f5
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c
@@ -0,0 +1,3417 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/ut_multithread.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "bdev/bdev.c"
+
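+/*
+ * Stub out external symbols referenced by bdev.c: DEFINE_STUB() generates a function
+ * that simply returns the given value and DEFINE_STUB_V() a void no-op, so the unit
+ * test links without the real config/trace/notify libraries.
+ */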
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
+DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
+
+
+int g_status;
+int g_count;
+enum spdk_bdev_event_type g_event_type1;
+enum spdk_bdev_event_type g_event_type2;
+struct spdk_histogram_data *g_histogram;
+void *g_unregister_arg;
+int g_unregister_rc;
+
+void
+spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+}
+
+static int
+null_init(void)
+{
+ return 0;
+}
+
+static int
+null_clean(void)
+{
+ return 0;
+}
+
+static int
+stub_destruct(void *ctx)
+{
+ return 0;
+}
+
+struct ut_expected_io {
+ uint8_t type;
+ uint64_t offset;
+ uint64_t length;
+ int iovcnt;
+ struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
+ void *md_buf;
+ TAILQ_ENTRY(ut_expected_io) link;
+};
+
+struct bdev_ut_channel {
+ TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
+ uint32_t outstanding_io_count;
+ TAILQ_HEAD(, ut_expected_io) expected_io;
+};
+
+static bool g_io_done;
+static struct spdk_bdev_io *g_bdev_io;
+static enum spdk_bdev_io_status g_io_status;
+static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+static uint32_t g_bdev_ut_io_device;
+static struct bdev_ut_channel *g_bdev_ut_channel;
+static void *g_compare_read_buf;
+static uint32_t g_compare_read_buf_len;
+static void *g_compare_write_buf;
+static uint32_t g_compare_write_buf_len;
+static bool g_abort_done;
+static enum spdk_bdev_io_status g_abort_status;
+
+static struct ut_expected_io *
+ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
+{
+ struct ut_expected_io *expected_io;
+
+ expected_io = calloc(1, sizeof(*expected_io));
+ SPDK_CU_ASSERT_FATAL(expected_io != NULL);
+
+ expected_io->type = type;
+ expected_io->offset = offset;
+ expected_io->length = length;
+ expected_io->iovcnt = iovcnt;
+
+ return expected_io;
+}
+
+static void
+ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
+{
+ expected_io->iov[pos].iov_base = base;
+ expected_io->iov[pos].iov_len = len;
+}
+
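+/*
+ * Fake submit_request handler: records the I/O as outstanding, services the
+ * compare/abort special cases, and checks the request against the next queued
+ * ut_expected_io entry (type, offset, length, iovecs).
+ */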
+static void
+stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
+{
+ struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
+ struct ut_expected_io *expected_io;
+ struct iovec *iov, *expected_iov;
+ struct spdk_bdev_io *bio_to_abort;
+ int i;
+
+ g_bdev_io = bdev_io;
+
+ if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
+ uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
+
+ CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
+ CU_ASSERT(g_compare_read_buf_len == len);
+ memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
+ }
+
+ if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
+ uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
+
+ CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
+ CU_ASSERT(g_compare_write_buf_len == len);
+ memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
+ }
+
+ if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
+ uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
+
+ CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
+ CU_ASSERT(g_compare_read_buf_len == len);
+ if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
+ }
+ }
+
+ if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
+ if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
+ TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
+ if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
+ TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
+ ch->outstanding_io_count--;
+ spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
+ break;
+ }
+ }
+ }
+ }
+
+ TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_io_count++;
+
+ expected_io = TAILQ_FIRST(&ch->expected_io);
+ if (expected_io == NULL) {
+ return;
+ }
+ TAILQ_REMOVE(&ch->expected_io, expected_io, link);
+
+ if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
+ CU_ASSERT(bdev_io->type == expected_io->type);
+ }
+
+ if (expected_io->md_buf != NULL) {
+ CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
+ }
+
+ if (expected_io->length == 0) {
+ free(expected_io);
+ return;
+ }
+
+ CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
+	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
+
+ if (expected_io->iovcnt == 0) {
+ free(expected_io);
+ /* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
+ return;
+ }
+
+ CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
+ for (i = 0; i < expected_io->iovcnt; i++) {
+ iov = &bdev_io->u.bdev.iovs[i];
+ expected_iov = &expected_io->iov[i];
+ CU_ASSERT(iov->iov_len == expected_iov->iov_len);
+ CU_ASSERT(iov->iov_base == expected_iov->iov_base);
+ }
+
+ free(expected_io);
+}
+
+static void
+stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
+ struct spdk_bdev_io *bdev_io, bool success)
+{
+ CU_ASSERT(success == true);
+
+ stub_submit_request(_ch, bdev_io);
+}
+
+static void
+stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
+{
+ spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
+ bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
+}
+
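+/*
+ * Complete up to num_to_complete I/Os from the head of the outstanding list
+ * with g_io_exp_status; returns how many were actually completed, which may
+ * be fewer if the list runs empty first.
+ */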
+static uint32_t
+stub_complete_io(uint32_t num_to_complete)
+{
+ struct bdev_ut_channel *ch = g_bdev_ut_channel;
+ struct spdk_bdev_io *bdev_io;
+ static enum spdk_bdev_io_status io_status;
+ uint32_t num_completed = 0;
+
+ while (num_completed < num_to_complete) {
+ if (TAILQ_EMPTY(&ch->outstanding_io)) {
+ break;
+ }
+ bdev_io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_io_count--;
+ io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
+ g_io_exp_status;
+ spdk_bdev_io_complete(bdev_io, io_status);
+ num_completed++;
+ }
+
+ return num_completed;
+}
+
+static struct spdk_io_channel *
+bdev_ut_get_io_channel(void *ctx)
+{
+ return spdk_get_io_channel(&g_bdev_ut_io_device);
+}
+
+static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
+ [SPDK_BDEV_IO_TYPE_READ] = true,
+ [SPDK_BDEV_IO_TYPE_WRITE] = true,
+ [SPDK_BDEV_IO_TYPE_COMPARE] = true,
+ [SPDK_BDEV_IO_TYPE_UNMAP] = true,
+ [SPDK_BDEV_IO_TYPE_FLUSH] = true,
+ [SPDK_BDEV_IO_TYPE_RESET] = true,
+ [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
+ [SPDK_BDEV_IO_TYPE_NVME_IO] = true,
+ [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
+ [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
+ [SPDK_BDEV_IO_TYPE_ZCOPY] = true,
+ [SPDK_BDEV_IO_TYPE_ABORT] = true,
+};
+
+static void
+ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
+{
+ g_io_types_supported[io_type] = enable;
+}
+
+static bool
+stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
+{
+ return g_io_types_supported[io_type];
+}
+
+static struct spdk_bdev_fn_table fn_table = {
+ .destruct = stub_destruct,
+ .submit_request = stub_submit_request,
+ .get_io_channel = bdev_ut_get_io_channel,
+ .io_type_supported = stub_io_type_supported,
+};
+
+static int
+bdev_ut_create_ch(void *io_device, void *ctx_buf)
+{
+ struct bdev_ut_channel *ch = ctx_buf;
+
+ CU_ASSERT(g_bdev_ut_channel == NULL);
+ g_bdev_ut_channel = ch;
+
+ TAILQ_INIT(&ch->outstanding_io);
+ ch->outstanding_io_count = 0;
+ TAILQ_INIT(&ch->expected_io);
+ return 0;
+}
+
+static void
+bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(g_bdev_ut_channel != NULL);
+ g_bdev_ut_channel = NULL;
+}
+
+struct spdk_bdev_module bdev_ut_if;
+
+static int
+bdev_ut_module_init(void)
+{
+ spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
+ sizeof(struct bdev_ut_channel), NULL);
+ spdk_bdev_module_init_done(&bdev_ut_if);
+ return 0;
+}
+
+static void
+bdev_ut_module_fini(void)
+{
+ spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
+}
+
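+/*
+ * async_init means the bdev layer waits for spdk_bdev_module_init_done(),
+ * which bdev_ut_module_init() calls right after registering the io_device.
+ */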
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+ .module_init = bdev_ut_module_init,
+ .module_fini = bdev_ut_module_fini,
+ .async_init = true,
+};
+
+static void vbdev_ut_examine(struct spdk_bdev *bdev);
+
+static int
+vbdev_ut_module_init(void)
+{
+ return 0;
+}
+
+static void
+vbdev_ut_module_fini(void)
+{
+}
+
+struct spdk_bdev_module vbdev_ut_if = {
+ .name = "vbdev_ut",
+ .module_init = vbdev_ut_module_init,
+ .module_fini = vbdev_ut_module_fini,
+ .examine_config = vbdev_ut_examine,
+};
+
+SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
+SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
+
+static void
+vbdev_ut_examine(struct spdk_bdev *bdev)
+{
+ spdk_bdev_module_examine_done(&vbdev_ut_if);
+}
+
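+/*
+ * Register a minimal 1024-block, 512-byte-block stub bdev owned by the
+ * bdev_ut module and backed by fn_table above.
+ */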
+static struct spdk_bdev *
+allocate_bdev(char *name)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ bdev = calloc(1, sizeof(*bdev));
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ bdev->name = name;
+ bdev->fn_table = &fn_table;
+ bdev->module = &bdev_ut_if;
+ bdev->blockcnt = 1024;
+ bdev->blocklen = 512;
+
+ rc = spdk_bdev_register(bdev);
+ CU_ASSERT(rc == 0);
+
+ return bdev;
+}
+
+static struct spdk_bdev *
+allocate_vbdev(char *name)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ bdev = calloc(1, sizeof(*bdev));
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ bdev->name = name;
+ bdev->fn_table = &fn_table;
+ bdev->module = &vbdev_ut_if;
+
+ rc = spdk_bdev_register(bdev);
+ CU_ASSERT(rc == 0);
+
+ return bdev;
+}
+
+static void
+free_bdev(struct spdk_bdev *bdev)
+{
+ spdk_bdev_unregister(bdev, NULL, NULL);
+ poll_threads();
+ memset(bdev, 0xFF, sizeof(*bdev));
+ free(bdev);
+}
+
+static void
+free_vbdev(struct spdk_bdev *bdev)
+{
+ spdk_bdev_unregister(bdev, NULL, NULL);
+ poll_threads();
+ memset(bdev, 0xFF, sizeof(*bdev));
+ free(bdev);
+}
+
+static void
+get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
+{
+ const char *bdev_name;
+
+ CU_ASSERT(bdev != NULL);
+ CU_ASSERT(rc == 0);
+ bdev_name = spdk_bdev_get_name(bdev);
+ CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");
+
+ free(stat);
+ free_bdev(bdev);
+
+ *(bool *)cb_arg = true;
+}
+
+static void
+bdev_unregister_cb(void *cb_arg, int rc)
+{
+ g_unregister_arg = cb_arg;
+ g_unregister_rc = rc;
+}
+
+static void
+bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
+{
+ struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;
+
+ g_event_type1 = type;
+ if (SPDK_BDEV_EVENT_REMOVE == type) {
+ spdk_bdev_close(desc);
+ }
+}
+
+static void
+bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
+{
+ struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;
+
+ g_event_type2 = type;
+ if (SPDK_BDEV_EVENT_REMOVE == type) {
+ spdk_bdev_close(desc);
+ }
+}
+
+static void
+get_device_stat_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_io_stat *stat;
+ bool done;
+
+ bdev = allocate_bdev("bdev0");
+ stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
+ if (stat == NULL) {
+ free_bdev(bdev);
+ return;
+ }
+
+ done = false;
+ spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
+ while (!done) { poll_threads(); }
+}
+
+static void
+open_write_test(void)
+{
+ struct spdk_bdev *bdev[9];
+ struct spdk_bdev_desc *desc[9] = {};
+ int rc;
+
+ /*
+ * Create a tree of bdevs to test various open w/ write cases.
+ *
+ * bdev0 through bdev3 are physical block devices, such as NVMe
+ * namespaces or Ceph block devices.
+ *
+ * bdev4 is a virtual bdev with multiple base bdevs. This models
+ * caching or RAID use cases.
+ *
+ * bdev5 through bdev7 are all virtual bdevs with the same base
+ * bdev (except bdev7). This models partitioning or logical volume
+ * use cases.
+ *
+ * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
+ * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
+ * models caching, RAID, partitioning or logical volumes use cases.
+ *
+ * bdev8 is a virtual bdev with multiple base bdevs, but these
+ * base bdevs are themselves virtual bdevs.
+ *
+ *                  bdev8
+ *                    |
+ *              +-----+-----+
+ *              |           |
+ *            bdev4       bdev5   bdev6   bdev7
+ *              |           |       |      / \
+ *          +---+---+        \      |     /   \
+ *          |       |         \     |    /     \
+ *        bdev0   bdev1        \    |   /     bdev3
+ *                              \   | /
+ *                               bdev2
+ */
+
+ bdev[0] = allocate_bdev("bdev0");
+ rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[1] = allocate_bdev("bdev1");
+ rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[2] = allocate_bdev("bdev2");
+ rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[3] = allocate_bdev("bdev3");
+ rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[4] = allocate_vbdev("bdev4");
+ rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[5] = allocate_vbdev("bdev5");
+ rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[6] = allocate_vbdev("bdev6");
+
+ bdev[7] = allocate_vbdev("bdev7");
+
+ bdev[8] = allocate_vbdev("bdev8");
+
+ /* Open bdev0 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
+ spdk_bdev_close(desc[0]);
+
+ /*
+ * Open bdev1 read/write. This should fail since bdev1 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
+ CU_ASSERT(rc == -EPERM);
+
+ /*
+ * Open bdev4 read/write. This should fail since bdev4 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
+ CU_ASSERT(rc == -EPERM);
+
+ /* Open bdev4 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
+ spdk_bdev_close(desc[4]);
+
+ /*
+ * Open bdev8 read/write. This should succeed since it is a leaf
+ * bdev.
+ */
+ rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
+ spdk_bdev_close(desc[8]);
+
+ /*
+ * Open bdev5 read/write. This should fail since bdev5 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
+ CU_ASSERT(rc == -EPERM);
+
+ /* Open bdev5 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
+ spdk_bdev_close(desc[5]);
+
+ free_vbdev(bdev[8]);
+
+ free_vbdev(bdev[5]);
+ free_vbdev(bdev[6]);
+ free_vbdev(bdev[7]);
+
+ free_vbdev(bdev[4]);
+
+ free_bdev(bdev[0]);
+ free_bdev(bdev[1]);
+ free_bdev(bdev[2]);
+ free_bdev(bdev[3]);
+}
+
+static void
+bytes_to_blocks_test(void)
+{
+ struct spdk_bdev bdev;
+ uint64_t offset_blocks, num_blocks;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.blocklen = 512;
+
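+ /* With a 512-byte block size, offset 512 maps to block 1 and length 1024 to 2 blocks. */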
+ /* All parameters valid */
+ offset_blocks = 0;
+ num_blocks = 0;
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
+ CU_ASSERT(offset_blocks == 1);
+ CU_ASSERT(num_blocks == 2);
+
+ /* Offset not a block multiple */
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);
+
+ /* Length not a block multiple */
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);
+
+ /* In case blocklen is not a power of two */
+ bdev.blocklen = 100;
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
+ CU_ASSERT(offset_blocks == 1);
+ CU_ASSERT(num_blocks == 2);
+
+ /* Offset not a block multiple */
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);
+
+ /* Length not a block multiple */
+ CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
+}
+
+static void
+num_blocks_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_bdev_desc *desc_ext = NULL;
+ int rc;
+
+ memset(&bdev, 0, sizeof(bdev));
+ bdev.name = "num_blocks";
+ bdev.fn_table = &fn_table;
+ bdev.module = &bdev_ut_if;
+ spdk_bdev_register(&bdev);
+ spdk_bdev_notify_blockcnt_change(&bdev, 50);
+
+ /* Growing block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
+ /* Shrinking block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
+
+ /* In case bdev opened */
+ rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+
+ /* Growing block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
+ /* Shrinking block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
+
+ /* In case bdev opened with ext API */
+ rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc_ext != NULL);
+
+ g_event_type1 = 0xFF;
+ /* Growing block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);
+
+ poll_threads();
+ CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);
+
+ g_event_type1 = 0xFF;
+ /* Growing block number and closing */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);
+
+ spdk_bdev_close(desc);
+ spdk_bdev_close(desc_ext);
+ spdk_bdev_unregister(&bdev, NULL, NULL);
+
+ poll_threads();
+
+ /* Callback is not called for closed device */
+ CU_ASSERT_EQUAL(g_event_type1, 0xFF);
+}
+
+static void
+io_valid_test(void)
+{
+ struct spdk_bdev bdev;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.blocklen = 512;
+ spdk_bdev_notify_blockcnt_change(&bdev, 100);
+
+ /* All parameters valid */
+ CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);
+
+ /* Last valid block */
+ CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);
+
+ /* Offset past end of bdev */
+ CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);
+
+ /* Offset + length past end of bdev */
+ CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);
+
+ /* Offset near end of uint64_t range (2^64 - 1) */
+ CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
+}
+
+static void
+alias_add_del_test(void)
+{
+ struct spdk_bdev *bdev[3];
+ int rc;
+
+ /* Creating and registering bdevs */
+ bdev[0] = allocate_bdev("bdev0");
+ SPDK_CU_ASSERT_FATAL(bdev[0] != 0);
+
+ bdev[1] = allocate_bdev("bdev1");
+ SPDK_CU_ASSERT_FATAL(bdev[1] != 0);
+
+ bdev[2] = allocate_bdev("bdev2");
+ SPDK_CU_ASSERT_FATAL(bdev[2] != 0);
+
+ poll_threads();
+
+ /*
+ * Try adding an alias identical to the bdev's name.
+ * Since the alias matches the name, it cannot be added to the aliases list.
+ */
+ rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
+ CU_ASSERT(rc == -EEXIST);
+
+ /*
+ * Try adding an empty (NULL) alias;
+ * this should fail.
+ */
+ rc = spdk_bdev_alias_add(bdev[0], NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Try adding the same alias to two different registered bdevs */
+
+ /* Alias is used first time, so this one should pass */
+ rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
+ CU_ASSERT(rc == 0);
+
+ /* Alias was added to another bdev, so this one should fail */
+ rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Alias is used first time, so this one should pass */
+ rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
+ CU_ASSERT(rc == 0);
+
+ /* Try removing aliases from registered bdevs */
+
+ /* Alias is not on a bdev aliases list, so this one should fail */
+ rc = spdk_bdev_alias_del(bdev[0], "not existing");
+ CU_ASSERT(rc == -ENOENT);
+
+ /* Alias is present on a bdev aliases list, so this one should pass */
+ rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
+ CU_ASSERT(rc == 0);
+
+ /* Alias is present on a bdev aliases list, so this one should pass */
+ rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
+ CU_ASSERT(rc == 0);
+
+ /* Try removing the name instead of an alias. This should fail since the name cannot be changed or removed. */
+ rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
+ CU_ASSERT(rc != 0);
+
+ /* Try deleting all aliases from an empty alias list */
+ spdk_bdev_alias_del_all(bdev[2]);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));
+
+ /* Try deleting all aliases from a non-empty alias list */
+ rc = spdk_bdev_alias_add(bdev[2], "alias0");
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_alias_add(bdev[2], "alias1");
+ CU_ASSERT(rc == 0);
+ spdk_bdev_alias_del_all(bdev[2]);
+ CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));
+
+ /* Unregister and free bdevs */
+ spdk_bdev_unregister(bdev[0], NULL, NULL);
+ spdk_bdev_unregister(bdev[1], NULL, NULL);
+ spdk_bdev_unregister(bdev[2], NULL, NULL);
+
+ poll_threads();
+
+ free(bdev[0]);
+ free(bdev[1]);
+ free(bdev[2]);
+}
+
+static void
+io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_io_done = true;
+ g_io_status = bdev_io->internal.status;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+bdev_init_cb(void *arg, int rc)
+{
+ CU_ASSERT(rc == 0);
+}
+
+static void
+bdev_fini_cb(void *arg)
+{
+}
+
+struct bdev_ut_io_wait_entry {
+ struct spdk_bdev_io_wait_entry entry;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_desc *desc;
+ bool submitted;
+};
+
+static void
+io_wait_cb(void *arg)
+{
+ struct bdev_ut_io_wait_entry *entry = arg;
+ int rc;
+
+ rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ entry->submitted = true;
+}
+
+static void
+bdev_io_types_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 4,
+ .bdev_io_cache_size = 2,
+ };
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+ poll_threads();
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ /* WRITE and WRITE ZEROES are not supported */
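+ /* Presumably the write_zeroes path falls back to plain WRITEs of a zeroed
+ * buffer when WRITE_ZEROES is unsupported, so WRITE is disabled as well to
+ * force the -ENOTSUP return checked below.
+ */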
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
+ rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
+ CU_ASSERT(rc == -ENOTSUP);
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+bdev_io_wait_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 4,
+ .bdev_io_cache_size = 2,
+ };
+ struct bdev_ut_io_wait_entry io_wait_entry;
+ struct bdev_ut_io_wait_entry io_wait_entry2;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+ poll_threads();
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == -ENOMEM);
+
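+ /* The pool (bdev_io_pool_size = 4) is exhausted, so queue wait entries;
+ * io_wait_cb() resubmits the read once an spdk_bdev_io is freed.
+ */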
+ io_wait_entry.entry.bdev = bdev;
+ io_wait_entry.entry.cb_fn = io_wait_cb;
+ io_wait_entry.entry.cb_arg = &io_wait_entry;
+ io_wait_entry.io_ch = io_ch;
+ io_wait_entry.desc = desc;
+ io_wait_entry.submitted = false;
+ /* Cannot use the same io_wait_entry for two different calls. */
+ memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
+ io_wait_entry2.entry.cb_arg = &io_wait_entry2;
+
+ /* Queue two I/O waits. */
+ rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(io_wait_entry.submitted == false);
+ rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(io_wait_entry2.submitted == false);
+
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+ CU_ASSERT(io_wait_entry.submitted == true);
+ CU_ASSERT(io_wait_entry2.submitted == false);
+
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+ CU_ASSERT(io_wait_entry2.submitted == true);
+
+ stub_complete_io(4);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+bdev_io_spans_boundary_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_bdev_io bdev_io;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.optimal_io_boundary = 0;
+ bdev_io.bdev = &bdev;
+
+ /* bdev has no optimal_io_boundary set - so this should return false. */
+ CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
+
+ bdev.optimal_io_boundary = 32;
+ bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
+
+ /* RESETs are not based on LBAs - so this should return false. */
+ CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
+
+ bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
+ bdev_io.u.bdev.offset_blocks = 0;
+ bdev_io.u.bdev.num_blocks = 32;
+
+ /* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
+ CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
+
+ bdev_io.u.bdev.num_blocks = 33;
+
+ /* This I/O spans a boundary. */
+ CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
+}
+
+static void
+bdev_io_split_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 512,
+ .bdev_io_cache_size = 64,
+ };
+ struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
+ struct ut_expected_io *expected_io;
+ uint64_t i;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = false;
+
+ g_io_done = false;
+
+ /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ bdev->split_on_optimal_io_boundary = true;
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
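+ /* With optimal_io_boundary = 16, boundaries fall at LBAs 16, 32, ...; the
+ * request covers blocks 14-21, so it is cut at LBA 16 into 16 - 14 = 2
+ * blocks and 22 - 16 = 6 blocks.
+ */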
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* spdk_bdev_read_blocks will submit the first child immediately. */
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Now set up a more complex, multi-vector command that needs to be split,
+ * including splitting iovecs.
+ */
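+ /* The write spans blocks 14-45 (1 + 20 + 11 iovec blocks = 32), so the
+ * boundaries at LBAs 16 and 32 produce children of 2, 16 and 14 blocks; the
+ * 20-block iovec gets carved across all three children.
+ */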
+ iov[0].iov_base = (void *)0x10000;
+ iov[0].iov_len = 512;
+ iov[1].iov_base = (void *)0x20000;
+ iov[1].iov_len = 20 * 512;
+ iov[2].iov_base = (void *)0x30000;
+ iov[2].iov_len = 11 * 512;
+
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
+ stub_complete_io(3);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test multi vector command that needs to be split by strip and then needs to be
+ * split further due to the capacity of child iovs.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ ut_expected_io_set_iov(expected_io, i,
+ (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Test multi vector command that needs to be split by strip and then needs to be
+ * split further due to the capacity of child iovs. In this case, the length of
+ * the remaining iovecs within an I/O boundary is a multiple of the block size.
+ */
+
+ /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
+ * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+ for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 256;
+ }
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
+
+ /* Add an extra iovec to trigger split */
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
+ BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ ut_expected_io_set_iov(expected_io, i,
+ (void *)((i + 1) * 0x10000), 512);
+ }
+ for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ ut_expected_io_set_iov(expected_io, i,
+ (void *)((i + 1) * 0x10000), 256);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
+ 1, 1);
+ ut_expected_io_set_iov(expected_io, 0,
+ (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ 1, 1);
+ ut_expected_io_set_iov(expected_io, 0,
+ (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
+ BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Test multi vector command that needs to be split by strip and then needs to be
+ * split further due to the capacity of child iovs. The child request offset should
+ * be rewound to the last aligned offset so the request completes successfully.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
+
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
+
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ g_io_status = 0;
+ /* The first expected I/O starts at offset 0 with length BDEV_IO_NUM_CHILD_IOV - 1 */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
+ BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ ut_expected_io_set_iov(expected_io, i,
+ (void *)((i + 1) * 0x10000), 512);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ /* The second expected I/O starts at offset BDEV_IO_NUM_CHILD_IOV - 1 with length 1 */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
+ 1, 2);
+ ut_expected_io_set_iov(expected_io, 0,
+ (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
+ ut_expected_io_set_iov(expected_io, 1,
+ (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ /* The third expected I/O starts at offset BDEV_IO_NUM_CHILD_IOV with length 1 */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ 1, 1);
+ ut_expected_io_set_iov(expected_io, 0,
+ (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Test multi vector command that needs to be split due to the IO boundary and
+ * the capacity of child iovs. In particular, test the case where the command is
+ * split due to the capacity of child iovs and the tail address is not aligned with
+ * the block size, so it is rewound to the aligned address.
+ *
+ * The iovecs used in the read request are complex but are based on data
+ * collected from a real issue. We change the base addresses but keep the lengths
+ * so as not to lose the credibility of the test.
+ */
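+ /* Each child below is limited both by the 128-block (65536-byte) boundary
+ * and by the 32-entry child iovec capacity; the first child therefore stops
+ * at 32 iovecs totalling 64512 bytes (126 blocks), short of the boundary.
+ */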
+ bdev->optimal_io_boundary = 128;
+ g_io_done = false;
+ g_io_status = 0;
+
+ for (i = 0; i < 31; i++) {
+ iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
+ iov[i].iov_len = 1024;
+ }
+ iov[31].iov_base = (void *)0xFEED1F00000;
+ iov[31].iov_len = 32768;
+ iov[32].iov_base = (void *)0xFEED2000000;
+ iov[32].iov_len = 160;
+ iov[33].iov_base = (void *)0xFEED2100000;
+ iov[33].iov_len = 4096;
+ iov[34].iov_base = (void *)0xFEED2200000;
+ iov[34].iov_len = 4096;
+ iov[35].iov_base = (void *)0xFEED2300000;
+ iov[35].iov_len = 4096;
+ iov[36].iov_base = (void *)0xFEED2400000;
+ iov[36].iov_len = 4096;
+ iov[37].iov_base = (void *)0xFEED2500000;
+ iov[37].iov_len = 4096;
+ iov[38].iov_base = (void *)0xFEED2600000;
+ iov[38].iov_len = 4096;
+ iov[39].iov_base = (void *)0xFEED2700000;
+ iov[39].iov_len = 4096;
+ iov[40].iov_base = (void *)0xFEED2800000;
+ iov[40].iov_len = 4096;
+ iov[41].iov_base = (void *)0xFEED2900000;
+ iov[41].iov_len = 4096;
+ iov[42].iov_base = (void *)0xFEED2A00000;
+ iov[42].iov_len = 4096;
+ iov[43].iov_base = (void *)0xFEED2B00000;
+ iov[43].iov_len = 12288;
+ iov[44].iov_base = (void *)0xFEED2C00000;
+ iov[44].iov_len = 8192;
+ iov[45].iov_base = (void *)0xFEED2F00000;
+ iov[45].iov_len = 4096;
+ iov[46].iov_base = (void *)0xFEED3000000;
+ iov[46].iov_len = 4096;
+ iov[47].iov_base = (void *)0xFEED3100000;
+ iov[47].iov_len = 4096;
+ iov[48].iov_base = (void *)0xFEED3200000;
+ iov[48].iov_len = 24576;
+ iov[49].iov_base = (void *)0xFEED3300000;
+ iov[49].iov_len = 16384;
+ iov[50].iov_base = (void *)0xFEED3400000;
+ iov[50].iov_len = 12288;
+ iov[51].iov_base = (void *)0xFEED3500000;
+ iov[51].iov_len = 4096;
+ iov[52].iov_base = (void *)0xFEED3600000;
+ iov[52].iov_len = 4096;
+ iov[53].iov_base = (void *)0xFEED3700000;
+ iov[53].iov_len = 4096;
+ iov[54].iov_base = (void *)0xFEED3800000;
+ iov[54].iov_len = 28672;
+ iov[55].iov_base = (void *)0xFEED3900000;
+ iov[55].iov_len = 20480;
+ iov[56].iov_base = (void *)0xFEED3A00000;
+ iov[56].iov_len = 4096;
+ iov[57].iov_base = (void *)0xFEED3B00000;
+ iov[57].iov_len = 12288;
+ iov[58].iov_base = (void *)0xFEED3C00000;
+ iov[58].iov_len = 4096;
+ iov[59].iov_base = (void *)0xFEED3D00000;
+ iov[59].iov_len = 4096;
+ iov[60].iov_base = (void *)0xFEED3E00000;
+ iov[60].iov_len = 352;
+
+ /* The 1st child IO must be from iov[0] to iov[31] split by the capacity
+ * of child iovs.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
+ for (i = 0; i < 32; i++) {
+ ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33]
+ * split by the IO boundary requirement.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
+ ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
+ ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
+ * the first 864 bytes of iov[46] split by the IO boundary requirement.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
+ ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
+ iov[33].iov_len - 864);
+ ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
+ ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
+ ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
+ ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
+ ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
+ ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
+ ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
+ ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
+ ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
+ ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
+ ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
+ ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
+ ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
+ * first 864 bytes of iov[52] split by the IO boundary requirement.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
+ ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
+ iov[46].iov_len - 864);
+ ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
+ ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
+ ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
+ ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
+ ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
+ ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
+ * the first 4960 bytes of iov[57] split by the IO boundary requirement.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
+ ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
+ iov[52].iov_len - 864);
+ ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
+ ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
+ ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
+ ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
+ ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
+ * to the first 3936 bytes of iov[59] split by the capacity of child iovs.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
+ ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
+ iov[57].iov_len - 4960);
+ ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
+ ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
+ iov[59].iov_len - 3936);
+ ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 61, 0, 543, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
+ stub_complete_io(5);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
+ * split, so test that.
+ */
+ bdev->optimal_io_boundary = 15;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test an UNMAP. This should also not be split. */
+ bdev->optimal_io_boundary = 16;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test a FLUSH. This should also not be split. */
+ bdev->optimal_io_boundary = 16;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
+
+ /* Child requests return an error status */
+ bdev->optimal_io_boundary = 16;
+ iov[0].iov_base = (void *)0x10000;
+ iov[0].iov_len = 512 * 64;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_io_done = false;
+ g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
+ stub_complete_io(4);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /* Test that a multi-vector command is terminated with failure, instead of
+ * continuing the splitting process, when one of its child I/Os fails.
+ * The multi-vector command is the same as the one above that needs to be split by strip
+ * and then needs to be split further due to the capacity of child iovs.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
+
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
+
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_io_done = false;
+ g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ /* for this test we will create the following conditions to hit the code path where
+ * we are trying to send an I/O following a split that has no iovs because we had to
+ * trim them for alignment reasons.
+ *
+ * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
+ * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
+ * position 30 and overshoot by 0x2e.
+ * - That means we'll send the IO and loop back to pick up the remaining bytes at
+ * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
+ * which eliminates that vector so we just send the first split IO with 30 vectors
+ * and let the completion pick up the last 2 vectors.
+ */
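+ /* Checking the math: 31 iovecs * 0x212 = 0x402e bytes, which is 0x2e past
+ * the 16K (0x4000) boundary, so child iovec index 30 is trimmed to
+ * 0x212 - 0x2e = 0x1e4 and the first child ends exactly on the boundary.
+ */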
+ bdev->optimal_io_boundary = 32;
+ bdev->split_on_optimal_io_boundary = true;
+ g_io_done = false;
+
+ /* Init all parent IOVs to 0x212 */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 0x212;
+ }
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV - 1);
+ /* expect 0-29 to be 1:1 with the parent iov */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
+ }
+
+ /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment
+ * where 0x2e is the amount we overshot the 16K boundary
+ */
+ ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
+ (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
+ * shortened, taking it to the next boundary, and then a final one to get us to
+ * 0x4200 bytes for the IO.
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ 1, 2);
+ /* position 30 picked up the remaining bytes to the next boundary */
+ ut_expected_io_set_iov(expected_io, 0,
+ (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
+
+ /* position 31 picked up the rest of the transfer to get us to 0x4200 */
+ ut_expected_io_set_iov(expected_io, 1,
+ (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
+ BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+bdev_io_split_with_io_wait(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct spdk_bdev_mgmt_channel *mgmt_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 2,
+ .bdev_io_cache_size = 1,
+ };
+ struct iovec iov[3];
+ struct ut_expected_io *expected_io;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+ mgmt_ch = channel->shared_resource->mgmt_ch;
+
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = true;
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The following children will be submitted sequentially due to the capacity of
+ * spdk_bdev_io.
+ */
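+ /* With bdev_io_pool_size = 2, the read submitted above holds one
+ * spdk_bdev_io and the split parent holds the other, so the first child has
+ * to sit on the io_wait_queue until one of them is freed.
+ */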
+
+ /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Completing the first read I/O will submit the first child */
+ stub_complete_io(1);
+ CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Completing the first child will submit the second child */
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Complete the second child I/O. This should result in our callback getting
+ * invoked since the parent I/O is now complete.
+ */
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Now set up a more complex, multi-vector command that needs to be split,
+ * including splitting iovecs.
+ */
+ iov[0].iov_base = (void *)0x10000;
+ iov[0].iov_len = 512;
+ iov[1].iov_base = (void *)0x20000;
+ iov[1].iov_len = 20 * 512;
+ iov[2].iov_base = (void *)0x30000;
+ iov[2].iov_len = 11 * 512;
+
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ /* The following children will be submitted sequentially due to the capacity of
+ * spdk_bdev_io.
+ */
+
+ /* Completing the first child will submit the second child */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ /* Completing the second child will submit the third child */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ /* Completing the third child will result in our callback getting invoked
+ * since the parent I/O is now complete.
+ */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+bdev_io_alignment(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 20,
+ .bdev_io_cache_size = 2,
+ };
+ int rc;
+ void *buf;
+ struct iovec iovs[2];
+ int iovcnt;
+ uint64_t alignment;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ fn_table.submit_request = stub_submit_request_get_buf;
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ /* Create aligned buffer */
+ rc = posix_memalign(&buf, 4096, 8192);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Pass aligned single buffer with no alignment required */
+ alignment = 1;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ stub_complete_io(1);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ stub_complete_io(1);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+
+ /* Pass unaligned single buffer with no alignment required */
+ alignment = 1;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
+ stub_complete_io(1);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
+ stub_complete_io(1);
+
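+ /* For the cases below, a payload that violates required_alignment should
+ * make the bdev layer substitute its internal bounce_iov and record the
+ * caller's vector count in orig_iovcnt, which drops back to 0 once the I/O
+ * completes - exactly what the asserts check.
+ */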
+ /* Pass unaligned single buffer with 512 alignment required */
+ alignment = 512;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ /* Pass unaligned single buffer with 4096 alignment required */
+ alignment = 4096;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ /* Pass aligned iovs with no alignment required */
+ alignment = 1;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ iovcnt = 1;
+ iovs[0].iov_base = buf;
+ iovs[0].iov_len = 512;
+
+ rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
+
+ rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
+
+ /* Pass unaligned iovs with no alignment required */
+ alignment = 1;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ iovcnt = 2;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 256;
+ iovs[1].iov_base = buf + 16 + 256 + 32;
+ iovs[1].iov_len = 256;
+
+ rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
+
+ rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
+
+ /* Pass unaligned iov with 2048 alignment required */
+ alignment = 2048;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ iovcnt = 2;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 256;
+ iovs[1].iov_base = buf + 16 + 256 + 32;
+ iovs[1].iov_len = 256;
+
+ rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+ rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+
+	/* Pass an iov without an allocated buffer, with no alignment required */
+ alignment = 1;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ iovcnt = 1;
+ iovs[0].iov_base = NULL;
+ iovs[0].iov_len = 0;
+
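+	/* A NULL iov_base means the bdev layer allocates the data buffer itself, so no
+	 * bounce copy is needed (orig_iovcnt stays 0) and the allocated buffer is
+	 * expected to satisfy the required alignment.
+	 */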
+ rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+
+	/* Pass an iov without an allocated buffer, with 1024 alignment required */
+ alignment = 1024;
+ bdev->required_alignment = spdk_u32log2(alignment);
+
+ iovcnt = 1;
+ iovs[0].iov_base = NULL;
+ iovs[0].iov_len = 0;
+
+ rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
+ CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
+ alignment));
+ stub_complete_io(1);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ fn_table.submit_request = stub_submit_request;
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+
+ free(buf);
+}
+
+static void
+bdev_io_alignment_with_boundary(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 20,
+ .bdev_io_cache_size = 2,
+ };
+ int rc;
+ void *buf;
+ struct iovec iovs[2];
+ int iovcnt;
+ uint64_t alignment;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ fn_table.submit_request = stub_submit_request_get_buf;
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ /* Create aligned buffer */
+ rc = posix_memalign(&buf, 4096, 131072);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
+ alignment = 512;
+ bdev->required_alignment = spdk_u32log2(alignment);
+ bdev->optimal_io_boundary = 2;
+ bdev->split_on_optimal_io_boundary = true;
+
+ iovcnt = 1;
+ iovs[0].iov_base = NULL;
+ iovs[0].iov_len = 512 * 3;
+
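+	/* Reading 3 blocks starting at block 1 with a 2-block boundary splits the request
+	 * into two children: block 1 and blocks 2-3.
+	 */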
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+
+ /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
+ alignment = 512;
+ bdev->required_alignment = spdk_u32log2(alignment);
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = true;
+
+ iovcnt = 1;
+ iovs[0].iov_base = NULL;
+ iovs[0].iov_len = 512 * 16;
+
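+	/* Reading 16 blocks starting at block 1 with a 16-block boundary splits the
+	 * request into a 15-block child and a 1-block child.
+	 */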
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+
+ /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */
+ alignment = 512;
+ bdev->required_alignment = spdk_u32log2(alignment);
+ bdev->optimal_io_boundary = 128;
+ bdev->split_on_optimal_io_boundary = true;
+
+ iovcnt = 1;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 512 * 160;
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+
+ /* 512 * 3 with 2 IO boundary */
+ alignment = 512;
+ bdev->required_alignment = spdk_u32log2(alignment);
+ bdev->optimal_io_boundary = 2;
+ bdev->split_on_optimal_io_boundary = true;
+
+ iovcnt = 2;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 512;
+ iovs[1].iov_base = buf + 16 + 512 + 32;
+ iovs[1].iov_len = 1024;
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+
+ /* 512 * 64 with 32 IO boundary */
+ bdev->optimal_io_boundary = 32;
+ iovcnt = 2;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 16384;
+ iovs[1].iov_base = buf + 16 + 16384 + 32;
+ iovs[1].iov_len = 16384;
+
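+	/* Writing 64 blocks starting at block 1 with a 32-block boundary yields three
+	 * children of 31, 32 and 1 blocks.
+	 */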
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
+ stub_complete_io(3);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
+ stub_complete_io(3);
+
+ /* 512 * 160 with 32 IO boundary */
+ iovcnt = 1;
+ iovs[0].iov_base = buf + 16;
+ iovs[0].iov_len = 16384 + 65536;
+
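+	/* Writing 160 blocks starting at block 1 with a 32-block boundary yields six
+	 * children: 31 blocks, then four full 32-block children, then 1 block.
+	 */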
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
+ stub_complete_io(6);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ fn_table.submit_request = stub_submit_request;
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+
+ free(buf);
+}
+
+static void
+histogram_status_cb(void *cb_arg, int status)
+{
+ g_status = status;
+}
+
+static void
+histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
+{
+ g_status = status;
+ g_histogram = histogram;
+}
+
+static void
+histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
+ uint64_t total, uint64_t so_far)
+{
+ g_count += count;
+}
+
+static void
+bdev_histograms(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *ch;
+ struct spdk_histogram_data *histogram;
+ uint8_t buf[4096];
+ int rc;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+
+ ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(ch != NULL);
+
+ /* Enable histogram */
+ g_status = -1;
+ spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(bdev->internal.histogram_enabled == true);
+
+ /* Allocate histogram */
+ histogram = spdk_histogram_data_alloc();
+ SPDK_CU_ASSERT_FATAL(histogram != NULL);
+
+ /* Check if histogram is zeroed */
+ spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
+
+ g_count = 0;
+ spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
+
+ CU_ASSERT(g_count == 0);
+
+ rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+
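+	/* Advance the mocked clock before completing so the I/O is recorded in the
+	 * histogram with a non-zero latency.
+	 */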
+ spdk_delay_us(10);
+ stub_complete_io(1);
+ poll_threads();
+
+ rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+
+ spdk_delay_us(10);
+ stub_complete_io(1);
+ poll_threads();
+
+ /* Check if histogram gathered data from all I/O channels */
+ g_histogram = NULL;
+ spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(bdev->internal.histogram_enabled == true);
+ SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
+
+ g_count = 0;
+ spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
+ CU_ASSERT(g_count == 2);
+
+ /* Disable histogram */
+ spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(bdev->internal.histogram_enabled == false);
+
+ /* Try to run histogram commands on disabled bdev */
+ spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
+ poll_threads();
+ CU_ASSERT(g_status == -EFAULT);
+
+ spdk_histogram_data_free(histogram);
+ spdk_put_io_channel(ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+_bdev_compare(bool emulated)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *ioch;
+ struct ut_expected_io *expected_io;
+ uint64_t offset, num_blocks;
+ uint32_t num_completed;
+ char aa_buf[512];
+ char bb_buf[512];
+ struct iovec compare_iov;
+ uint8_t io_type;
+ int rc;
+
+ if (emulated) {
+ io_type = SPDK_BDEV_IO_TYPE_READ;
+ } else {
+ io_type = SPDK_BDEV_IO_TYPE_COMPARE;
+ }
+
+ memset(aa_buf, 0xaa, sizeof(aa_buf));
+ memset(bb_buf, 0xbb, sizeof(bb_buf));
+
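+	/* When emulated, the backing bdev reports no COMPARE support, so the bdev layer
+	 * falls back to reading the blocks and comparing them in memory; the expected
+	 * I/O type below reflects that.
+	 */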
+ g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+ fn_table.submit_request = stub_submit_request_get_buf;
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ ioch = spdk_bdev_get_io_channel(desc);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+
+ fn_table.submit_request = stub_submit_request_get_buf;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ offset = 50;
+ num_blocks = 1;
+ compare_iov.iov_base = aa_buf;
+ compare_iov.iov_len = sizeof(aa_buf);
+
+ expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ g_io_done = false;
+ g_compare_read_buf = aa_buf;
+ g_compare_read_buf_len = sizeof(aa_buf);
+ rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(1);
+ CU_ASSERT_EQUAL(num_completed, 1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ g_io_done = false;
+ g_compare_read_buf = bb_buf;
+ g_compare_read_buf_len = sizeof(bb_buf);
+ rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(1);
+ CU_ASSERT_EQUAL(num_completed, 1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
+
+ spdk_put_io_channel(ioch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ fn_table.submit_request = stub_submit_request;
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+
+ g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;
+
+ g_compare_read_buf = NULL;
+}
+
+static void
+bdev_compare(void)
+{
+ _bdev_compare(true);
+ _bdev_compare(false);
+}
+
+static void
+bdev_compare_and_write(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *ioch;
+ struct ut_expected_io *expected_io;
+ uint64_t offset, num_blocks;
+ uint32_t num_completed;
+ char aa_buf[512];
+ char bb_buf[512];
+ char cc_buf[512];
+ char write_buf[512];
+ struct iovec compare_iov;
+ struct iovec write_iov;
+ int rc;
+
+ memset(aa_buf, 0xaa, sizeof(aa_buf));
+ memset(bb_buf, 0xbb, sizeof(bb_buf));
+ memset(cc_buf, 0xcc, sizeof(cc_buf));
+
+ g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+ fn_table.submit_request = stub_submit_request_get_buf;
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ ioch = spdk_bdev_get_io_channel(desc);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+
+ fn_table.submit_request = stub_submit_request_get_buf;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ offset = 50;
+ num_blocks = 1;
+ compare_iov.iov_base = aa_buf;
+ compare_iov.iov_len = sizeof(aa_buf);
+ write_iov.iov_base = bb_buf;
+ write_iov.iov_len = sizeof(bb_buf);
+
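+	/* With COMPARE unsupported, compare-and-write is carried out as a READ (for the
+	 * comparison) followed by a WRITE, both performed under an LBA range lock.
+	 */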
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ g_io_done = false;
+ g_compare_read_buf = aa_buf;
+ g_compare_read_buf_len = sizeof(aa_buf);
+ memset(write_buf, 0, sizeof(write_buf));
+ g_compare_write_buf = write_buf;
+ g_compare_write_buf_len = sizeof(write_buf);
+ rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
+ offset, num_blocks, io_done, NULL);
+ /* Trigger range locking */
+ poll_threads();
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(1);
+ CU_ASSERT_EQUAL(num_completed, 1);
+ CU_ASSERT(g_io_done == false);
+ num_completed = stub_complete_io(1);
+ /* Trigger range unlocking */
+ poll_threads();
+ CU_ASSERT_EQUAL(num_completed, 1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ g_io_done = false;
+ g_compare_read_buf = cc_buf;
+ g_compare_read_buf_len = sizeof(cc_buf);
+ memset(write_buf, 0, sizeof(write_buf));
+ g_compare_write_buf = write_buf;
+ g_compare_write_buf_len = sizeof(write_buf);
+ rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
+ offset, num_blocks, io_done, NULL);
+ /* Trigger range locking */
+ poll_threads();
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(1);
+	/* Trigger range unlocking earlier because we expect an error here */
+ poll_threads();
+ CU_ASSERT_EQUAL(num_completed, 1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
+ num_completed = stub_complete_io(1);
+ CU_ASSERT_EQUAL(num_completed, 0);
+
+ spdk_put_io_channel(ioch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ fn_table.submit_request = stub_submit_request;
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+
+ g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;
+
+ g_compare_read_buf = NULL;
+ g_compare_write_buf = NULL;
+}
+
+static void
+bdev_write_zeroes(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *ioch;
+ struct ut_expected_io *expected_io;
+ uint64_t offset, num_io_blocks, num_blocks;
+ uint32_t num_completed, num_requests;
+ int rc;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ ioch = spdk_bdev_get_io_channel(desc);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+
+ fn_table.submit_request = stub_submit_request;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ /* First test that if the bdev supports write_zeroes, the request won't be split */
+ bdev->md_len = 0;
+ bdev->blocklen = 4096;
+ num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(1);
+ CU_ASSERT_EQUAL(num_completed, 1);
+
+ /* Check that if write zeroes is not supported it'll be replaced by regular writes */
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
+ num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
+ num_requests = 2;
+ num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;
+
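+	/* Each fallback WRITE covers at most ZERO_BUFFER_SIZE bytes, so two WRITE
+	 * requests are expected for this range.
+	 */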
+ for (offset = 0; offset < num_requests; ++offset) {
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
+ offset * num_io_blocks, num_io_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ }
+
+ rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(num_requests);
+ CU_ASSERT_EQUAL(num_completed, num_requests);
+
+ /* Check that the splitting is correct if bdev has interleaved metadata */
+ bdev->md_interleave = true;
+ bdev->md_len = 64;
+ bdev->blocklen = 4096 + 64;
+ num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
+
+ num_requests = offset = 0;
+ while (offset < num_blocks) {
+ num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
+ offset, num_io_blocks, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ offset += num_io_blocks;
+ num_requests++;
+ }
+
+ rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(num_requests);
+ CU_ASSERT_EQUAL(num_completed, num_requests);
+ num_completed = stub_complete_io(num_requests);
+	CU_ASSERT_EQUAL(num_completed, 0);
+
+	/* Check the same for a separate metadata buffer */
+ bdev->md_interleave = false;
+ bdev->md_len = 64;
+ bdev->blocklen = 4096;
+
+ num_requests = offset = 0;
+ while (offset < num_blocks) {
+ num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
+ offset, num_io_blocks, 0);
+ expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ offset += num_io_blocks;
+ num_requests++;
+ }
+
+ rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+ num_completed = stub_complete_io(num_requests);
+ CU_ASSERT_EQUAL(num_completed, num_requests);
+
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
+ spdk_put_io_channel(ioch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+bdev_open_while_hotremove(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc[2] = {};
+ int rc;
+
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[0]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
+
+ spdk_bdev_unregister(bdev, NULL, NULL);
+
+ rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[1]);
+ CU_ASSERT(rc == -ENODEV);
+ SPDK_CU_ASSERT_FATAL(desc[1] == NULL);
+
+ spdk_bdev_close(desc[0]);
+ free_bdev(bdev);
+}
+
+static void
+bdev_close_while_hotremove(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ int rc = 0;
+
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Simulate hot-unplug by unregistering bdev */
+ g_event_type1 = 0xFF;
+ g_unregister_arg = NULL;
+ g_unregister_rc = -1;
+ spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
+ /* Close device while remove event is in flight */
+ spdk_bdev_close(desc);
+
+ /* Ensure that unregister callback is delayed */
+ CU_ASSERT_EQUAL(g_unregister_arg, NULL);
+ CU_ASSERT_EQUAL(g_unregister_rc, -1);
+
+ poll_threads();
+
+ /* Event callback shall not be issued because device was closed */
+ CU_ASSERT_EQUAL(g_event_type1, 0xFF);
+ /* Unregister callback is issued */
+ CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
+ CU_ASSERT_EQUAL(g_unregister_rc, 0);
+
+ free_bdev(bdev);
+}
+
+static void
+bdev_open_ext(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc1 = NULL;
+ struct spdk_bdev_desc *desc2 = NULL;
+ int rc = 0;
+
+ bdev = allocate_bdev("bdev");
+
+ rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
+ CU_ASSERT_EQUAL(rc, -EINVAL);
+
+ rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ g_event_type1 = 0xFF;
+ g_event_type2 = 0xFF;
+
+ /* Simulate hot-unplug by unregistering bdev */
+ spdk_bdev_unregister(bdev, NULL, NULL);
+ poll_threads();
+
+ /* Check if correct events have been triggered in event callback fn */
+ CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
+ CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
+
+ free_bdev(bdev);
+ poll_threads();
+}
+
+struct timeout_io_cb_arg {
+ struct iovec iov;
+ uint8_t type;
+};
+
+static int
+bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
+{
+ struct spdk_bdev_io *bdev_io;
+ int n = 0;
+
+ if (!ch) {
+ return -1;
+ }
+
+ TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
+ n++;
+ }
+
+ return n;
+}
+
+static void
+bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
+{
+ struct timeout_io_cb_arg *ctx = cb_arg;
+
+ ctx->type = bdev_io->type;
+ ctx->iov.iov_base = bdev_io->iov.iov_base;
+ ctx->iov.iov_len = bdev_io->iov.iov_len;
+}
+
+static void
+bdev_set_io_timeout(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch = NULL;
+ struct spdk_bdev_channel *bdev_ch = NULL;
+ struct timeout_io_cb_arg cb_arg;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev");
+
+ CU_ASSERT(spdk_bdev_open(bdev, true, NULL, NULL, &desc) == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
+
+	/* This is part 1.
+	 * Check the bdev_ch->io_submitted list to make sure that it links the I/Os
+	 * submitted by the user, and only those.
+	 */
+ CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
+ CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
+ stub_complete_io(1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
+ stub_complete_io(1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
+
+ /* Split IO */
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = true;
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
+ /* We count all submitted IOs including IO that are generated by splitting. */
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
+ stub_complete_io(1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
+ stub_complete_io(1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
+
+ /* Also include the reset IO */
+ CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
+ poll_threads();
+ stub_complete_io(1);
+ poll_threads();
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
+
+	/* This is part 2.
+	 * Test registering the desc timeout poller.
+	 */
+
+ /* Successfully set the timeout */
+ CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
+ CU_ASSERT(desc->io_timeout_poller != NULL);
+ CU_ASSERT(desc->timeout_in_sec == 30);
+ CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
+ CU_ASSERT(desc->cb_arg == &cb_arg);
+
+ /* Change the timeout limit */
+ CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
+ CU_ASSERT(desc->io_timeout_poller != NULL);
+ CU_ASSERT(desc->timeout_in_sec == 20);
+ CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
+ CU_ASSERT(desc->cb_arg == &cb_arg);
+
+ /* Disable the timeout */
+ CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
+ CU_ASSERT(desc->io_timeout_poller == NULL);
+
+	/* This is part 3.
+	 * Catch a timed-out I/O and check whether it is the one that was submitted.
+	 */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
+ CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);
+
+ /* Don't reach the limit */
+ spdk_delay_us(15 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+	/* 15 + 15 = 30 reaches the limit */
+ spdk_delay_us(15 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
+ CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
+ stub_complete_io(1);
+
+ /* Use the same split IO above and check the IO */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
+
+	/* The first child completes in time */
+ spdk_delay_us(15 * spdk_get_ticks_hz());
+ poll_threads();
+ stub_complete_io(1);
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+	/* The second child reaches the limit */
+ spdk_delay_us(15 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
+ CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
+ stub_complete_io(1);
+
+ /* Also include the reset IO */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
+ spdk_delay_us(30 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
+ stub_complete_io(1);
+ poll_threads();
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+lba_range_overlap(void)
+{
+ struct lba_range r1, r2;
+
+ r1.offset = 100;
+ r1.length = 50;
+
+ r2.offset = 0;
+ r2.length = 1;
+ CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 0;
+ r2.length = 100;
+ CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 0;
+ r2.length = 110;
+ CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 100;
+ r2.length = 10;
+ CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 110;
+ r2.length = 20;
+ CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 140;
+ r2.length = 150;
+ CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 130;
+ r2.length = 200;
+ CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 150;
+ r2.length = 100;
+ CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
+
+ r2.offset = 110;
+ r2.length = 0;
+ CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
+}
+
+static bool g_lock_lba_range_done;
+static bool g_unlock_lba_range_done;
+
+static void
+lock_lba_range_done(void *ctx, int status)
+{
+ g_lock_lba_range_done = true;
+}
+
+static void
+unlock_lba_range_done(void *ctx, int status)
+{
+ g_unlock_lba_range_done = true;
+}
+
+static void
+lock_lba_range_check_ranges(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct lba_range *range;
+ int ctx1;
+ int rc;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_lock_lba_range_done == true);
+ range = TAILQ_FIRST(&channel->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 20);
+ CU_ASSERT(range->length == 10);
+ CU_ASSERT(range->owner_ch == channel);
+
+ /* Unlocks must exactly match a lock. */
+ g_unlock_lba_range_done = false;
+ rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(g_unlock_lba_range_done == false);
+
+ rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ spdk_delay_us(100);
+ poll_threads();
+
+ CU_ASSERT(g_unlock_lba_range_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+lock_lba_range_with_io_outstanding(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct lba_range *range;
+ char buf[4096];
+ int ctx1;
+ int rc;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+
+ g_io_done = false;
+ rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
+ CU_ASSERT(rc == 0);
+
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ /* The lock should immediately become valid, since there are no outstanding
+ * write I/O.
+ */
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_lock_lba_range_done == true);
+ range = TAILQ_FIRST(&channel->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 20);
+ CU_ASSERT(range->length == 10);
+ CU_ASSERT(range->owner_ch == channel);
+ CU_ASSERT(range->locked_ctx == &ctx1);
+
+ rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ stub_complete_io(1);
+ spdk_delay_us(100);
+ poll_threads();
+
+ CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
+
+ /* Now try again, but with a write I/O. */
+ g_io_done = false;
+ rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
+ CU_ASSERT(rc == 0);
+
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+	/* The lock should not be fully valid yet, since a write I/O is outstanding.
+	 * Note that the range should already be on the channel's locked_ranges list, to
+	 * make sure no new write I/O is started.
+	 */
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_lock_lba_range_done == false);
+ range = TAILQ_FIRST(&channel->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 20);
+ CU_ASSERT(range->length == 10);
+
+ /* Complete the write I/O. This should make the lock valid (checked by confirming
+ * our callback was invoked).
+ */
+ stub_complete_io(1);
+ spdk_delay_us(100);
+ poll_threads();
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_lock_lba_range_done == true);
+
+ rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+lock_lba_range_overlapped(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct lba_range *range;
+ int ctx1;
+ int rc;
+
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+
+ /* Lock range 20-29. */
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_lock_lba_range_done == true);
+ range = TAILQ_FIRST(&channel->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 20);
+ CU_ASSERT(range->length == 10);
+
+ /* Try to lock range 25-39. It should not lock immediately, since it overlaps with
+ * 20-29.
+ */
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_lock_lba_range_done == false);
+ range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 25);
+ CU_ASSERT(range->length == 15);
+
+ /* Unlock 20-29. This should result in range 25-39 now getting locked since it
+ * no longer overlaps with an active lock.
+ */
+ g_unlock_lba_range_done = false;
+ rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_unlock_lba_range_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
+ range = TAILQ_FIRST(&channel->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 25);
+ CU_ASSERT(range->length == 15);
+
+ /* Lock 40-59. This should immediately lock since it does not overlap with the
+ * currently active 25-39 lock.
+ */
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_lock_lba_range_done == true);
+ range = TAILQ_FIRST(&bdev->internal.locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ range = TAILQ_NEXT(range, tailq);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 40);
+ CU_ASSERT(range->length == 20);
+
+ /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_lock_lba_range_done == false);
+ range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 35);
+ CU_ASSERT(range->length == 10);
+
+ /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
+ * the 40-59 lock is still active.
+ */
+ g_unlock_lba_range_done = false;
+ rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_unlock_lba_range_done == true);
+ CU_ASSERT(g_lock_lba_range_done == false);
+ range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 35);
+ CU_ASSERT(range->length == 10);
+
+ /* Unlock 40-59. This should result in 35-44 now getting locked, since there are
+ * no longer any active overlapping locks.
+ */
+ g_unlock_lba_range_done = false;
+ rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_unlock_lba_range_done == true);
+ CU_ASSERT(g_lock_lba_range_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
+ range = TAILQ_FIRST(&bdev->internal.locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 35);
+ CU_ASSERT(range->length == 10);
+
+ /* Finally, unlock 35-44. */
+ g_unlock_lba_range_done = false;
+ rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ CU_ASSERT(g_unlock_lba_range_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+static void
+abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_abort_done = true;
+ g_abort_status = bdev_io->internal.status;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+bdev_io_abort(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct spdk_bdev_mgmt_channel *mgmt_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 7,
+ .bdev_io_cache_size = 2,
+ };
+ struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
+ uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+ mgmt_ch = channel->shared_resource->mgmt_ch;
+
+ g_abort_done = false;
+
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == -ENOTSUP);
+
+ ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /* Test the case that the target I/O was successfully aborted. */
+ g_io_done = false;
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ g_abort_done = false;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+ stub_complete_io(1);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+	/* Test the case where the target I/O was not aborted because it completed
+	 * while the abort was being processed.
+	 */
+ g_io_done = false;
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ g_abort_done = false;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
+ stub_complete_io(1);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = true;
+
+ /* Test that a single-vector command which is split is aborted correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ */
+ g_io_done = false;
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+ stub_complete_io(2);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+	/* Test that a multi-vector command which is split by strip and then split again
+	 * is aborted correctly. The abort is requested before the second child I/O is
+	 * submitted. The parent I/O should complete with failure without submitting the
+	 * second child I/O.
+	 */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+ stub_complete_io(1);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ bdev->optimal_io_boundary = 16;
+ g_io_done = false;
+
+	/* Test that a single-vector command which is split is aborted correctly.
+	 * Unlike the above, the child abort requests are submitted sequentially due to
+	 * the limited capacity of the spdk_bdev_io pool.
+	 */
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+
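+	/* With bdev_io_pool_size set to 7, there are not enough free spdk_bdev_io objects
+	 * to submit all of the child abort requests at once, so the remaining ones are
+	 * expected to wait on the management channel's io_wait_queue.
+	 */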
+ g_abort_done = false;
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+ stub_complete_io(3);
+ CU_ASSERT(g_abort_done == true);
+ CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+ poll_threads();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("bdev", null_init, null_clean);
+
+ CU_ADD_TEST(suite, bytes_to_blocks_test);
+ CU_ADD_TEST(suite, num_blocks_test);
+ CU_ADD_TEST(suite, io_valid_test);
+ CU_ADD_TEST(suite, open_write_test);
+ CU_ADD_TEST(suite, alias_add_del_test);
+ CU_ADD_TEST(suite, get_device_stat_test);
+ CU_ADD_TEST(suite, bdev_io_types_test);
+ CU_ADD_TEST(suite, bdev_io_wait_test);
+ CU_ADD_TEST(suite, bdev_io_spans_boundary_test);
+ CU_ADD_TEST(suite, bdev_io_split_test);
+ CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
+ CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
+ CU_ADD_TEST(suite, bdev_io_alignment);
+ CU_ADD_TEST(suite, bdev_histograms);
+ CU_ADD_TEST(suite, bdev_write_zeroes);
+ CU_ADD_TEST(suite, bdev_compare_and_write);
+ CU_ADD_TEST(suite, bdev_compare);
+ CU_ADD_TEST(suite, bdev_open_while_hotremove);
+ CU_ADD_TEST(suite, bdev_close_while_hotremove);
+ CU_ADD_TEST(suite, bdev_open_ext);
+ CU_ADD_TEST(suite, bdev_set_io_timeout);
+ CU_ADD_TEST(suite, lba_range_overlap);
+ CU_ADD_TEST(suite, lock_lba_range_check_ranges);
+ CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
+ CU_ADD_TEST(suite, lock_lba_range_overlapped);
+ CU_ADD_TEST(suite, bdev_io_abort);
+
+ allocate_cores(1);
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+ free_cores();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore
new file mode 100644
index 000000000..906b8067c
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore
@@ -0,0 +1 @@
+bdev_ocssd_ut
diff --git a/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile
new file mode 100644
index 000000000..7106d46fc
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = bdev_ocssd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c
new file mode 100644
index 000000000..a2f8e7f71
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c
@@ -0,0 +1,1195 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/nvme_ocssd_spec.h"
+#include "spdk/thread.h"
+#include "spdk/bdev_module.h"
+#include "spdk/util.h"
+#include "spdk_internal/mock.h"
+
+#include "bdev/nvme/bdev_ocssd.c"
+#include "bdev/nvme/common.c"
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_ns, bool, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid),
+ true);
+DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 4096);
+DEFINE_STUB(spdk_nvme_ns_is_active, bool, (struct spdk_nvme_ns *ns), true);
+DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));
+DEFINE_STUB_V(spdk_bdev_io_complete_nvme_status, (struct spdk_bdev_io *bdev_io, uint32_t cdw0,
+ int sct, int sc));
+DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io),
+ NULL);
+DEFINE_STUB(spdk_bdev_push_media_events, int, (struct spdk_bdev *bdev,
+ const struct spdk_bdev_media_event *events,
+ size_t num_events), 0);
+DEFINE_STUB_V(spdk_bdev_notify_media_management, (struct spdk_bdev *bdev));
+DEFINE_STUB_V(nvme_ctrlr_depopulate_namespace_done, (struct nvme_bdev_ctrlr *ctrlr));
+DEFINE_STUB_V(spdk_bdev_module_finish_done, (void));
+
+struct nvme_request {
+ spdk_nvme_cmd_cb cb_fn;
+ void *cb_arg;
+ TAILQ_ENTRY(nvme_request) tailq;
+};
+
+struct spdk_nvme_qpair {
+ TAILQ_HEAD(, nvme_request) requests;
+};
+
+struct spdk_nvme_ns {
+ uint32_t nsid;
+};
+
+struct spdk_nvme_ctrlr {
+ struct spdk_nvme_transport_id trid;
+ struct spdk_ocssd_geometry_data geometry;
+ struct spdk_nvme_qpair *admin_qpair;
+ struct spdk_nvme_ns *ns;
+ uint32_t ns_count;
+ struct spdk_ocssd_chunk_information_entry *chunk_info;
+ uint64_t num_chunks;
+
+ LIST_ENTRY(spdk_nvme_ctrlr) list;
+};
+
+static LIST_HEAD(, spdk_nvme_ctrlr) g_ctrlr_list = LIST_HEAD_INITIALIZER(g_ctrlr_list);
+static TAILQ_HEAD(, spdk_bdev) g_bdev_list = TAILQ_HEAD_INITIALIZER(g_bdev_list);
+static struct spdk_thread *g_thread;
+
+static struct spdk_nvme_ctrlr *
+find_controller(const struct spdk_nvme_transport_id *trid)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+
+ LIST_FOREACH(ctrlr, &g_ctrlr_list, list) {
+ if (!spdk_nvme_transport_id_compare(trid, &ctrlr->trid)) {
+ return ctrlr;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+free_controller(struct spdk_nvme_ctrlr *ctrlr)
+{
+ CU_ASSERT(!nvme_bdev_ctrlr_get(&ctrlr->trid));
+ LIST_REMOVE(ctrlr, list);
+ spdk_nvme_ctrlr_free_io_qpair(ctrlr->admin_qpair);
+ free(ctrlr->chunk_info);
+ free(ctrlr->ns);
+ free(ctrlr);
+}
+
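+/*
+ * Convert a flat chunk index into an OCSSD LBA by packing the chunk, parallel unit
+ * and group indices into the bit positions described by the controller geometry.
+ */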
+static uint64_t
+chunk_offset_to_lba(struct spdk_ocssd_geometry_data *geo, uint64_t offset)
+{
+ uint64_t chk, pu, grp;
+ uint64_t chk_off, pu_off, grp_off;
+
+ chk_off = geo->lbaf.lbk_len;
+ pu_off = geo->lbaf.chk_len + chk_off;
+ grp_off = geo->lbaf.pu_len + pu_off;
+
+ chk = offset % geo->num_chk;
+ pu = (offset / geo->num_chk) % geo->num_pu;
+ grp = (offset / (geo->num_chk * geo->num_pu)) % geo->num_grp;
+
+ return chk << chk_off |
+ pu << pu_off |
+ grp << grp_off;
+}
+
+static struct spdk_nvme_ctrlr *
+create_controller(const struct spdk_nvme_transport_id *trid, uint32_t ns_count,
+ const struct spdk_ocssd_geometry_data *geo)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ uint32_t nsid, offset;
+
+ SPDK_CU_ASSERT_FATAL(!find_controller(trid));
+
+ ctrlr = calloc(1, sizeof(*ctrlr));
+ SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
+
+ ctrlr->ns = calloc(ns_count, sizeof(*ctrlr->ns));
+ SPDK_CU_ASSERT_FATAL(ctrlr->ns != NULL);
+
+ ctrlr->num_chunks = geo->num_grp * geo->num_pu * geo->num_chk;
+ ctrlr->chunk_info = calloc(ctrlr->num_chunks, sizeof(*ctrlr->chunk_info));
+ SPDK_CU_ASSERT_FATAL(ctrlr->chunk_info != NULL);
+
+ for (nsid = 0; nsid < ns_count; ++nsid) {
+ ctrlr->ns[nsid].nsid = nsid + 1;
+ }
+
+ ctrlr->geometry = *geo;
+ ctrlr->trid = *trid;
+ ctrlr->ns_count = ns_count;
+ ctrlr->admin_qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);
+
+ for (offset = 0; offset < ctrlr->num_chunks; ++offset) {
+ ctrlr->chunk_info[offset].cs.free = 1;
+ ctrlr->chunk_info[offset].slba = chunk_offset_to_lba(&ctrlr->geometry, offset);
+ ctrlr->chunk_info[offset].wp = ctrlr->chunk_info[offset].slba;
+ }
+
+ SPDK_CU_ASSERT_FATAL(ctrlr->admin_qpair != NULL);
+
+ LIST_INSERT_HEAD(&g_ctrlr_list, ctrlr, list);
+
+ return ctrlr;
+}
+
+static int
+io_channel_create_cb(void *io_device, void *ctx_buf)
+{
+ return 0;
+}
+
+static void
+io_channel_destroy_cb(void *io_device, void *ctx_buf)
+{}
+
+void
+nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx,
+ struct nvme_bdev_ns *ns, int rc)
+{
+ CU_ASSERT_EQUAL(rc, 0);
+}
+
+static struct nvme_bdev_ctrlr *
+create_nvme_bdev_controller(const struct spdk_nvme_transport_id *trid, const char *name)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ uint32_t nsid;
+
+ ctrlr = find_controller(trid);
+
+ SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
+ SPDK_CU_ASSERT_FATAL(!nvme_bdev_ctrlr_get(trid));
+
+ nvme_bdev_ctrlr = calloc(1, sizeof(*nvme_bdev_ctrlr));
+ SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
+
+ nvme_bdev_ctrlr->namespaces = calloc(ctrlr->ns_count, sizeof(struct nvme_bdev_ns *));
+ SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces != NULL);
+
+ nvme_bdev_ctrlr->trid = calloc(1, sizeof(struct spdk_nvme_transport_id));
+ SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->trid != NULL);
+
+ nvme_bdev_ctrlr->ctrlr = ctrlr;
+ nvme_bdev_ctrlr->num_ns = ctrlr->ns_count;
+ nvme_bdev_ctrlr->ref = 0;
+ *nvme_bdev_ctrlr->trid = *trid;
+ nvme_bdev_ctrlr->name = strdup(name);
+
+ for (nsid = 0; nsid < ctrlr->ns_count; ++nsid) {
+ nvme_bdev_ctrlr->namespaces[nsid] = calloc(1, sizeof(struct nvme_bdev_ns));
+ SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces[nsid] != NULL);
+
+ nvme_bdev_ctrlr->namespaces[nsid]->id = nsid + 1;
+ nvme_bdev_ctrlr->namespaces[nsid]->ctrlr = nvme_bdev_ctrlr;
+ nvme_bdev_ctrlr->namespaces[nsid]->type = NVME_BDEV_NS_OCSSD;
+ TAILQ_INIT(&nvme_bdev_ctrlr->namespaces[nsid]->bdevs);
+
+ bdev_ocssd_populate_namespace(nvme_bdev_ctrlr, nvme_bdev_ctrlr->namespaces[nsid], NULL);
+ }
+
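+	/* Drain the thread's message queue so that asynchronous namespace population
+	 * completes before the I/O device is registered.
+	 */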
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+
+ spdk_io_device_register(nvme_bdev_ctrlr, io_channel_create_cb,
+ io_channel_destroy_cb, 0, name);
+
+ TAILQ_INSERT_TAIL(&g_nvme_bdev_ctrlrs, nvme_bdev_ctrlr, tailq);
+
+ return nvme_bdev_ctrlr;
+}
+
+static struct nvme_request *
+alloc_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ struct nvme_request *ctx;
+
+ ctx = calloc(1, sizeof(*ctx));
+ SPDK_CU_ASSERT_FATAL(ctx != NULL);
+
+ ctx->cb_fn = cb_fn;
+ ctx->cb_arg = cb_arg;
+
+ return ctx;
+}
+
+uint32_t
+spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return ctrlr->ns_count;
+}
+
+uint32_t
+spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
+{
+ return ns->nsid;
+}
+
+struct spdk_nvme_ns *
+spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
+{
+ if (nsid == 0 || nsid > ctrlr->ns_count) {
+ return NULL;
+ }
+
+ return &ctrlr->ns[nsid - 1];
+}
+
+struct spdk_nvme_ctrlr *
+spdk_nvme_connect(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ size_t opts_size)
+{
+ return find_controller(trid);
+}
+
+int
+spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ struct spdk_bdev *bdev;
+
+ SPDK_CU_ASSERT_FATAL(bdev_name != NULL);
+
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ if (!strcmp(bdev->name, bdev_name)) {
+ return bdev;
+ }
+ }
+
+ return NULL;
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return bdev->name;
+}
+
+int
+spdk_bdev_register(struct spdk_bdev *bdev)
+{
+ CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(bdev->name));
+ TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link);
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ int rc;
+
+ CU_ASSERT_EQUAL(spdk_bdev_get_by_name(bdev->name), bdev);
+ TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
+
+ rc = bdev->fn_table->destruct(bdev->ctxt);
+ if (rc <= 0 && cb_fn != NULL) {
+ cb_fn(cb_arg, 0);
+ }
+}
+
+size_t
+spdk_bdev_get_zone_size(const struct spdk_bdev *bdev)
+{
+ return bdev->zone_size;
+}
+
+int
+spdk_nvme_ocssd_ctrlr_cmd_geometry(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
+ void *payload, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ struct spdk_nvme_cpl cpl = {};
+
+ CU_ASSERT_EQUAL(payload_size, sizeof(ctrlr->geometry));
+ memcpy(payload, &ctrlr->geometry, sizeof(ctrlr->geometry));
+
+ cb_fn(cb_arg, &cpl);
+
+ return 0;
+}
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return memcmp(trid1, trid2, sizeof(*trid1));
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+}
+
+int32_t
+spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return spdk_nvme_qpair_process_completions(ctrlr->admin_qpair, 0);
+}
+
+struct spdk_nvme_qpair *
+spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
+ const struct spdk_nvme_io_qpair_opts *opts,
+ size_t opts_size)
+{
+ struct spdk_nvme_qpair *qpair;
+
+ qpair = calloc(1, sizeof(*qpair));
+ SPDK_CU_ASSERT_FATAL(qpair != NULL);
+
+ TAILQ_INIT(&qpair->requests);
+ return qpair;
+}
+
+int
+spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
+{
+ CU_ASSERT(TAILQ_EMPTY(&qpair->requests));
+ free(qpair);
+
+ return 0;
+}
+
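+/* Completes every request queued by the submission mocks below with a zeroed
+ * (i.e. successful) completion and frees it.
+ */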
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ struct nvme_request *req;
+ struct spdk_nvme_cpl cpl = {};
+ int32_t num_requests = 0;
+
+ while ((req = TAILQ_FIRST(&qpair->requests))) {
+ TAILQ_REMOVE(&qpair->requests, req, tailq);
+
+ req->cb_fn(req->cb_arg, &cpl);
+ free(req);
+
+ num_requests++;
+ }
+
+ return num_requests;
+}
+
+int
+spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+ uint64_t lba, uint32_t lba_count,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
+ spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
+ spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
+ uint16_t apptag_mask, uint16_t apptag)
+{
+ struct nvme_request *req;
+
+ req = alloc_request(cb_fn, cb_arg);
+ TAILQ_INSERT_TAIL(&qpair->requests, req, tailq);
+
+ return 0;
+}
+
+int
+spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+ uint64_t lba, uint32_t lba_count,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
+ spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
+ spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
+ uint16_t apptag_mask, uint16_t apptag)
+{
+ struct nvme_request *req;
+
+ req = alloc_request(cb_fn, cb_arg);
+ TAILQ_INSERT_TAIL(&qpair->requests, req, tailq);
+
+ return 0;
+}
+
+int
+spdk_nvme_ocssd_ns_cmd_vector_reset(struct spdk_nvme_ns *ns,
+ struct spdk_nvme_qpair *qpair,
+ uint64_t *lba_list, uint32_t num_lbas,
+ struct spdk_ocssd_chunk_information_entry *chunk_info,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = alloc_request(cb_fn, cb_arg);
+ TAILQ_INSERT_TAIL(&qpair->requests, req, tailq);
+
+ return 0;
+}
+
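+/* Completion returned by the get_log_page() mock below and the status expected
+ * by get_zone_info_cb().
+ */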
+static struct spdk_nvme_cpl g_chunk_info_cpl;
+static bool g_zone_info_status = true;
+
+int
+spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
+ uint8_t log_page, uint32_t nsid,
+ void *payload, uint32_t payload_size,
+ uint64_t offset,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ SPDK_CU_ASSERT_FATAL(offset + payload_size <= sizeof(*ctrlr->chunk_info) * ctrlr->num_chunks);
+ memcpy(payload, ((char *)ctrlr->chunk_info) + offset, payload_size);
+
+ cb_fn(cb_arg, &g_chunk_info_cpl);
+
+ return 0;
+}
+
+static void
+create_bdev_cb(const char *bdev_name, int status, void *ctx)
+{
+ *(int *)ctx = status;
+}
+
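+/* Synchronous wrapper around bdev_ocssd_create_bdev(): polls the test thread
+ * until the asynchronous callback fires and returns the reported status.
+ */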
+static int
+create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t nsid,
+ const struct bdev_ocssd_range *range)
+{
+ int status = EFAULT;
+
+ bdev_ocssd_create_bdev(ctrlr_name, bdev_name, nsid, range, create_bdev_cb, &status);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+
+ return status;
+}
+
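+/* Tears the controller down the way hot-remove would: marks it for destruction,
+ * unregisters any bdevs, depopulates each namespace and, if no bdevs existed,
+ * destructs the controller directly.  The global controller list must end up
+ * empty afterwards.
+ */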
+static void
+delete_nvme_bdev_controller(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
+{
+ struct nvme_bdev *nvme_bdev, *tmp;
+ struct nvme_bdev_ns *nvme_ns;
+ bool empty = true;
+ uint32_t nsid;
+
+ nvme_bdev_ctrlr->destruct = true;
+
+ for (nsid = 0; nsid < nvme_bdev_ctrlr->num_ns; ++nsid) {
+ nvme_ns = nvme_bdev_ctrlr->namespaces[nsid];
+
+ if (!TAILQ_EMPTY(&nvme_ns->bdevs)) {
+ TAILQ_FOREACH_SAFE(nvme_bdev, &nvme_ns->bdevs, tailq, tmp) {
+ spdk_bdev_unregister(&nvme_bdev->disk, NULL, NULL);
+ }
+
+ empty = false;
+ }
+
+ bdev_ocssd_depopulate_namespace(nvme_bdev_ctrlr->namespaces[nsid]);
+ }
+
+ if (empty) {
+ nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr);
+ }
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_bdev_ctrlrs));
+}
+
+static void
+test_create_controller(void)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
+ struct spdk_ocssd_geometry_data geometry = {};
+ struct spdk_bdev *bdev;
+ struct bdev_ocssd_range range;
+ const char *controller_name = "nvme0";
+ const size_t ns_count = 16;
+ char namebuf[128];
+ uint32_t nsid;
+ int rc;
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 512,
+ .num_chk = 64,
+ .num_pu = 8,
+ .num_grp = 4,
+ .maxoc = 69,
+ .maxocpu = 68,
+ .ws_opt = 86,
+ .lbaf = {
+ .lbk_len = 9,
+ .chk_len = 6,
+ .pu_len = 3,
+ .grp_len = 2,
+ }
+ };
+
+ ctrlr = create_controller(&trid, ns_count, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ for (nsid = 1; nsid <= ns_count; ++nsid) {
+ snprintf(namebuf, sizeof(namebuf), "%sn%"PRIu32, controller_name, nsid);
+ rc = create_bdev(controller_name, namebuf, nsid, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(namebuf);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ CU_ASSERT_TRUE(bdev->zoned);
+ }
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ /* Verify that after deletion the bdevs can still be created */
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ for (nsid = 1; nsid <= ns_count; ++nsid) {
+ snprintf(namebuf, sizeof(namebuf), "%sn%"PRIu32, controller_name, nsid);
+ rc = create_bdev(controller_name, namebuf, nsid, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(namebuf);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ CU_ASSERT_TRUE(bdev->zoned);
+ }
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ /* Verify it's not possible to create a bdev on non-existent namespace */
+ rc = create_bdev(controller_name, "invalid", ns_count + 1, NULL);
+ CU_ASSERT_EQUAL(rc, -ENODEV);
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ /* Verify the correctness of parallel unit range validation */
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ range.begin = 0;
+ range.end = geometry.num_grp * geometry.num_pu;
+
+ rc = create_bdev(controller_name, "invalid", 1, &range);
+ CU_ASSERT_EQUAL(rc, -EINVAL);
+
+ /* Verify it's not possible for the bdevs to overlap */
+ range.begin = 0;
+ range.end = 16;
+ rc = create_bdev(controller_name, "valid", 1, &range);
+ CU_ASSERT_EQUAL(rc, 0);
+ bdev = spdk_bdev_get_by_name("valid");
+ CU_ASSERT_PTR_NOT_NULL(bdev);
+
+ range.begin = 16;
+ range.end = 31;
+ rc = create_bdev(controller_name, "invalid", 1, &range);
+ CU_ASSERT_EQUAL(rc, -EINVAL);
+
+ /* But it is possible to create them without overlap */
+ range.begin = 17;
+ range.end = 31;
+ rc = create_bdev(controller_name, "valid2", 1, &range);
+ CU_ASSERT_EQUAL(rc, 0);
+ bdev = spdk_bdev_get_by_name("valid2");
+ CU_ASSERT_PTR_NOT_NULL(bdev);
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ free_controller(ctrlr);
+}
+
+static void
+test_device_geometry(void)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
+ const char *controller_name = "nvme0";
+ const char *bdev_name = "nvme0n1";
+ struct spdk_ocssd_geometry_data geometry;
+ struct spdk_bdev *bdev;
+ int rc;
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 512,
+ .num_chk = 64,
+ .num_pu = 8,
+ .num_grp = 4,
+ .maxoc = 69,
+ .maxocpu = 68,
+ .ws_opt = 86,
+ .lbaf = {
+ .lbk_len = 9,
+ .chk_len = 6,
+ .pu_len = 3,
+ .grp_len = 2,
+ }
+ };
+
+ ctrlr = create_controller(&trid, 1, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ rc = create_bdev(controller_name, bdev_name, 1, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(bdev_name);
+ CU_ASSERT_EQUAL(bdev->blockcnt, geometry.clba *
+ geometry.num_chk *
+ geometry.num_pu *
+ geometry.num_grp);
+ CU_ASSERT_EQUAL(bdev->zone_size, geometry.clba);
+ CU_ASSERT_EQUAL(bdev->optimal_open_zones, geometry.num_pu * geometry.num_grp);
+ CU_ASSERT_EQUAL(bdev->max_open_zones, geometry.maxocpu);
+ CU_ASSERT_EQUAL(bdev->write_unit_size, geometry.ws_opt);
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ free_controller(ctrlr);
+}
+
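+/* Packs (lbk, chk, pu, grp) into a raw OCSSD address using the bit-field widths
+ * from the geometry; serves as the reference value for the driver's LBA
+ * translation.
+ */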
+static uint64_t
+generate_lba(const struct spdk_ocssd_geometry_data *geo, uint64_t lbk,
+ uint64_t chk, uint64_t pu, uint64_t grp)
+{
+ uint64_t lba, len;
+
+ lba = lbk;
+ len = geo->lbaf.lbk_len;
+ CU_ASSERT(lbk < (1ull << geo->lbaf.lbk_len));
+
+ lba |= chk << len;
+ len += geo->lbaf.chk_len;
+ CU_ASSERT(chk < (1ull << geo->lbaf.chk_len));
+
+ lba |= pu << len;
+ len += geo->lbaf.pu_len;
+ CU_ASSERT(pu < (1ull << geo->lbaf.pu_len));
+
+ lba |= grp << len;
+
+ return lba;
+}
+
+static void
+test_lba_translation(void)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
+ const char *controller_name = "nvme0";
+ const char *bdev_name = "nvme0n1";
+ struct spdk_ocssd_geometry_data geometry = {};
+ struct ocssd_bdev *ocssd_bdev;
+ struct spdk_bdev *bdev;
+ uint64_t lba;
+ int rc;
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 512,
+ .num_chk = 64,
+ .num_pu = 8,
+ .num_grp = 4,
+ .lbaf = {
+ .lbk_len = 9,
+ .chk_len = 6,
+ .pu_len = 3,
+ .grp_len = 2,
+ }
+ };
+
+ ctrlr = create_controller(&trid, 1, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ rc = create_bdev(controller_name, bdev_name, 1, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(bdev_name);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ ocssd_bdev = SPDK_CONTAINEROF(bdev, struct ocssd_bdev, nvme_bdev.disk);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, 0);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), 0);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size - 1);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, bdev->zone_size - 1, 0, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size - 1);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 1, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 1));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * geometry.num_pu);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu + 68);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, 0, 1));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * geometry.num_pu + 68);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size + 68);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, 1, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size + 68);
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+ free_controller(ctrlr);
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 5120,
+ .num_chk = 501,
+ .num_pu = 9,
+ .num_grp = 1,
+ .lbaf = {
+ .lbk_len = 13,
+ .chk_len = 9,
+ .pu_len = 4,
+ .grp_len = 1,
+ }
+ };
+
+ ctrlr = create_controller(&trid, 1, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ rc = create_bdev(controller_name, bdev_name, 1, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(bdev_name);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ ocssd_bdev = SPDK_CONTAINEROF(bdev, struct ocssd_bdev, nvme_bdev.disk);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, 0);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), 0);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size - 1);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, bdev->zone_size - 1, 0, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size - 1);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 1, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * (geometry.num_pu - 1));
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, geometry.num_pu - 1, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * (geometry.num_pu - 1));
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu * geometry.num_grp);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 1, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba),
+ bdev->zone_size * geometry.num_pu * geometry.num_grp);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu * geometry.num_grp + 68);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 1, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba),
+ bdev->zone_size * geometry.num_pu * geometry.num_grp + 68);
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ free_controller(ctrlr);
+}
+
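+/* Converts a flat parallel unit index (as used in struct bdev_ocssd_range) into
+ * its (group, parallel unit) pair; units are numbered consecutively within each
+ * group.
+ */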
+static void
+punit_range_to_addr(const struct spdk_nvme_ctrlr *ctrlr, uint64_t punit,
+ uint64_t *grp, uint64_t *pu)
+{
+ const struct spdk_ocssd_geometry_data *geo = &ctrlr->geometry;
+
+ *grp = punit / geo->num_pu;
+ *pu = punit % geo->num_pu;
+
+ CU_ASSERT(*grp < geo->num_grp);
+}
+
+static void
+test_parallel_unit_range(void)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
+ const char *controller_name = "nvme0";
+ const char *bdev_name[] = { "nvme0n1", "nvme0n2", "nvme0n3" };
+ const struct bdev_ocssd_range range[3] = { { 0, 5 }, { 6, 18 }, { 19, 23 } };
+ struct ocssd_bdev *ocssd_bdev[3];
+ struct spdk_ocssd_geometry_data geometry = {};
+ struct spdk_bdev *bdev[3];
+ uint64_t lba, i, offset, grp, pu, zone_size;
+ int rc;
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 500,
+ .num_chk = 60,
+ .num_pu = 8,
+ .num_grp = 3,
+ .lbaf = {
+ .lbk_len = 9,
+ .chk_len = 6,
+ .pu_len = 3,
+ .grp_len = 2,
+ }
+ };
+
+ ctrlr = create_controller(&trid, 1, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ for (i = 0; i < SPDK_COUNTOF(range); ++i) {
+ rc = create_bdev(controller_name, bdev_name[i], 1, &range[i]);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev[i] = spdk_bdev_get_by_name(bdev_name[i]);
+ SPDK_CU_ASSERT_FATAL(bdev[i] != NULL);
+ ocssd_bdev[i] = SPDK_CONTAINEROF(bdev[i], struct ocssd_bdev, nvme_bdev.disk);
+ }
+
+ zone_size = bdev[0]->zone_size;
+ CU_ASSERT_EQUAL(zone_size, bdev[1]->zone_size);
+ CU_ASSERT_EQUAL(zone_size, bdev[2]->zone_size);
+
+ /* Verify the first addresses are correct */
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[0], 0);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[0], lba), 0);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[1], 0);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 6, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[1], lba), 0);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[2], 0);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 3, 2));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[2], lba), 0);
+
+ /* Verify last address correctness */
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[0], bdev[0]->blockcnt - 1);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, geometry.clba - 1, geometry.num_chk - 1, 5, 0));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[0], lba), bdev[0]->blockcnt - 1);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[1], bdev[1]->blockcnt - 1);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, geometry.clba - 1, geometry.num_chk - 1, 2, 2));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[1], lba), bdev[1]->blockcnt - 1);
+
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[2], bdev[2]->blockcnt - 1);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, geometry.clba - 1, geometry.num_chk - 1, 7, 2));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[2], lba), bdev[2]->blockcnt - 1);
+
+ /* Verify correct jumps across parallel units / groups */
+ for (i = 0; i < SPDK_COUNTOF(range); ++i) {
+ for (offset = 0; offset < bdev_ocssd_num_parallel_units(ocssd_bdev[i]); ++offset) {
+ punit_range_to_addr(ctrlr, range[i].begin + offset, &grp, &pu);
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[i], offset * zone_size + 68);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, pu, grp));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[i], lba),
+ offset * zone_size + 68);
+ }
+ }
+
+ /* Verify correct address wrapping */
+ for (i = 0; i < SPDK_COUNTOF(range); ++i) {
+ punit_range_to_addr(ctrlr, range[i].begin, &grp, &pu);
+
+ offset = bdev_ocssd_num_parallel_units(ocssd_bdev[i]) * zone_size + 68;
+ lba = bdev_ocssd_to_disk_lba(ocssd_bdev[i], offset);
+ CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 1, pu, grp));
+ CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[i], lba), offset);
+ }
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ free_controller(ctrlr);
+}
+
+static void
+get_zone_info_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ CU_ASSERT_EQUAL(g_zone_info_status, success);
+}
+
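+/* Returns the index of a chunk entry in the chunk information log, which is
+ * ordered by group, then parallel unit, then chunk.
+ */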
+static uint64_t
+generate_chunk_offset(const struct spdk_ocssd_geometry_data *geo, uint64_t chk,
+ uint64_t pu, uint64_t grp)
+{
+ return grp * geo->num_pu * geo->num_chk +
+ pu * geo->num_chk + chk;
+}
+
+static struct spdk_bdev_io *
+alloc_ocssd_io(void)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct bdev_ocssd_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+
+ return bdev_io;
+}
+
+static struct spdk_ocssd_chunk_information_entry *
+get_chunk_info(struct spdk_nvme_ctrlr *ctrlr, uint64_t offset)
+{
+	SPDK_CU_ASSERT_FATAL(offset < ctrlr->num_chunks);
+ return &ctrlr->chunk_info[offset];
+}
+
+enum chunk_state {
+ CHUNK_STATE_FREE,
+ CHUNK_STATE_CLOSED,
+ CHUNK_STATE_OPEN,
+ CHUNK_STATE_OFFLINE
+};
+
+static void
+set_chunk_state(struct spdk_ocssd_chunk_information_entry *chunk, enum chunk_state state)
+{
+ memset(&chunk->cs, 0, sizeof(chunk->cs));
+ switch (state) {
+ case CHUNK_STATE_FREE:
+ chunk->cs.free = 1;
+ break;
+ case CHUNK_STATE_CLOSED:
+ chunk->cs.closed = 1;
+ break;
+ case CHUNK_STATE_OPEN:
+ chunk->cs.open = 1;
+ break;
+ case CHUNK_STATE_OFFLINE:
+ chunk->cs.offline = 1;
+ break;
+ default:
+ SPDK_CU_ASSERT_FATAL(0 && "Invalid state");
+ }
+}
+
+static void
+test_get_zone_info(void)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+ struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
+ const char *controller_name = "nvme0";
+ const char *bdev_name = "nvme0n1";
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_io *bdev_io;
+#define MAX_ZONE_INFO_COUNT 64
+ struct spdk_bdev_zone_info zone_info[MAX_ZONE_INFO_COUNT];
+ struct spdk_ocssd_chunk_information_entry *chunk_info;
+ struct spdk_ocssd_geometry_data geometry;
+ uint64_t chunk_offset;
+ int rc, offset;
+
+ geometry = (struct spdk_ocssd_geometry_data) {
+ .clba = 512,
+ .num_chk = 64,
+ .num_pu = 8,
+ .num_grp = 4,
+ .lbaf = {
+ .lbk_len = 9,
+ .chk_len = 6,
+ .pu_len = 3,
+ .grp_len = 2,
+ }
+ };
+
+ ctrlr = create_controller(&trid, 1, &geometry);
+ nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
+
+ rc = create_bdev(controller_name, bdev_name, 1, NULL);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ bdev = spdk_bdev_get_by_name(bdev_name);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ bdev_io = alloc_ocssd_io();
+ bdev_io->internal.cb = get_zone_info_cb;
+ bdev_io->bdev = bdev;
+
+ /* Verify empty zone */
+ bdev_io->u.zone_mgmt.zone_id = 0;
+ bdev_io->u.zone_mgmt.num_zones = 1;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+ chunk_info = get_chunk_info(ctrlr, 0);
+ set_chunk_state(chunk_info, CHUNK_STATE_FREE);
+ chunk_info->wp = 0;
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_EMPTY);
+ CU_ASSERT_EQUAL(zone_info[0].zone_id, 0);
+ CU_ASSERT_EQUAL(zone_info[0].write_pointer, 0);
+ CU_ASSERT_EQUAL(zone_info[0].capacity, geometry.clba);
+
+ /* Verify open zone */
+ bdev_io->u.zone_mgmt.zone_id = bdev->zone_size;
+ bdev_io->u.zone_mgmt.num_zones = 1;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+ chunk_info = get_chunk_info(ctrlr, generate_chunk_offset(&geometry, 0, 1, 0));
+ set_chunk_state(chunk_info, CHUNK_STATE_OPEN);
+ chunk_info->wp = chunk_info->slba + 68;
+ chunk_info->cnlb = 511;
+ chunk_info->ct.size_deviate = 1;
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_OPEN);
+ CU_ASSERT_EQUAL(zone_info[0].zone_id, bdev->zone_size);
+ CU_ASSERT_EQUAL(zone_info[0].write_pointer, bdev->zone_size + 68);
+ CU_ASSERT_EQUAL(zone_info[0].capacity, chunk_info->cnlb);
+
+ /* Verify offline zone at 2nd chunk */
+ bdev_io->u.zone_mgmt.zone_id = bdev->zone_size * geometry.num_pu * geometry.num_grp;
+ bdev_io->u.zone_mgmt.num_zones = 1;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+ chunk_info = get_chunk_info(ctrlr, generate_chunk_offset(&geometry, 1, 0, 0));
+ set_chunk_state(chunk_info, CHUNK_STATE_OFFLINE);
+ chunk_info->wp = chunk_info->slba;
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_OFFLINE);
+ CU_ASSERT_EQUAL(zone_info[0].zone_id, bdev_io->u.zone_mgmt.zone_id);
+ CU_ASSERT_EQUAL(zone_info[0].write_pointer, bdev_io->u.zone_mgmt.zone_id);
+
+ /* Verify multiple zones at a time */
+ bdev_io->u.zone_mgmt.zone_id = 0;
+ bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+
+ for (offset = 0; offset < MAX_ZONE_INFO_COUNT; ++offset) {
+ chunk_offset = generate_chunk_offset(&geometry,
+ (offset / (geometry.num_grp * geometry.num_pu)) % geometry.num_chk,
+ offset % geometry.num_pu,
+ (offset / geometry.num_pu) % geometry.num_grp);
+
+ chunk_info = get_chunk_info(ctrlr, chunk_offset);
+ set_chunk_state(chunk_info, CHUNK_STATE_OPEN);
+ chunk_info->wp = chunk_info->slba + 68;
+ chunk_info->ct.size_deviate = 0;
+ }
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ for (offset = 0; offset < MAX_ZONE_INFO_COUNT; ++offset) {
+ CU_ASSERT_EQUAL(zone_info[offset].state, SPDK_BDEV_ZONE_STATE_OPEN);
+ CU_ASSERT_EQUAL(zone_info[offset].zone_id, bdev->zone_size * offset);
+ CU_ASSERT_EQUAL(zone_info[offset].write_pointer, bdev->zone_size * offset + 68);
+ CU_ASSERT_EQUAL(zone_info[offset].capacity, geometry.clba);
+ }
+
+ /* Verify misaligned start zone LBA */
+ bdev_io->u.zone_mgmt.zone_id = 1;
+ bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+
+ rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+ CU_ASSERT_EQUAL(rc, -EINVAL);
+
+ /* Verify correct NVMe error forwarding */
+ bdev_io->u.zone_mgmt.zone_id = 0;
+ bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT;
+ bdev_io->u.zone_mgmt.buf = &zone_info;
+ chunk_info = get_chunk_info(ctrlr, 0);
+ set_chunk_state(chunk_info, CHUNK_STATE_FREE);
+
+	g_chunk_info_cpl = (struct spdk_nvme_cpl) {
+		.status = {
+			.sct = SPDK_NVME_SCT_GENERIC,
+			.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR
+		}
+	};
+	g_zone_info_status = false;
+
+	rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
+	CU_ASSERT_EQUAL(rc, 0);
+
+	g_chunk_info_cpl = (struct spdk_nvme_cpl) {};
+	g_zone_info_status = true;
+
+ delete_nvme_bdev_controller(nvme_bdev_ctrlr);
+
+ free(bdev_io);
+ free_controller(ctrlr);
+}
+
+int
+main(int argc, const char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ocssd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_create_controller);
+ CU_ADD_TEST(suite, test_device_geometry);
+ CU_ADD_TEST(suite, test_lba_translation);
+ CU_ADD_TEST(suite, test_parallel_unit_range);
+ CU_ADD_TEST(suite, test_get_zone_info);
+
+ g_thread = spdk_thread_create("test", NULL);
+ spdk_set_thread(g_thread);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+
+ spdk_thread_exit(g_thread);
+ while (!spdk_thread_is_exited(g_thread)) {
+ spdk_thread_poll(g_thread, 0, 0);
+ }
+ spdk_thread_destroy(g_thread);
+
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore
new file mode 100644
index 000000000..99af16132
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore
@@ -0,0 +1 @@
+bdev_zone_ut
diff --git a/src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile
new file mode 100644
index 000000000..52dc65f23
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = bdev_zone_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c b/src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c
new file mode 100644
index 000000000..589e105b9
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c
@@ -0,0 +1,429 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/env.h"
+#include "spdk_internal/mock.h"
+
+#include "bdev/bdev_zone.c"
+
+DEFINE_STUB_V(bdev_io_init, (struct spdk_bdev_io *bdev_io,
+ struct spdk_bdev *bdev, void *cb_arg,
+ spdk_bdev_io_completion_cb cb));
+
+DEFINE_STUB_V(bdev_io_submit, (struct spdk_bdev_io *bdev_io));
+
+/* Construct zone_io_operation structure */
+struct zone_io_operation {
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *ch;
+ struct iovec iov;
+ union {
+ struct {
+ uint64_t zone_id;
+ size_t num_zones;
+ enum spdk_bdev_zone_action zone_action;
+ void *buf;
+ struct spdk_bdev_zone_info *info_;
+ } zone_mgmt;
+ struct {
+ void *md_buf;
+ struct iovec *iovs;
+ int iovcnt;
+ uint64_t num_blocks;
+ uint64_t offset_blocks;
+ uint64_t start_lba;
+ } bdev;
+ };
+ spdk_bdev_io_completion_cb cb;
+ void *cb_arg;
+ enum spdk_bdev_io_type io_type;
+};
+
+/* Global variables */
+struct zone_io_operation *g_zone_op = NULL;
+static struct spdk_bdev *g_bdev = NULL;
+static struct spdk_bdev_io *g_bdev_io = NULL;
+static struct spdk_bdev_zone_info g_zone_info = {0};
+static enum spdk_bdev_zone_action g_zone_action = SPDK_BDEV_ZONE_OPEN;
+static enum spdk_bdev_zone_action g_unexpected_zone_action = SPDK_BDEV_ZONE_CLOSE;
+static enum spdk_bdev_io_type g_io_type = SPDK_BDEV_IO_TYPE_GET_ZONE_INFO;
+
+static uint64_t g_expected_zone_id;
+static uint64_t g_expected_num_zones;
+static uint64_t g_unexpected_zone_id;
+static uint64_t g_unexpected_num_zones;
+static uint64_t g_num_blocks;
+static uint64_t g_unexpected_num_blocks;
+static uint64_t g_start_lba;
+static uint64_t g_unexpected_start_lba;
+static uint64_t g_bdev_blocklen;
+static uint64_t g_unexpected_bdev_blocklen;
+static bool g_append_with_md;
+static int g_unexpected_iovcnt;
+static void *g_md_buf;
+static void *g_unexpected_md_buf;
+static void *g_buf;
+static void *g_unexpected_buf;
+
+static int
+test_setup(void)
+{
+	/* Initialize the expected and unexpected values here */
+ g_expected_zone_id = 0x1000;
+ g_expected_num_zones = 1024;
+ g_unexpected_zone_id = 0xFFFF;
+ g_unexpected_num_zones = 0;
+ g_num_blocks = 4096 * 1024;
+ g_unexpected_num_blocks = 0;
+ g_start_lba = 4096;
+ g_unexpected_start_lba = 0;
+ g_bdev_blocklen = 4096;
+ g_unexpected_bdev_blocklen = 0;
+ g_append_with_md = false;
+ g_unexpected_iovcnt = 1000;
+ g_md_buf = (void *)0xEFDCFEDE;
+	g_unexpected_md_buf = (void *)0xFECDEFDC;
+ g_buf = (void *)0xFEEDBEEF;
+ g_unexpected_buf = (void *)0xDEADBEEF;
+
+ return 0;
+}
+
+static int
+test_cleanup(void)
+{
+ return 0;
+}
+
+static void
+start_operation(void)
+{
+ g_zone_op = calloc(1, sizeof(struct zone_io_operation));
+ SPDK_CU_ASSERT_FATAL(g_zone_op != NULL);
+
+ switch (g_io_type) {
+ case SPDK_BDEV_IO_TYPE_ZONE_APPEND:
+ g_zone_op->bdev.iovs = &g_zone_op->iov;
+ g_zone_op->bdev.iovs[0].iov_base = g_unexpected_buf;
+ g_zone_op->bdev.iovs[0].iov_len = g_unexpected_num_blocks * g_unexpected_bdev_blocklen;
+ g_zone_op->bdev.iovcnt = g_unexpected_iovcnt;
+		g_zone_op->bdev.md_buf = g_unexpected_md_buf;
+ g_zone_op->bdev.num_blocks = g_unexpected_num_blocks;
+ g_zone_op->bdev.offset_blocks = g_unexpected_zone_id;
+ g_zone_op->bdev.start_lba = g_unexpected_start_lba;
+ break;
+ default:
+ g_zone_op->bdev.iovcnt = 0;
+ g_zone_op->zone_mgmt.zone_id = g_unexpected_zone_id;
+ g_zone_op->zone_mgmt.num_zones = g_unexpected_num_zones;
+ g_zone_op->zone_mgmt.zone_action = g_unexpected_zone_action;
+ g_zone_op->zone_mgmt.buf = g_unexpected_buf;
+ break;
+ }
+}
+
+static void
+stop_operation(void)
+{
+ free(g_bdev_io);
+ free(g_bdev);
+ free(g_zone_op);
+ g_bdev_io = NULL;
+ g_bdev = NULL;
+ g_zone_op = NULL;
+}
+
+struct spdk_bdev_io *
+bdev_channel_get_io(struct spdk_bdev_channel *channel)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+
+ bdev_io->internal.ch = channel;
+ bdev_io->type = g_io_type;
+
+ CU_ASSERT(g_zone_op != NULL);
+
+ switch (g_io_type) {
+ case SPDK_BDEV_IO_TYPE_GET_ZONE_INFO:
+ case SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT:
+ bdev_io->u.bdev.iovcnt = 0;
+ bdev_io->u.zone_mgmt.zone_id = g_zone_op->zone_mgmt.zone_id;
+ bdev_io->u.zone_mgmt.num_zones = g_zone_op->zone_mgmt.num_zones;
+ bdev_io->u.zone_mgmt.zone_action = g_zone_op->zone_mgmt.zone_action;
+ bdev_io->u.zone_mgmt.buf = g_zone_op->zone_mgmt.buf;
+ break;
+ case SPDK_BDEV_IO_TYPE_ZONE_APPEND:
+ bdev_io->u.bdev.iovs = g_zone_op->bdev.iovs;
+ bdev_io->u.bdev.iovs[0].iov_base = g_zone_op->bdev.iovs[0].iov_base;
+ bdev_io->u.bdev.iovs[0].iov_len = g_zone_op->bdev.iovs[0].iov_len;
+ bdev_io->u.bdev.iovcnt = g_zone_op->bdev.iovcnt;
+ bdev_io->u.bdev.md_buf = g_zone_op->bdev.md_buf;
+ bdev_io->u.bdev.num_blocks = g_zone_op->bdev.num_blocks;
+ bdev_io->u.bdev.offset_blocks = g_zone_op->bdev.offset_blocks;
+ break;
+ default:
+ CU_ASSERT(0);
+ }
+
+ g_bdev_io = bdev_io;
+
+ return bdev_io;
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc)
+{
+ *_desc = (void *)0x1;
+ return 0;
+}
+
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
+{
+ return (struct spdk_io_channel *)0x1;
+}
+
+void
+spdk_put_io_channel(struct spdk_io_channel *ch)
+{
+ CU_ASSERT(ch == (void *)1);
+}
+
+struct spdk_bdev *
+spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
+{
+ struct spdk_bdev *bdev;
+
+ bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ if (g_io_type == SPDK_BDEV_IO_TYPE_ZONE_APPEND) {
+ bdev->blocklen = g_bdev_blocklen;
+ }
+
+ g_bdev = bdev;
+
+ return bdev;
+}
+
+static void
+test_get_zone_size(void)
+{
+ struct spdk_bdev bdev = {};
+ uint64_t get_zone_size;
+
+ bdev.zone_size = 1024 * 4096;
+
+ get_zone_size = spdk_bdev_get_zone_size(&bdev);
+ CU_ASSERT(get_zone_size == 1024 * 4096);
+}
+
+static void
+test_get_max_open_zones(void)
+{
+ struct spdk_bdev bdev = {};
+ uint32_t get_max_open_zones;
+
+ bdev.max_open_zones = 8192;
+
+ get_max_open_zones = spdk_bdev_get_max_open_zones(&bdev);
+ CU_ASSERT(get_max_open_zones == 8192);
+}
+
+static void
+test_get_optimal_open_zones(void)
+{
+ struct spdk_bdev bdev = {};
+ uint32_t get_optimal_open_zones;
+
+ bdev.optimal_open_zones = 4096;
+
+ get_optimal_open_zones = spdk_bdev_get_optimal_open_zones(&bdev);
+ CU_ASSERT(get_optimal_open_zones == 4096);
+}
+
+static void
+test_bdev_io_get_append_location(void)
+{
+ struct spdk_bdev_io bdev_io = {};
+ uint64_t get_offset_blocks;
+
+ bdev_io.u.bdev.offset_blocks = 1024 * 10;
+
+ get_offset_blocks = spdk_bdev_io_get_append_location(&bdev_io);
+ CU_ASSERT(get_offset_blocks == 1024 * 10);
+}
+
+static void
+test_zone_get_operation(void)
+{
+ test_get_zone_size();
+ test_get_max_open_zones();
+ test_get_optimal_open_zones();
+}
+
+#define DECLARE_VIRTUAL_BDEV_START() \
+ struct spdk_bdev bdev; \
+ struct spdk_io_channel *ch; \
+ struct spdk_bdev_desc *desc = NULL; \
+ int rc; \
+ memset(&bdev, 0, sizeof(bdev)); \
+ bdev.name = "bdev_zone_ut"; \
+ rc = spdk_bdev_open(&bdev, true, NULL, NULL, &desc); \
+ CU_ASSERT(rc == 0); \
+ SPDK_CU_ASSERT_FATAL(desc != NULL); \
+ ch = spdk_bdev_get_io_channel(desc); \
+ CU_ASSERT(ch != NULL);\
+
+static void
+test_bdev_zone_get_info(void)
+{
+ DECLARE_VIRTUAL_BDEV_START();
+
+ g_zone_info.zone_id = g_expected_zone_id;
+ g_io_type = SPDK_BDEV_IO_TYPE_GET_ZONE_INFO;
+
+ start_operation();
+
+ rc = spdk_bdev_get_zone_info(desc, ch, g_expected_zone_id, g_expected_num_zones, &g_zone_info, NULL,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_GET_ZONE_INFO);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.zone_id == g_expected_zone_id);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.num_zones == g_expected_num_zones);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.buf == &g_zone_info);
+
+ stop_operation();
+}
+
+static void
+test_bdev_zone_management(void)
+{
+ DECLARE_VIRTUAL_BDEV_START();
+
+ g_zone_info.zone_id = g_expected_zone_id;
+ g_io_type = SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT;
+
+ start_operation();
+
+ rc = spdk_bdev_zone_management(desc, ch, g_expected_zone_id, g_zone_action, NULL,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.zone_id == g_expected_zone_id);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.zone_action == g_zone_action);
+ CU_ASSERT(g_bdev_io->u.zone_mgmt.num_zones == 1);
+
+ stop_operation();
+}
+
+static void
+test_bdev_zone_append(void)
+{
+ DECLARE_VIRTUAL_BDEV_START();
+
+ g_io_type = SPDK_BDEV_IO_TYPE_ZONE_APPEND;
+ g_append_with_md = false;
+
+ start_operation();
+
+ rc = spdk_bdev_zone_append(desc, ch, g_buf, g_start_lba, g_num_blocks, NULL, NULL);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.desc == desc);
+ CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_ZONE_APPEND);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == g_buf);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_len == g_num_blocks * g_bdev_blocklen);
+ CU_ASSERT(g_bdev_io->u.bdev.iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.md_buf == NULL);
+ CU_ASSERT(g_bdev_io->u.bdev.num_blocks == g_num_blocks);
+ CU_ASSERT(g_bdev_io->u.bdev.offset_blocks == g_expected_zone_id);
+
+ stop_operation();
+}
+
+static void
+test_bdev_zone_append_with_md(void)
+{
+ DECLARE_VIRTUAL_BDEV_START();
+
+ g_io_type = SPDK_BDEV_IO_TYPE_ZONE_APPEND;
+ g_append_with_md = true;
+
+ start_operation();
+
+ rc = spdk_bdev_zone_append_with_md(desc, ch, g_buf, g_md_buf, g_start_lba, g_num_blocks, NULL,
+ NULL);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_io->internal.desc == desc);
+ CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_ZONE_APPEND);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == g_buf);
+ CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_len == g_num_blocks * g_bdev_blocklen);
+ CU_ASSERT(g_bdev_io->u.bdev.iovcnt == 1);
+ CU_ASSERT(g_bdev_io->u.bdev.md_buf == g_md_buf);
+ CU_ASSERT(g_bdev_io->u.bdev.num_blocks == g_num_blocks);
+ CU_ASSERT(g_bdev_io->u.bdev.offset_blocks == g_expected_zone_id);
+
+ stop_operation();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("zone", test_setup, test_cleanup);
+ CU_ADD_TEST(suite, test_zone_get_operation);
+ CU_ADD_TEST(suite, test_bdev_zone_get_info);
+ CU_ADD_TEST(suite, test_bdev_zone_management);
+ CU_ADD_TEST(suite, test_bdev_zone_append);
+ CU_ADD_TEST(suite, test_bdev_zone_append_with_md);
+ CU_ADD_TEST(suite, test_bdev_io_get_append_location);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/compress.c/.gitignore b/src/spdk/test/unit/lib/bdev/compress.c/.gitignore
new file mode 100644
index 000000000..bac80ced6
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/compress.c/.gitignore
@@ -0,0 +1 @@
+compress_ut
diff --git a/src/spdk/test/unit/lib/bdev/compress.c/Makefile b/src/spdk/test/unit/lib/bdev/compress.c/Makefile
new file mode 100644
index 000000000..6f33eef39
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/compress.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = compress_ut.c
+CFLAGS += $(ENV_CFLAGS)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c b/src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c
new file mode 100644
index 000000000..53c14310c
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c
@@ -0,0 +1,1140 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+/* We have our own mock for this */
+#define UNIT_TEST_NO_VTOPHYS
+#include "common/lib/test_env.c"
+#include "spdk_internal/mock.h"
+#include "unit/lib/json_mock.c"
+#include "spdk/reduce.h"
+
+#include <rte_compressdev.h>
+
+/* A single mbuf is enough when the data exactly matches the chunk size,
+ * but an offset into the data and/or a remainder after it may each need
+ * their own mbuf, for a maximum of 3.
+ */
+#define UT_MBUFS_PER_OP 3
+/* For testing the crossing of a huge page boundary on address translation,
+ * we'll have an extra one but we only test on the source side.
+ */
+#define UT_MBUFS_PER_OP_BOUND_TEST 4
+
+struct spdk_bdev_io *g_bdev_io;
+struct spdk_io_channel *g_io_ch;
+struct rte_comp_op g_comp_op[2];
+struct vbdev_compress g_comp_bdev;
+struct comp_device_qp g_device_qp;
+struct compress_dev g_device;
+struct rte_compressdev_capabilities g_cdev_cap;
+static struct rte_mbuf *g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
+static struct rte_mbuf *g_dst_mbufs[UT_MBUFS_PER_OP];
+static struct rte_mbuf g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
+static struct rte_mbuf g_expected_dst_mbufs[UT_MBUFS_PER_OP];
+struct comp_bdev_io *g_io_ctx;
+struct comp_io_channel *g_comp_ch;
+
+/* Those functions are defined as static inline in DPDK, so we can't
+ * mock them straight away. We use defines to redirect them into
+ * our custom functions.
+ */
+
+static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
+ uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo);
+#define rte_pktmbuf_attach_extbuf mock_rte_pktmbuf_attach_extbuf
+static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
+ uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
+{
+ assert(m != NULL);
+ m->buf_addr = buf_addr;
+ m->buf_iova = buf_iova;
+ m->buf_len = buf_len;
+ m->data_len = m->pkt_len = 0;
+}
+
+static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len);
+#define rte_pktmbuf_append mock_rte_pktmbuf_append
+static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
+{
+ m->pkt_len = m->pkt_len + len;
+ return NULL;
+}
+
+static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail);
+#define rte_pktmbuf_chain mock_rte_pktmbuf_chain
+static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
+{
+ struct rte_mbuf *cur_tail;
+
+ cur_tail = rte_pktmbuf_lastseg(head);
+ cur_tail->next = tail;
+
+ return 0;
+}
+
+uint16_t ut_max_nb_queue_pairs = 0;
+void __rte_experimental mock_rte_compressdev_info_get(uint8_t dev_id,
+ struct rte_compressdev_info *dev_info);
+#define rte_compressdev_info_get mock_rte_compressdev_info_get
+void __rte_experimental
+mock_rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
+{
+ dev_info->max_nb_queue_pairs = ut_max_nb_queue_pairs;
+ dev_info->capabilities = &g_cdev_cap;
+ dev_info->driver_name = "compress_isal";
+}
+
+int ut_rte_compressdev_configure = 0;
+int __rte_experimental mock_rte_compressdev_configure(uint8_t dev_id,
+ struct rte_compressdev_config *config);
+#define rte_compressdev_configure mock_rte_compressdev_configure
+int __rte_experimental
+mock_rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
+{
+ return ut_rte_compressdev_configure;
+}
+
+int ut_rte_compressdev_queue_pair_setup = 0;
+int __rte_experimental mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ uint32_t max_inflight_ops, int socket_id);
+#define rte_compressdev_queue_pair_setup mock_rte_compressdev_queue_pair_setup
+int __rte_experimental
+mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ return ut_rte_compressdev_queue_pair_setup;
+}
+
+int ut_rte_compressdev_start = 0;
+int __rte_experimental mock_rte_compressdev_start(uint8_t dev_id);
+#define rte_compressdev_start mock_rte_compressdev_start
+int __rte_experimental
+mock_rte_compressdev_start(uint8_t dev_id)
+{
+ return ut_rte_compressdev_start;
+}
+
+int ut_rte_compressdev_private_xform_create = 0;
+int __rte_experimental mock_rte_compressdev_private_xform_create(uint8_t dev_id,
+ const struct rte_comp_xform *xform, void **private_xform);
+#define rte_compressdev_private_xform_create mock_rte_compressdev_private_xform_create
+int __rte_experimental
+mock_rte_compressdev_private_xform_create(uint8_t dev_id,
+ const struct rte_comp_xform *xform, void **private_xform)
+{
+ return ut_rte_compressdev_private_xform_create;
+}
+
+uint8_t ut_rte_compressdev_count = 0;
+uint8_t __rte_experimental mock_rte_compressdev_count(void);
+#define rte_compressdev_count mock_rte_compressdev_count
+uint8_t __rte_experimental
+mock_rte_compressdev_count(void)
+{
+ return ut_rte_compressdev_count;
+}
+
+struct rte_mempool *ut_rte_comp_op_pool_create = NULL;
+struct rte_mempool *__rte_experimental mock_rte_comp_op_pool_create(const char *name,
+ unsigned int nb_elts, unsigned int cache_size, uint16_t user_size,
+ int socket_id);
+#define rte_comp_op_pool_create mock_rte_comp_op_pool_create
+struct rte_mempool *__rte_experimental
+mock_rte_comp_op_pool_create(const char *name, unsigned int nb_elts,
+ unsigned int cache_size, uint16_t user_size, int socket_id)
+{
+ return ut_rte_comp_op_pool_create;
+}
+
+void mock_rte_pktmbuf_free(struct rte_mbuf *m);
+#define rte_pktmbuf_free mock_rte_pktmbuf_free
+void mock_rte_pktmbuf_free(struct rte_mbuf *m)
+{
+}
+
+static bool ut_boundary_alloc = false;
+static int ut_rte_pktmbuf_alloc_bulk = 0;
+int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
+ unsigned count);
+#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
+int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
+ unsigned count)
+{
+ int i;
+
+ /* This mocked function only supports the alloc of up to 3 src and 3 dst. */
+ ut_rte_pktmbuf_alloc_bulk += count;
+
+ if (ut_rte_pktmbuf_alloc_bulk == 1) {
+ /* allocation of an extra mbuf for boundary cross test */
+ ut_boundary_alloc = true;
+ g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1]->next = NULL;
+ *mbufs = g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1];
+ ut_rte_pktmbuf_alloc_bulk = 0;
+ } else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP) {
+ /* first test allocation, src mbufs */
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ g_src_mbufs[i]->next = NULL;
+ *mbufs++ = g_src_mbufs[i];
+ }
+ } else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP * 2) {
+ /* second test allocation, dst mbufs */
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ g_dst_mbufs[i]->next = NULL;
+ *mbufs++ = g_dst_mbufs[i];
+ }
+ ut_rte_pktmbuf_alloc_bulk = 0;
+ } else {
+ return -1;
+ }
+ return 0;
+}
+
+struct rte_mempool *
+rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
+ uint16_t priv_size, uint16_t data_room_size, int socket_id)
+{
+ struct spdk_mempool *tmp;
+
+ tmp = spdk_mempool_create("mbuf_mp", 1024, sizeof(struct rte_mbuf),
+ SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+ SPDK_ENV_SOCKET_ID_ANY);
+
+ return (struct rte_mempool *)tmp;
+}
+
+void
+rte_mempool_free(struct rte_mempool *mp)
+{
+ if (mp) {
+ spdk_mempool_free((struct spdk_mempool *)mp);
+ }
+}
+
+static int ut_spdk_reduce_vol_op_complete_err = 0;
+void
+spdk_reduce_vol_writev(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
+ uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
+ void *cb_arg)
+{
+ cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
+}
+
+void
+spdk_reduce_vol_readv(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
+ uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
+ void *cb_arg)
+{
+ cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
+}
+
+#include "bdev/compress/vbdev_compress.c"
+
+/* SPDK stubs */
+DEFINE_STUB(spdk_bdev_get_aliases, const struct spdk_bdev_aliases_list *,
+ (const struct spdk_bdev *bdev), NULL);
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), 0);
+DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
+DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
+ void *cb_arg));
+DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
+ spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
+DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
+DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io),
+ 0);
+DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry), 0);
+DEFINE_STUB_V(spdk_reduce_vol_unload, (struct spdk_reduce_vol *vol,
+ spdk_reduce_vol_op_complete cb_fn, void *cb_arg));
+DEFINE_STUB_V(spdk_reduce_vol_load, (struct spdk_reduce_backing_dev *backing_dev,
+ spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
+DEFINE_STUB(spdk_reduce_vol_get_params, const struct spdk_reduce_vol_params *,
+ (struct spdk_reduce_vol *vol), NULL);
+
+/* DPDK stubs */
+DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
+DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
+DEFINE_STUB_V(rte_comp_op_free, (struct rte_comp_op *op));
+DEFINE_STUB(rte_comp_op_alloc, struct rte_comp_op *, (struct rte_mempool *mempool), NULL);
+
+int g_small_size_counter = 0;
+int g_small_size_modify = 0;
+uint64_t g_small_size = 0;
+uint64_t
+spdk_vtophys(void *buf, uint64_t *size)
+{
+ g_small_size_counter++;
+ if (g_small_size_counter == g_small_size_modify) {
+ *size = g_small_size;
+ g_small_size_counter = 0;
+ g_small_size_modify = 0;
+ }
+ return (uint64_t)buf;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(g_io_ch, g_bdev_io, true);
+}
+
+/* Mock these functions to call the callback and then return the value we require */
+int ut_spdk_bdev_readv_blocks = 0;
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
+ return ut_spdk_bdev_readv_blocks;
+}
+
+int ut_spdk_bdev_writev_blocks = 0;
+bool ut_spdk_bdev_writev_blocks_mocked = false;
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
+ return ut_spdk_bdev_writev_blocks;
+}
+
+int ut_spdk_bdev_unmap_blocks = 0;
+bool ut_spdk_bdev_unmap_blocks_mocked = false;
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
+ return ut_spdk_bdev_unmap_blocks;
+}
+
+int ut_spdk_bdev_flush_blocks = 0;
+bool ut_spdk_bdev_flush_blocks_mocked = false;
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
+ return ut_spdk_bdev_flush_blocks;
+}
+
+int ut_spdk_bdev_reset = 0;
+bool ut_spdk_bdev_reset_mocked = false;
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
+ return ut_spdk_bdev_reset;
+}
+
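+/* Record the completion status on the bdev_io and note that completion ran so tests can assert on both. */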
+bool g_completion_called = false;
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+ g_completion_called = true;
+}
+
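+/* Dequeue returns the preset g_comp_op entries; tests set
+ * ut_rte_compressdev_dequeue_burst to control how many ops the poller sees.
+ */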
+static uint16_t ut_rte_compressdev_dequeue_burst = 0;
+uint16_t
+rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
+ uint16_t nb_op)
+{
+ if (ut_rte_compressdev_dequeue_burst == 0) {
+ return 0;
+ }
+
+ ops[0] = &g_comp_op[0];
+ ops[1] = &g_comp_op[1];
+
+ return ut_rte_compressdev_dequeue_burst;
+}
+
+static int ut_compress_done[2];
+/* done_count and done_idx together control which expected assertion
+ * value to use when dequeuing 2 operations.
+ */
+static uint16_t done_count = 1;
+static uint16_t done_idx = 0;
+static void
+_compress_done(void *_req, int reduce_errno)
+{
+ if (done_count == 1) {
+ CU_ASSERT(reduce_errno == ut_compress_done[0]);
+ } else if (done_count == 2) {
+ CU_ASSERT(reduce_errno == ut_compress_done[done_idx++]);
+ }
+}
+
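+/* Flatten a chained mbuf list into an array for easy comparison; when
+ * null_final is set, the last array slot is cleared so a 3-mbuf chain leaves
+ * the 4th entry NULL.
+ */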
+static void
+_get_mbuf_array(struct rte_mbuf *mbuf_array[UT_MBUFS_PER_OP_BOUND_TEST],
+ struct rte_mbuf *mbuf_head, int mbuf_count, bool null_final)
+{
+ int i;
+
+ for (i = 0; i < mbuf_count; i++) {
+ mbuf_array[i] = mbuf_head;
+ if (mbuf_head) {
+ mbuf_head = mbuf_head->next;
+ }
+ }
+ if (null_final) {
+ mbuf_array[i - 1] = NULL;
+ }
+}
+
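+/* Sentinel values for the enqueue mock below: BUSY, SUCCESS and ERROR
+ * short-circuit with the matching op status, while any other value falls
+ * through to the field-by-field comparison against ut_expected_op.
+ */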
+#define FAKE_ENQUEUE_SUCCESS 255
+#define FAKE_ENQUEUE_ERROR 128
+#define FAKE_ENQUEUE_BUSY 64
+static uint16_t ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
+static struct rte_comp_op ut_expected_op;
+uint16_t
+rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_comp_op *op = *ops;
+ struct rte_mbuf *op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
+ struct rte_mbuf *exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
+ int i, num_src_mbufs = UT_MBUFS_PER_OP;
+
+ switch (ut_enqueue_value) {
+ case FAKE_ENQUEUE_BUSY:
+ op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
+ return 0;
+ break;
+ case FAKE_ENQUEUE_SUCCESS:
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ return 1;
+ break;
+ case FAKE_ENQUEUE_ERROR:
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return 0;
+ break;
+ default:
+ break;
+ }
+
+ /* by design the compress module will never send more than 1 op at a time */
+ CU_ASSERT(op->private_xform == ut_expected_op.private_xform);
+
+ /* setup our local pointers to the chained mbufs, those pointed to in the
+ * operation struct and the expected values.
+ */
+ _get_mbuf_array(op_mbuf, op->m_src, SPDK_COUNTOF(op_mbuf), true);
+ _get_mbuf_array(exp_mbuf, ut_expected_op.m_src, SPDK_COUNTOF(exp_mbuf), true);
+
+ if (ut_boundary_alloc == true) {
+ /* if we crossed a boundary, we need to check the 4th src mbuf and
+ * reset the global that is used to identify whether we crossed
+ * or not
+ */
+ num_src_mbufs = UT_MBUFS_PER_OP_BOUND_TEST;
+ exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = ut_expected_op.m_src->next->next->next;
+ op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = op->m_src->next->next->next;
+ ut_boundary_alloc = false;
+ }
+
+ for (i = 0; i < num_src_mbufs; i++) {
+ CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
+ CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
+ CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
+ CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
+ }
+
+ /* if only 3 mbufs were used in the test, the 4th should be zeroed */
+ if (num_src_mbufs == UT_MBUFS_PER_OP) {
+ CU_ASSERT(op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
+ CU_ASSERT(exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
+ }
+
+ CU_ASSERT(op->m_src->userdata == ut_expected_op.m_src->userdata);
+ CU_ASSERT(op->src.offset == ut_expected_op.src.offset);
+ CU_ASSERT(op->src.length == ut_expected_op.src.length);
+
+ /* check dst mbuf values */
+ _get_mbuf_array(op_mbuf, op->m_dst, SPDK_COUNTOF(op_mbuf), true);
+ _get_mbuf_array(exp_mbuf, ut_expected_op.m_dst, SPDK_COUNTOF(exp_mbuf), true);
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
+ CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
+ CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
+ CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
+ }
+ CU_ASSERT(op->dst.offset == ut_expected_op.dst.offset);
+
+ return ut_enqueue_value;
+}
+
+/* Global setup for all tests that share a bunch of preparation... */
+static int
+test_setup(void)
+{
+ struct spdk_thread *thread;
+ int i;
+
+ spdk_thread_lib_init(NULL, 0);
+
+ thread = spdk_thread_create(NULL, NULL);
+ spdk_set_thread(thread);
+
+ g_comp_bdev.reduce_thread = thread;
+ g_comp_bdev.backing_dev.unmap = _comp_reduce_unmap;
+ g_comp_bdev.backing_dev.readv = _comp_reduce_readv;
+ g_comp_bdev.backing_dev.writev = _comp_reduce_writev;
+ g_comp_bdev.backing_dev.compress = _comp_reduce_compress;
+ g_comp_bdev.backing_dev.decompress = _comp_reduce_decompress;
+ g_comp_bdev.backing_dev.blocklen = 512;
+ g_comp_bdev.backing_dev.blockcnt = 1024 * 16;
+
+ g_comp_bdev.device_qp = &g_device_qp;
+ g_comp_bdev.device_qp->device = &g_device;
+
+ TAILQ_INIT(&g_comp_bdev.queued_comp_ops);
+
+ g_comp_xform = (struct rte_comp_xform) {
+ .type = RTE_COMP_COMPRESS,
+ .compress = {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
+ .level = RTE_COMP_LEVEL_MAX,
+ .window_size = DEFAULT_WINDOW_SIZE,
+ .chksum = RTE_COMP_CHECKSUM_NONE,
+ .hash_algo = RTE_COMP_HASH_ALGO_NONE
+ }
+ };
+
+ g_decomp_xform = (struct rte_comp_xform) {
+ .type = RTE_COMP_DECOMPRESS,
+ .decompress = {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .chksum = RTE_COMP_CHECKSUM_NONE,
+ .window_size = DEFAULT_WINDOW_SIZE,
+ .hash_algo = RTE_COMP_HASH_ALGO_NONE
+ }
+ };
+ g_device.comp_xform = &g_comp_xform;
+ g_device.decomp_xform = &g_decomp_xform;
+ g_cdev_cap.comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM;
+ g_device.cdev_info.driver_name = "compress_isal";
+ g_device.cdev_info.capabilities = &g_cdev_cap;
+ for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) {
+ g_src_mbufs[i] = calloc(1, sizeof(struct rte_mbuf));
+ }
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ g_dst_mbufs[i] = calloc(1, sizeof(struct rte_mbuf));
+ }
+
+ g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct comp_bdev_io));
+ g_bdev_io->u.bdev.iovs = calloc(128, sizeof(struct iovec));
+ g_bdev_io->bdev = &g_comp_bdev.comp_bdev;
+ g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct comp_io_channel));
+ g_io_ch->thread = thread;
+ g_comp_ch = (struct comp_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
+ g_io_ctx = (struct comp_bdev_io *)g_bdev_io->driver_ctx;
+
+ g_io_ctx->comp_ch = g_comp_ch;
+ g_io_ctx->comp_bdev = &g_comp_bdev;
+ g_comp_bdev.device_qp = &g_device_qp;
+
+ for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
+ g_expected_src_mbufs[i].next = &g_expected_src_mbufs[i + 1];
+ }
+ g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1].next = NULL;
+
+	/* the 4-mbuf boundary case is only exercised on the src side, so dst chains just UT_MBUFS_PER_OP */
+ for (i = 0; i < UT_MBUFS_PER_OP - 1; i++) {
+ g_expected_dst_mbufs[i].next = &g_expected_dst_mbufs[i + 1];
+ }
+ g_expected_dst_mbufs[UT_MBUFS_PER_OP - 1].next = NULL;
+
+ return 0;
+}
+
+/* Global teardown for all tests */
+static int
+test_cleanup(void)
+{
+ struct spdk_thread *thread;
+ int i;
+
+ for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) {
+ free(g_src_mbufs[i]);
+ }
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ free(g_dst_mbufs[i]);
+ }
+ free(g_bdev_io->u.bdev.iovs);
+ free(g_bdev_io);
+ free(g_io_ch);
+
+ thread = spdk_get_thread();
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ spdk_thread_lib_fini();
+
+ return 0;
+}
+
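+/* Walk _compress_operation() through comp op allocation failure, mbuf bulk
+ * allocation failure, enqueue busy/error and a successful 3-iovec submission.
+ */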
+static void
+test_compress_operation(void)
+{
+ struct iovec src_iovs[3] = {};
+ int src_iovcnt;
+ struct iovec dst_iovs[3] = {};
+ int dst_iovcnt;
+ struct spdk_reduce_vol_cb_args cb_arg;
+ int rc, i;
+ struct vbdev_comp_op *op;
+ struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP];
+ struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP];
+
+ src_iovcnt = dst_iovcnt = 3;
+ for (i = 0; i < dst_iovcnt; i++) {
+ src_iovs[i].iov_len = 0x1000;
+ dst_iovs[i].iov_len = 0x1000;
+ src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
+ dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
+ }
+
+ /* test rte_comp_op_alloc failure */
+ MOCK_SET(rte_comp_op_alloc, NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, true, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
+ while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
+ op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
+ TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
+ free(op);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+ MOCK_SET(rte_comp_op_alloc, &g_comp_op[0]);
+
+ /* test mempool get failure */
+ ut_rte_pktmbuf_alloc_bulk = -1;
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, true, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
+ while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
+ op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
+ TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
+ free(op);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+ ut_rte_pktmbuf_alloc_bulk = 0;
+
+ /* test enqueue failure busy */
+ ut_enqueue_value = FAKE_ENQUEUE_BUSY;
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, true, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
+ while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
+ op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
+ TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
+ free(op);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+ ut_enqueue_value = 1;
+
+ /* test enqueue failure error */
+ ut_enqueue_value = FAKE_ENQUEUE_ERROR;
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, true, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == -EINVAL);
+ ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
+
+ /* test success with 3 vector iovec */
+ ut_expected_op.private_xform = &g_decomp_xform;
+ ut_expected_op.src.offset = 0;
+ ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;
+
+ /* setup the src expected values */
+ _get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
+ ut_expected_op.m_src = exp_src_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_src_mbuf[i]->userdata = &cb_arg;
+ exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
+ exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
+ exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
+ exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
+ }
+
+ /* setup the dst expected values */
+ _get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
+ ut_expected_op.dst.offset = 0;
+ ut_expected_op.m_dst = exp_dst_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
+ exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
+ exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
+ exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
+ }
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+test_compress_operation_cross_boundary(void)
+{
+ struct iovec src_iovs[3] = {};
+ int src_iovcnt;
+ struct iovec dst_iovs[3] = {};
+ int dst_iovcnt;
+ struct spdk_reduce_vol_cb_args cb_arg;
+ int rc, i;
+ struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
+ struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
+
+ /* Setup the same basic 3 IOV test as used in the simple success case
+	 * but then we'll start testing a vtophys boundary crossing at each
+ * position.
+ */
+ src_iovcnt = dst_iovcnt = 3;
+ for (i = 0; i < dst_iovcnt; i++) {
+ src_iovs[i].iov_len = 0x1000;
+ dst_iovs[i].iov_len = 0x1000;
+ src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
+ dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
+ }
+
+ ut_expected_op.private_xform = &g_decomp_xform;
+ ut_expected_op.src.offset = 0;
+ ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;
+
+ /* setup the src expected values */
+ _get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
+ ut_expected_op.m_src = exp_src_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_src_mbuf[i]->userdata = &cb_arg;
+ exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
+ exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
+ exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
+ exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
+ }
+
+ /* setup the dst expected values, we don't test needing a 4th dst mbuf */
+ _get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
+ ut_expected_op.dst.offset = 0;
+ ut_expected_op.m_dst = exp_dst_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
+ exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
+ exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
+ exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
+ }
+
+ /* force the 1st IOV to get partial length from spdk_vtophys */
+ g_small_size_counter = 0;
+ g_small_size_modify = 1;
+ g_small_size = 0x800;
+ exp_src_mbuf[3]->userdata = &cb_arg;
+
+ /* first only has shorter length */
+ exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x800;
+
+ /* 2nd was inserted by the boundary crossing condition and finishes off
+ * the length from the first */
+ exp_src_mbuf[1]->buf_addr = (void *)0x10000800;
+ exp_src_mbuf[1]->buf_iova = 0x10000800;
+ exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;
+
+	/* 3rd looks like what the 2nd would have */
+ exp_src_mbuf[2]->buf_addr = (void *)0x10001000;
+ exp_src_mbuf[2]->buf_iova = 0x10001000;
+ exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x1000;
+
+ /* a new 4th looks like what the 3rd would have */
+ exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
+ exp_src_mbuf[3]->buf_iova = 0x10002000;
+ exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+
+ /* Now force the 2nd IOV to get partial length from spdk_vtophys */
+ g_small_size_counter = 0;
+ g_small_size_modify = 2;
+ g_small_size = 0x800;
+
+ /* first is normal */
+ exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
+ exp_src_mbuf[0]->buf_iova = 0x10000000;
+ exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;
+
+ /* second only has shorter length */
+ exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
+ exp_src_mbuf[1]->buf_iova = 0x10001000;
+ exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;
+
+	/* 3rd was inserted by the boundary crossing condition and finishes off
+	 * the length from the second */
+ exp_src_mbuf[2]->buf_addr = (void *)0x10001800;
+ exp_src_mbuf[2]->buf_iova = 0x10001800;
+ exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;
+
+ /* a new 4th looks like what the 3rd would have */
+ exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
+ exp_src_mbuf[3]->buf_iova = 0x10002000;
+ exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+
+ /* Finally force the 3rd IOV to get partial length from spdk_vtophys */
+ g_small_size_counter = 0;
+ g_small_size_modify = 3;
+ g_small_size = 0x800;
+
+ /* first is normal */
+ exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
+ exp_src_mbuf[0]->buf_iova = 0x10000000;
+ exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;
+
+ /* second is normal */
+ exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
+ exp_src_mbuf[1]->buf_iova = 0x10001000;
+ exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x1000;
+
+ /* 3rd has shorter length */
+ exp_src_mbuf[2]->buf_addr = (void *)0x10002000;
+ exp_src_mbuf[2]->buf_iova = 0x10002000;
+ exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;
+
+ /* a new 4th handles the remainder from the 3rd */
+ exp_src_mbuf[3]->buf_addr = (void *)0x10002800;
+ exp_src_mbuf[3]->buf_iova = 0x10002800;
+ exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x800;
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+}
+
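+/* Exercise comp_dev_poller(): a dequeue error, a two-op success and
+ * resubmission of an op that was previously queued for retry.
+ */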
+static void
+test_poller(void)
+{
+ int rc;
+ struct spdk_reduce_vol_cb_args *cb_args;
+	struct rte_mbuf mbuf[4]; /* 2 ops, each with one src and one dst mbuf */
+ struct vbdev_comp_op *op_to_queue;
+ struct iovec src_iovs[3] = {};
+ struct iovec dst_iovs[3] = {};
+ int i;
+
+ cb_args = calloc(1, sizeof(*cb_args));
+ SPDK_CU_ASSERT_FATAL(cb_args != NULL);
+ cb_args->cb_fn = _compress_done;
+ memset(&g_comp_op[0], 0, sizeof(struct rte_comp_op));
+ g_comp_op[0].m_src = &mbuf[0];
+ g_comp_op[1].m_src = &mbuf[1];
+ g_comp_op[0].m_dst = &mbuf[2];
+ g_comp_op[1].m_dst = &mbuf[3];
+ for (i = 0; i < 3; i++) {
+ src_iovs[i].iov_len = 0x1000;
+ dst_iovs[i].iov_len = 0x1000;
+ src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
+ dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
+ }
+
+ /* Error from dequeue, nothing needing to be resubmitted.
+ */
+ ut_rte_compressdev_dequeue_burst = 1;
+ /* setup what we want dequeue to return for the op */
+ g_comp_op[0].m_src->userdata = (void *)cb_args;
+ g_comp_op[0].produced = 1;
+ g_comp_op[0].status = 1;
+ /* value asserted in the reduce callback */
+ ut_compress_done[0] = -EINVAL;
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = comp_dev_poller((void *)&g_comp_bdev);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == SPDK_POLLER_BUSY);
+
+ /* Success from dequeue, 2 ops. nothing needing to be resubmitted.
+ */
+ ut_rte_compressdev_dequeue_burst = 2;
+ /* setup what we want dequeue to return for the op */
+ g_comp_op[0].m_src->userdata = (void *)cb_args;
+ g_comp_op[0].produced = 16;
+ g_comp_op[0].status = 0;
+ g_comp_op[1].m_src->userdata = (void *)cb_args;
+ g_comp_op[1].produced = 32;
+ g_comp_op[1].status = 0;
+ /* value asserted in the reduce callback */
+ ut_compress_done[0] = 16;
+ ut_compress_done[1] = 32;
+ done_count = 2;
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ rc = comp_dev_poller((void *)&g_comp_bdev);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == SPDK_POLLER_BUSY);
+
+ /* Success from dequeue, one op to be resubmitted.
+ */
+ ut_rte_compressdev_dequeue_burst = 1;
+ /* setup what we want dequeue to return for the op */
+ g_comp_op[0].m_src->userdata = (void *)cb_args;
+ g_comp_op[0].produced = 16;
+ g_comp_op[0].status = 0;
+ /* value asserted in the reduce callback */
+ ut_compress_done[0] = 16;
+ done_count = 1;
+ op_to_queue = calloc(1, sizeof(struct vbdev_comp_op));
+ SPDK_CU_ASSERT_FATAL(op_to_queue != NULL);
+ op_to_queue->backing_dev = &g_comp_bdev.backing_dev;
+ op_to_queue->src_iovs = &src_iovs[0];
+ op_to_queue->src_iovcnt = 3;
+ op_to_queue->dst_iovs = &dst_iovs[0];
+ op_to_queue->dst_iovcnt = 3;
+ op_to_queue->compress = true;
+ op_to_queue->cb_arg = cb_args;
+ ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
+ TAILQ_INSERT_TAIL(&g_comp_bdev.queued_comp_ops,
+ op_to_queue,
+ link);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
+ rc = comp_dev_poller((void *)&g_comp_bdev);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == SPDK_POLLER_BUSY);
+
+ /* op_to_queue is freed in code under test */
+ free(cb_args);
+}
+
+static void
+test_vbdev_compress_submit_request(void)
+{
+ /* Single element block size write */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ vbdev_compress_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+ CU_ASSERT(g_io_ctx->orig_io == g_bdev_io);
+ CU_ASSERT(g_io_ctx->comp_bdev == &g_comp_bdev);
+ CU_ASSERT(g_io_ctx->comp_ch == g_comp_ch);
+
+ /* same write but now fail it */
+ ut_spdk_reduce_vol_op_complete_err = 1;
+ g_completion_called = false;
+ vbdev_compress_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* test a read success */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ ut_spdk_reduce_vol_op_complete_err = 0;
+ g_completion_called = false;
+ vbdev_compress_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* test a read failure */
+ ut_spdk_reduce_vol_op_complete_err = 1;
+ g_completion_called = false;
+ vbdev_compress_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+}
+
+static void
+test_passthru(void)
+{
+
+}
+
+static void
+test_reset(void)
+{
+ /* TODO: There are a few different ways to do this given that
+ * the code uses spdk_for_each_channel() to implement reset
+	 * handling. Submitting w/o UT for this function for now and
+ * will follow up with something shortly.
+ */
+}
+
+static void
+test_initdrivers(void)
+{
+ int rc;
+
+ /* test return values from rte_vdev_init() */
+ MOCK_SET(rte_vdev_init, -EEXIST);
+ rc = vbdev_init_compress_drivers();
+ /* This is not an error condition, we already have one */
+ CU_ASSERT(rc == 0);
+
+ /* error */
+ MOCK_SET(rte_vdev_init, -2);
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_comp_op_mp == NULL);
+
+ /* compressdev count 0 */
+ ut_rte_compressdev_count = 0;
+ MOCK_SET(rte_vdev_init, 0);
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == 0);
+
+ /* bogus count */
+ ut_rte_compressdev_count = RTE_COMPRESS_MAX_DEVS + 1;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -EINVAL);
+
+ /* can't get mbuf pool */
+ ut_rte_compressdev_count = 1;
+ MOCK_SET(spdk_mempool_create, NULL);
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -ENOMEM);
+ MOCK_CLEAR(spdk_mempool_create);
+
+ /* can't get comp op pool */
+ ut_rte_comp_op_pool_create = NULL;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -ENOMEM);
+
+ /* error on create_compress_dev() */
+ ut_rte_comp_op_pool_create = (struct rte_mempool *)&test_initdrivers;
+ ut_rte_compressdev_configure = -1;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -1);
+
+ /* error on create_compress_dev() but coverage for large num queues */
+ ut_max_nb_queue_pairs = 99;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -1);
+
+ /* qpair setup fails */
+ ut_rte_compressdev_configure = 0;
+ ut_max_nb_queue_pairs = 0;
+ ut_rte_compressdev_queue_pair_setup = -1;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -EINVAL);
+
+ /* rte_compressdev_start fails */
+ ut_rte_compressdev_queue_pair_setup = 0;
+ ut_rte_compressdev_start = -1;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -1);
+
+ /* rte_compressdev_private_xform_create() fails */
+ ut_rte_compressdev_start = 0;
+ ut_rte_compressdev_private_xform_create = -2;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == -2);
+
+ /* success */
+ ut_rte_compressdev_private_xform_create = 0;
+ rc = vbdev_init_compress_drivers();
+ CU_ASSERT(rc == 0);
+ spdk_mempool_free((struct spdk_mempool *)g_mbuf_mp);
+}
+
+static void
+test_supported_io(void)
+{
+
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("compress", test_setup, test_cleanup);
+ CU_ADD_TEST(suite, test_compress_operation);
+ CU_ADD_TEST(suite, test_compress_operation_cross_boundary);
+ CU_ADD_TEST(suite, test_vbdev_compress_submit_request);
+ CU_ADD_TEST(suite, test_passthru);
+ CU_ADD_TEST(suite, test_initdrivers);
+ CU_ADD_TEST(suite, test_supported_io);
+ CU_ADD_TEST(suite, test_poller);
+ CU_ADD_TEST(suite, test_reset);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore
new file mode 100644
index 000000000..b2777562d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore
@@ -0,0 +1 @@
+crypto_ut
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/Makefile b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile
new file mode 100644
index 000000000..a987fbf2e
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = crypto_ut.c
+CFLAGS += $(ENV_CFLAGS)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c
new file mode 100644
index 000000000..f6298fd7d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c
@@ -0,0 +1,1084 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "spdk_internal/mock.h"
+#include "unit/lib/json_mock.c"
+
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+
+#define MAX_TEST_BLOCKS 8192
+struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
+struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];
+
+uint16_t g_dequeue_mock;
+uint16_t g_enqueue_mock;
+unsigned ut_rte_crypto_op_bulk_alloc;
+int ut_rte_crypto_op_attach_sym_session = 0;
+#define MOCK_INFO_GET_1QP_AESNI 0
+#define MOCK_INFO_GET_1QP_QAT 1
+#define MOCK_INFO_GET_1QP_BOGUS_PMD 2
+int ut_rte_cryptodev_info_get = 0;
+bool ut_rte_cryptodev_info_get_mocked = false;
+
+/* Those functions are defined as static inline in DPDK, so we can't
+ * mock them straight away. We use defines to redirect them into
+ * our custom functions.
+ */
+static bool g_resubmit_test = false;
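+/* The enqueue mock records every submitted op in g_test_dev_full_ops so tests
+ * can inspect what was enqueued; the resubmit test additionally expects its
+ * dummy 0xDEADBEEF op pointers.
+ */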
+#define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
+static inline uint16_t
+mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ CU_ASSERT(nb_ops > 0);
+
+ for (i = 0; i < nb_ops; i++) {
+		/* Use this so-far empty array of pointers to store
+		 * enqueued operations for assertion in the dev_full test.
+ */
+ g_test_dev_full_ops[i] = *ops++;
+ if (g_resubmit_test == true) {
+ CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
+ }
+ }
+
+ return g_enqueue_mock;
+}
+
+#define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
+static inline uint16_t
+mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ CU_ASSERT(nb_ops > 0);
+
+ for (i = 0; i < g_dequeue_mock; i++) {
+ *ops++ = g_test_crypto_ops[i];
+ }
+
+ return g_dequeue_mock;
+}
+
+/* Instead of allocating real memory, assign the allocations to our
+ * test array for assertion in tests.
+ */
+#define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
+static inline unsigned
+mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ for (i = 0; i < nb_ops; i++) {
+ *ops++ = g_test_crypto_ops[i];
+ }
+ return ut_rte_crypto_op_bulk_alloc;
+}
+
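+/* No-op: the test crypto ops are allocated in test_setup() and freed in
+ * test_cleanup(), so nothing is ever returned to a real mempool.
+ */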
+#define rte_mempool_put_bulk mock_rte_mempool_put_bulk
+static __rte_always_inline void
+mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
+ unsigned int n)
+{
+ return;
+}
+
+#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
+static inline int
+mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ return ut_rte_crypto_op_attach_sym_session;
+}
+
+#define rte_lcore_count mock_rte_lcore_count
+static inline unsigned
+mock_rte_lcore_count(void)
+{
+ return 1;
+}
+
+#include "bdev/crypto/vbdev_crypto.c"
+
+/* SPDK stubs */
+DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry), 0);
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
+ (struct spdk_conf *cp, const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
+DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), 0);
+DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
+DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
+ void *cb_arg));
+DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
+ spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
+DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
+
+/* DPDK stubs */
+DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
+DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
+DEFINE_STUB(rte_mempool_create, struct rte_mempool *, (const char *name, unsigned n,
+ unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags), (struct rte_mempool *)1);
+DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
+DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
+ (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
+ unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
+DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
+DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
+#if RTE_VERSION >= RTE_VERSION_NUM(19, 02, 0, 0)
+DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
+DEFINE_STUB(rte_cryptodev_sym_session_pool_create, struct rte_mempool *, (const char *name,
+ uint32_t nb_elts,
+ uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
+ int socket_id), (struct rte_mempool *)1);
+#else
+DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool), 0);
+#endif
+DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
+DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
+DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
+ (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
+DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
+DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
+DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
+DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);
+
+struct rte_cryptodev *rte_cryptodevs;
+
+/* global vars and setup/cleanup functions used for all test functions */
+struct spdk_bdev_io *g_bdev_io;
+struct crypto_bdev_io *g_io_ctx;
+struct crypto_io_channel *g_crypto_ch;
+struct spdk_io_channel *g_io_ch;
+struct vbdev_dev g_device;
+struct vbdev_crypto g_crypto_bdev;
+struct device_qp g_dev_qp;
+
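+/* Report a single queue pair and the driver name selected via ut_rte_cryptodev_info_get. */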
+void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
+{
+ dev_info->max_nb_queue_pairs = 1;
+ if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
+ dev_info->driver_name = g_driver_names[0];
+ } else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
+ dev_info->driver_name = g_driver_names[1];
+ } else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
+ dev_info->driver_name = "junk";
+ }
+}
+
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
+{
+ return (unsigned int)dev_id;
+}
+
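+/* Immediately invoke the callback with a dummy (0xDEADBEEF) aux buffer. */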
+void
+spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
+{
+ cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(g_io_ch, g_bdev_io, true);
+}
+
+/* Mock these functions to call the callback and then return the value we require */
+int ut_spdk_bdev_readv_blocks = 0;
+bool ut_spdk_bdev_readv_blocks_mocked = false;
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
+ return ut_spdk_bdev_readv_blocks;
+}
+
+int ut_spdk_bdev_writev_blocks = 0;
+bool ut_spdk_bdev_writev_blocks_mocked = false;
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
+ return ut_spdk_bdev_writev_blocks;
+}
+
+int ut_spdk_bdev_unmap_blocks = 0;
+bool ut_spdk_bdev_unmap_blocks_mocked = false;
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
+ return ut_spdk_bdev_unmap_blocks;
+}
+
+int ut_spdk_bdev_flush_blocks = 0;
+bool ut_spdk_bdev_flush_blocks_mocked = false;
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
+ return ut_spdk_bdev_flush_blocks;
+}
+
+int ut_spdk_bdev_reset = 0;
+bool ut_spdk_bdev_reset_mocked = false;
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
+ return ut_spdk_bdev_reset;
+}
+
+bool g_completion_called = false;
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+ g_completion_called = true;
+}
+
+/* Global setup for all tests that share a bunch of preparation... */
+static int
+test_setup(void)
+{
+ int i, rc;
+
+ /* Prepare essential variables for test routines */
+ g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
+ g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
+ g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
+ g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
+ g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
+ g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
+ memset(&g_device, 0, sizeof(struct vbdev_dev));
+ memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
+ g_dev_qp.device = &g_device;
+ g_io_ctx->crypto_ch = g_crypto_ch;
+ g_io_ctx->crypto_bdev = &g_crypto_bdev;
+ g_crypto_ch->device_qp = &g_dev_qp;
+ TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
+ TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
+
+ /* Allocate a real mbuf pool so we can test error paths */
+ g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
+ SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+ SPDK_ENV_SOCKET_ID_ANY);
+
+	/* Instead of allocating real rte mempools for these, it's easier and provides the
+	 * same coverage to simply allocate them here.
+ */
+ for (i = 0; i < MAX_TEST_BLOCKS; i++) {
+ rc = posix_memalign((void **)&g_test_crypto_ops[i], 64,
+ sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op) +
+ AES_CBC_IV_LENGTH + QUEUED_OP_LENGTH);
+ if (rc != 0) {
+ assert(false);
+ }
+ memset(g_test_crypto_ops[i], 0, sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) + QUEUED_OP_LENGTH);
+ }
+ return 0;
+}
+
+/* Global teardown for all tests */
+static int
+test_cleanup(void)
+{
+ int i;
+
+ spdk_mempool_free(g_mbuf_mp);
+ for (i = 0; i < MAX_TEST_BLOCKS; i++) {
+ free(g_test_crypto_ops[i]);
+ }
+ free(g_bdev_io->u.bdev.iovs);
+ free(g_bdev_io);
+ free(g_io_ch);
+ return 0;
+}
+
+static void
+test_error_paths(void)
+{
+ /* Single element block size write, just to test error paths
+ * in vbdev_crypto_submit_request().
+ */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ /* test failure of spdk_mempool_get_bulk(), will result in success because it
+ * will get queued.
+ */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(spdk_mempool_get, NULL);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* same thing but switch to reads to test error path in _crypto_complete_io() */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ /* Now with the read_blocks failing */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(spdk_bdev_readv_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(spdk_bdev_readv_blocks, 0);
+ MOCK_CLEAR(spdk_mempool_get);
+
+ /* test failure of rte_crypto_op_bulk_alloc() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ ut_rte_crypto_op_bulk_alloc = 0;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ ut_rte_crypto_op_bulk_alloc = 1;
+
+ /* test failure of rte_crypto_op_attach_sym_session() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ ut_rte_crypto_op_attach_sym_session = -1;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ ut_rte_crypto_op_attach_sym_session = 0;
+}
+
+static void
+test_simple_write(void)
+{
+ /* Single element block size write */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.offset_blocks = 0;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
+ CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
+ CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
+ CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);
+
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
+}
+
+static void
+test_simple_read(void)
+{
+ /* Single element block size read */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
+
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+}
+
+static void
+test_large_rw(void)
+{
+ unsigned block_len = 512;
+ unsigned num_blocks = CRYPTO_MAX_IO / block_len;
+ unsigned io_len = block_len * num_blocks;
+ unsigned i;
+
+ /* Multi block size read, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ }
+
+ /* Multi block size write, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
+ CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
+ CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
+ CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+ }
+}
+
+static void
+test_dev_full(void)
+{
+ struct vbdev_crypto_op *queued_op;
+ struct rte_crypto_sym_op *sym_op;
+ struct crypto_bdev_io *io_ctx;
+
+ /* Two element block size read */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 2;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = 1;
+ ut_rte_crypto_op_bulk_alloc = 2;
+
+ g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
+ sym_op = g_test_crypto_ops[0]->sym;
+ CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
+ CU_ASSERT(sym_op->m_src->data_len == 512);
+ CU_ASSERT(sym_op->m_src->next == NULL);
+ CU_ASSERT(sym_op->cipher.data.length == 512);
+ CU_ASSERT(sym_op->cipher.data.offset == 0);
+ CU_ASSERT(sym_op->m_src->userdata == g_bdev_io);
+ CU_ASSERT(sym_op->m_dst == NULL);
+
+ /* make sure one got queued and confirm its values */
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
+ queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
+ sym_op = queued_op->crypto_op->sym;
+ TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
+ CU_ASSERT(queued_op->bdev_io == g_bdev_io);
+ CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
+ CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
+ CU_ASSERT(sym_op->m_src->data_len == 512);
+ CU_ASSERT(sym_op->m_src->next == NULL);
+ CU_ASSERT(sym_op->cipher.data.length == 512);
+ CU_ASSERT(sym_op->cipher.data.offset == 0);
+ CU_ASSERT(sym_op->m_src->userdata == g_bdev_io);
+ CU_ASSERT(sym_op->m_dst == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[1]->sym->m_src);
+
+ /* Non-busy reason for enqueue failure, all were rejected. */
+ g_enqueue_mock = 0;
+ g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
+ CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+}
+
+static void
+test_crazy_rw(void)
+{
+ unsigned block_len = 512;
+ int num_blocks = 4;
+ int i;
+
+ /* Multi block size read, single element, strange IOV makeup */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 3;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
+ g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
+ g_bdev_io->u.bdev.iovs[2].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;
+
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ }
+
+ /* Multi block size write, single element strange IOV makeup */
+ num_blocks = 8;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 4;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
+ g_bdev_io->u.bdev.iovs[2].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
+ g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
+ g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;
+
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+ }
+}
+
+static void
+test_passthru(void)
+{
+	/* Make sure these pass through and hit our completion callback; test both success and failure. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
+ MOCK_SET(spdk_bdev_unmap_blocks, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_unmap_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_unmap_blocks);
+
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
+ MOCK_SET(spdk_bdev_flush_blocks, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_flush_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_flush_blocks);
+
+ /* We should never get a WZ command, we report that we don't support it. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+}
+
+static void
+test_reset(void)
+{
+ /* TODO: There are a few different ways to do this given that
+ * the code uses spdk_for_each_channel() to implement reset
+ * handling. Submitting w/o UT for this function for now and
+ * will follow up with something shortly.
+ */
+}
+
+static void
+init_cleanup(void)
+{
+ spdk_mempool_free(g_mbuf_mp);
+ rte_mempool_free(g_session_mp);
+ g_mbuf_mp = NULL;
+ g_session_mp = NULL;
+ if (g_session_mp_priv != NULL) {
+ /* g_session_mp_priv may or may not be set depending on the DPDK version */
+ rte_mempool_free(g_session_mp_priv);
+ }
+}
+
+static void
+test_initdrivers(void)
+{
+ int rc;
+ static struct spdk_mempool *orig_mbuf_mp;
+ static struct rte_mempool *orig_session_mp;
+ static struct rte_mempool *orig_session_mp_priv;
+
+ /* These tests will alloc and free our g_mbuf_mp
+ * so save that off here and restore it after each test is over.
+ */
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ orig_session_mp_priv = g_session_mp_priv;
+
+ g_session_mp_priv = NULL;
+ g_session_mp = NULL;
+ g_mbuf_mp = NULL;
+
+ /* No drivers available, not an error though */
+ MOCK_SET(rte_cryptodev_count, 0);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+
+ /* Test failure of DPDK dev init. */
+ MOCK_SET(rte_cryptodev_count, 2);
+ MOCK_SET(rte_vdev_init, -1);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ MOCK_SET(rte_vdev_init, 0);
+
+ /* Can't create session pool. */
+ MOCK_SET(spdk_mempool_create, NULL);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -ENOMEM);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ MOCK_CLEAR(spdk_mempool_create);
+
+ /* Can't create op pool. */
+ MOCK_SET(rte_crypto_op_pool_create, NULL);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -ENOMEM);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
+
+ /* Check resources are not sufficient */
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test crypto dev configure failure. */
+ MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
+ MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
+ MOCK_SET(rte_cryptodev_configure, -1);
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ rc = vbdev_crypto_init_crypto_drivers();
+ MOCK_SET(rte_cryptodev_configure, 0);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test failure of qp setup. */
+ MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
+
+ /* Test failure of dev start. */
+ MOCK_SET(rte_cryptodev_start, -1);
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(g_session_mp_priv == NULL);
+ MOCK_SET(rte_cryptodev_start, 0);
+
+ /* Test bogus PMD */
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(g_mbuf_mp == NULL);
+ CU_ASSERT(g_session_mp == NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test happy path QAT. */
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(g_mbuf_mp != NULL);
+ CU_ASSERT(g_session_mp != NULL);
+ init_cleanup();
+ CU_ASSERT(rc == 0);
+
+ /* Test happy path AESNI. */
+ MOCK_CLEARED_ASSERT(spdk_mempool_create);
+ MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
+ rc = vbdev_crypto_init_crypto_drivers();
+ init_cleanup();
+ CU_ASSERT(rc == 0);
+
+ /* restore our initial values. */
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ g_session_mp_priv = orig_session_mp_priv;
+}
+
+static void
+test_crypto_op_complete(void)
+{
+ /* Make sure completion code respects failure. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test read completion. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test write completion success. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ MOCK_SET(spdk_bdev_writev_blocks, 0);
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test write completion failed. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ MOCK_SET(spdk_bdev_writev_blocks, -1);
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test bogus type for this completion. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+}
+
+static void
+test_supported_io(void)
+{
+ void *ctx = NULL;
+ bool rc = true;
+
+	/* Make sure we always report false for WZ; we need the bdev layer to
+	 * send real zeroes so we can encrypt/decrypt them.
+ */
+ rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(rc == false);
+}
+
+static void
+test_poller(void)
+{
+ int rc;
+ struct rte_mbuf *src_mbufs[2];
+ struct vbdev_crypto_op *op_to_resubmit;
+
+ /* test regular 1 op to dequeue and complete */
+ g_dequeue_mock = g_enqueue_mock = 1;
+ spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 1);
+ g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
+ g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io;
+ g_test_crypto_ops[0]->sym->m_dst = NULL;
+ g_io_ctx->cryop_cnt_remaining = 1;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ rc = crypto_dev_poller(g_crypto_ch);
+ CU_ASSERT(rc == 1);
+
+ /* We have nothing dequeued but have some to resubmit */
+ g_dequeue_mock = 0;
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
+
+ /* add an op to the queued list. */
+ g_resubmit_test = true;
+ op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
+ op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
+ op_to_resubmit->bdev_io = g_bdev_io;
+ TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
+ op_to_resubmit,
+ link);
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
+ rc = crypto_dev_poller(g_crypto_ch);
+ g_resubmit_test = false;
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
+
+ /* 2 to dequeue but 2nd one failed */
+ g_dequeue_mock = g_enqueue_mock = 2;
+ g_io_ctx->cryop_cnt_remaining = 2;
+ spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 2);
+ g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
+ g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io;
+ g_test_crypto_ops[0]->sym->m_dst = NULL;
+ g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
+ g_test_crypto_ops[1]->sym->m_src->userdata = g_bdev_io;
+ g_test_crypto_ops[1]->sym->m_dst = NULL;
+ g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ rc = crypto_dev_poller(g_crypto_ch);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(rc == 2);
+}
+
+/* Helper function for test_assign_device_qp() */
+static void
+_clear_device_qp_lists(void)
+{
+ struct device_qp *device_qp = NULL;
+
+ while (!TAILQ_EMPTY(&g_device_qp_qat)) {
+ device_qp = TAILQ_FIRST(&g_device_qp_qat);
+ TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
+ free(device_qp);
+
+ }
+ CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
+ while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
+ device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
+ TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
+ free(device_qp);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
+}
+
+/* Helper function for test_assign_device_qp() */
+static void
+_check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
+ struct crypto_io_channel *crypto_ch, uint8_t expected_index,
+ uint8_t current_index)
+{
+ _assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
+ CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
+ CU_ASSERT(g_next_qat_index == current_index);
+}
+
+static void
+test_assign_device_qp(void)
+{
+ struct device_qp *device_qp = NULL;
+ int i;
+
+ /* start with a known state, clear the device/qp lists */
+ _clear_device_qp_lists();
+
+ /* make sure that one AESNI_MB qp is found */
+ device_qp = calloc(1, sizeof(struct device_qp));
+ TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
+ g_crypto_ch->device_qp = NULL;
+ g_crypto_bdev.drv_name = AESNI_MB;
+ _assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
+ CU_ASSERT(g_crypto_ch->device_qp != NULL);
+
+	/* QAT testing is more complex as the code under test load balances by
+	 * assigning each subsequent device/qp at a stride of QAT_VF_SPREAD,
+	 * modulo g_qat_total_qp. For the latest QAT we'll have 48 virtual
+	 * functions, each with 2 qp, so the "spread" between assignments is 32.
+	 */
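+	/* Concretely, with g_qat_total_qp = 96 and a spread of 32, the
+	 * assertions below expect assignments at index 0, 32 and 64, and then,
+	 * after wrapping, at index 1 (with the next index advancing to 33).
+	 */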
+ g_qat_total_qp = 96;
+ for (i = 0; i < g_qat_total_qp; i++) {
+ device_qp = calloc(1, sizeof(struct device_qp));
+ device_qp->index = i;
+ TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
+ }
+ g_crypto_ch->device_qp = NULL;
+ g_crypto_bdev.drv_name = QAT;
+
+ /* First assignment will assign to 0 and next at 32. */
+ _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
+ 0, QAT_VF_SPREAD);
+
+ /* Second assignment will assign to 32 and next at 64. */
+ _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
+ QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
+
+ /* Third assignment will assign to 64 and next at 0. */
+ _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
+ QAT_VF_SPREAD * 2, 0);
+
+ /* Fourth assignment will assign to 1 and next at 33. */
+ _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
+ 1, QAT_VF_SPREAD + 1);
+
+ _clear_device_qp_lists();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("crypto", test_setup, test_cleanup);
+ CU_ADD_TEST(suite, test_error_paths);
+ CU_ADD_TEST(suite, test_simple_write);
+ CU_ADD_TEST(suite, test_simple_read);
+ CU_ADD_TEST(suite, test_large_rw);
+ CU_ADD_TEST(suite, test_dev_full);
+ CU_ADD_TEST(suite, test_crazy_rw);
+ CU_ADD_TEST(suite, test_passthru);
+ CU_ADD_TEST(suite, test_initdrivers);
+ CU_ADD_TEST(suite, test_crypto_op_complete);
+ CU_ADD_TEST(suite, test_supported_io);
+ CU_ADD_TEST(suite, test_reset);
+ CU_ADD_TEST(suite, test_poller);
+ CU_ADD_TEST(suite, test_assign_device_qp);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/gpt/Makefile b/src/spdk/test/unit/lib/bdev/gpt/Makefile
new file mode 100644
index 000000000..2fad9ba03
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = gpt.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore
new file mode 100644
index 000000000..74d476f5c
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore
@@ -0,0 +1 @@
+gpt_ut
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile
new file mode 100644
index 000000000..202fe9cb4
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+
+TEST_FILE = gpt_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c
new file mode 100644
index 000000000..8095fce19
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c
@@ -0,0 +1,363 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
+#include "bdev/gpt/gpt.c"
+
+static void
+test_check_mbr(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_mbr *mbr;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* Set gpt is NULL */
+ re = gpt_parse_mbr(NULL);
+ CU_ASSERT(re == -1);
+
+ /* Set gpt->buf is NULL */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ re = gpt_parse_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+	/* Set *gpt to "aaa...": every field mismatches, including mbr_signature */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ re = gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->mbr_signature matched, start lba mismatch */
+ mbr = (struct spdk_mbr *)gpt->buf;
+ mbr->mbr_signature = 0xAA55;
+ re = gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->partitions[0].start lba matched, os_type mismatch */
+ mbr->partitions[0].start_lba = 1;
+ re = gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->partitions[0].os_type matched, size_lba mismatch */
+ mbr->partitions[0].os_type = 0xEE;
+ re = gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->partitions[0].size_lba matched, passing case */
+ mbr->partitions[0].size_lba = 0xFFFFFFFF;
+ re = gpt_check_mbr(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_read_header(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+	/* gpt_read_header(NULL) is not tested; a NULL gpt is filtered out in gpt_parse_mbr() */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ gpt->parse_phase = SPDK_GPT_PARSE_PHASE_PRIMARY;
+ gpt->sector_size = 512;
+
+ /* Set *gpt is "aaa..." */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ gpt->buf_size = sizeof(a);
+
+ /* Set header_size mismatch */
+ gpt->sector_size = 512;
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ to_le32(&head->header_size, 0x258);
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set head->header_size matched, header_crc32 mismatch */
+ head->header_size = sizeof(*head);
+ to_le32(&head->header_crc32, 0x22D18C80);
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set head->header_crc32 matched, gpt_signature mismatch */
+ to_le32(&head->header_crc32, 0xC5B2117E);
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set head->gpt_signature matched, head->my_lba mismatch */
+ to_le32(&head->header_crc32, 0xD637335A);
+ head->gpt_signature[0] = 'E';
+ head->gpt_signature[1] = 'F';
+ head->gpt_signature[2] = 'I';
+ head->gpt_signature[3] = ' ';
+ head->gpt_signature[4] = 'P';
+ head->gpt_signature[5] = 'A';
+ head->gpt_signature[6] = 'R';
+ head->gpt_signature[7] = 'T';
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set head->my_lba matched, lba_end usable_lba mismatch */
+ to_le32(&head->header_crc32, 0xB3CDB2D2);
+ to_le64(&head->my_lba, 0x1);
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set gpt->lba_end usable_lba matched, passing case */
+ to_le32(&head->header_crc32, 0x5531F2F0);
+ to_le64(&gpt->lba_start, 0x0);
+ to_le64(&gpt->lba_end, 0x2E935FFE);
+ to_le64(&head->first_usable_lba, 0xA);
+ to_le64(&head->last_usable_lba, 0xF4240);
+ re = gpt_read_header(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_read_partitions(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+	/* gpt_read_partitions(NULL) is not tested; a NULL gpt is filtered out in gpt_parse_mbr() */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ gpt->parse_phase = SPDK_GPT_PARSE_PHASE_PRIMARY;
+ gpt->sector_size = 512;
+
+ /* Set *gpt is "aaa..." */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ gpt->buf_size = sizeof(a);
+
+	/* Set num_partition_entries to exceed the maximum number of entries GPT supports */
+ gpt->sector_size = 512;
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ gpt->header = head;
+ to_le32(&head->num_partition_entries, 0x100);
+ re = gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set num_partition_entries within Max value, size_of_partition_entry mismatch */
+ to_le32(&head->header_crc32, 0x573857BE);
+ to_le32(&head->num_partition_entries, 0x40);
+ to_le32(&head->size_of_partition_entry, 0x0);
+ re = gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set size_of_partition_entry matched, partition_entry_lba mismatch */
+ to_le32(&head->header_crc32, 0x5279B712);
+ to_le32(&head->size_of_partition_entry, 0x80);
+ to_le64(&head->partition_entry_lba, 0x64);
+ re = gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set partition_entry_lba matched, partition_entry_array_crc32 mismatch */
+ to_le32(&head->header_crc32, 0xEC093B43);
+ to_le64(&head->partition_entry_lba, 0x20);
+ to_le32(&head->partition_entry_array_crc32, 0x0);
+ re = gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set partition_entry_array_crc32 matched, passing case */
+ to_le32(&head->header_crc32, 0xE1A08822);
+ to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB);
+ to_le32(&head->num_partition_entries, 0x80);
+ re = gpt_read_partitions(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_parse_mbr_and_primary(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_mbr *mbr;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* Set gpt is NULL */
+ re = gpt_parse_mbr(NULL);
+ CU_ASSERT(re == -1);
+
+ /* Set gpt->buf is NULL */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ gpt->parse_phase = SPDK_GPT_PARSE_PHASE_PRIMARY;
+ gpt->sector_size = 512;
+ re = gpt_parse_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set *gpt is "aaa...", check_mbr failed */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ gpt->buf_size = sizeof(a);
+ re = gpt_parse_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set check_mbr passed */
+ mbr = (struct spdk_mbr *)gpt->buf;
+ mbr->mbr_signature = 0xAA55;
+ mbr->partitions[0].start_lba = 1;
+ mbr->partitions[0].os_type = 0xEE;
+ mbr->partitions[0].size_lba = 0xFFFFFFFF;
+ re = gpt_parse_mbr(gpt);
+ CU_ASSERT(re == 0);
+
+ /* Expect read_header failed */
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set read_header passed, read_partitions failed */
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ head->header_size = sizeof(*head);
+ head->gpt_signature[0] = 'E';
+ head->gpt_signature[1] = 'F';
+ head->gpt_signature[2] = 'I';
+ head->gpt_signature[3] = ' ';
+ head->gpt_signature[4] = 'P';
+ head->gpt_signature[5] = 'A';
+ head->gpt_signature[6] = 'R';
+ head->gpt_signature[7] = 'T';
+ to_le32(&head->header_crc32, 0x5531F2F0);
+ to_le64(&head->my_lba, 0x1);
+ to_le64(&gpt->lba_start, 0x0);
+ to_le64(&gpt->lba_end, 0x2E935FFE);
+ to_le64(&head->first_usable_lba, 0xA);
+ to_le64(&head->last_usable_lba, 0xF4240);
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set read_partitions passed, all passed */
+ to_le32(&head->size_of_partition_entry, 0x80);
+ to_le64(&head->partition_entry_lba, 0x20);
+ to_le32(&head->header_crc32, 0x845A09AA);
+ to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB);
+ to_le32(&head->num_partition_entries, 0x80);
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_parse_secondary(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+	/* gpt_parse_partition_table(NULL) is not tested; a NULL gpt is filtered out in gpt_parse_mbr() */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ gpt->parse_phase = SPDK_GPT_PARSE_PHASE_SECONDARY;
+ gpt->sector_size = 512;
+
+ /* Set *gpt is "aaa...", read_header failed */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ gpt->buf_size = sizeof(a);
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set read_header passed, read_partitions failed */
+ head = (struct spdk_gpt_header *)(gpt->buf + gpt->buf_size - gpt->sector_size);
+ head->header_size = sizeof(*head);
+ head->gpt_signature[0] = 'E';
+ head->gpt_signature[1] = 'F';
+ head->gpt_signature[2] = 'I';
+ head->gpt_signature[3] = ' ';
+ head->gpt_signature[4] = 'P';
+ head->gpt_signature[5] = 'A';
+ head->gpt_signature[6] = 'R';
+ head->gpt_signature[7] = 'T';
+ to_le32(&head->header_crc32, 0xAA68A167);
+ to_le64(&head->my_lba, 0x63FFFFF);
+ to_le64(&gpt->lba_start, 0x0);
+ to_le64(&gpt->lba_end, 0x63FFFFF);
+ to_le64(&gpt->total_sectors, 0x6400000);
+ to_le64(&head->first_usable_lba, 0xA);
+ to_le64(&head->last_usable_lba, 0x63FFFDE);
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set read_partitions passed, all passed */
+ to_le32(&head->size_of_partition_entry, 0x80);
+ to_le64(&head->partition_entry_lba, 0x63FFFDF);
+ to_le32(&head->header_crc32, 0x204129E8);
+ to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB);
+ to_le32(&head->num_partition_entries, 0x80);
+ re = gpt_parse_partition_table(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("gpt_parse", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_parse_mbr_and_primary);
+ CU_ADD_TEST(suite, test_parse_secondary);
+ CU_ADD_TEST(suite, test_check_mbr);
+ CU_ADD_TEST(suite, test_read_header);
+ CU_ADD_TEST(suite, test_read_partitions);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/mt/Makefile b/src/spdk/test/unit/lib/bdev/mt/Makefile
new file mode 100644
index 000000000..a19b345aa
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore b/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore
new file mode 100644
index 000000000..a5a22d0d3
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore
@@ -0,0 +1 @@
+bdev_ut
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile b/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile
new file mode 100644
index 000000000..46b2987ae
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+
+TEST_FILE = bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c b/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
new file mode 100644
index 000000000..351404a37
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
@@ -0,0 +1,1994 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/ut_multithread.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "bdev/bdev.c"
+
+#define BDEV_UT_NUM_THREADS 3
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
+DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
+
+struct ut_bdev {
+ struct spdk_bdev bdev;
+ void *io_target;
+};
+
+struct ut_bdev_channel {
+ TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
+ uint32_t outstanding_cnt;
+ uint32_t avail_cnt;
+};
+
+int g_io_device;
+struct ut_bdev g_bdev;
+struct spdk_bdev_desc *g_desc;
+bool g_teardown_done = false;
+bool g_get_io_channel = true;
+bool g_create_ch = true;
+bool g_init_complete_called = false;
+bool g_fini_start_called = true;
+int g_status = 0;
+int g_count = 0;
+struct spdk_histogram_data *g_histogram = NULL;
+
+static int
+stub_create_ch(void *io_device, void *ctx_buf)
+{
+ struct ut_bdev_channel *ch = ctx_buf;
+
+ if (g_create_ch == false) {
+ return -1;
+ }
+
+ TAILQ_INIT(&ch->outstanding_io);
+ ch->outstanding_cnt = 0;
+ /*
+ * When avail gets to 0, the submit_request function will return ENOMEM.
+	 * Most tests do not want ENOMEM to occur, so by default set this to a
+ * big value that won't get hit. The ENOMEM tests can then override this
+ * value to something much smaller to induce ENOMEM conditions.
+ */
+ ch->avail_cnt = 2048;
+ return 0;
+}
+
+static void
+stub_destroy_ch(void *io_device, void *ctx_buf)
+{
+}
+
+static struct spdk_io_channel *
+stub_get_io_channel(void *ctx)
+{
+ struct ut_bdev *ut_bdev = ctx;
+
+ if (g_get_io_channel == true) {
+ return spdk_get_io_channel(ut_bdev->io_target);
+ } else {
+ return NULL;
+ }
+}
+
+static int
+stub_destruct(void *ctx)
+{
+ return 0;
+}
+
+static void
+stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
+{
+ struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+ struct spdk_bdev_io *io;
+
+ if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
+ while (!TAILQ_EMPTY(&ch->outstanding_io)) {
+ io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+ ch->outstanding_cnt--;
+ spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
+ ch->avail_cnt++;
+ }
+ } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
+ TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
+ if (io == bdev_io->u.abort.bio_to_abort) {
+ TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+ ch->outstanding_cnt--;
+ spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
+ ch->avail_cnt++;
+
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
+ return;
+ }
+ }
+
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
+ return;
+ }
+
+ if (ch->avail_cnt > 0) {
+ TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_cnt++;
+ ch->avail_cnt--;
+ } else {
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
+ }
+}
+
+static uint32_t
+stub_complete_io(void *io_target, uint32_t num_to_complete)
+{
+ struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
+ struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+ struct spdk_bdev_io *io;
+ bool complete_all = (num_to_complete == 0);
+ uint32_t num_completed = 0;
+
+ while (complete_all || num_completed < num_to_complete) {
+ if (TAILQ_EMPTY(&ch->outstanding_io)) {
+ break;
+ }
+ io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+ ch->outstanding_cnt--;
+ spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
+ ch->avail_cnt++;
+ num_completed++;
+ }
+
+ spdk_put_io_channel(_ch);
+ return num_completed;
+}
+
+static bool
+stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
+{
+ return true;
+}
+
+static struct spdk_bdev_fn_table fn_table = {
+ .get_io_channel = stub_get_io_channel,
+ .destruct = stub_destruct,
+ .submit_request = stub_submit_request,
+ .io_type_supported = stub_io_type_supported,
+};
+
+struct spdk_bdev_module bdev_ut_if;
+
+static int
+module_init(void)
+{
+ spdk_bdev_module_init_done(&bdev_ut_if);
+ return 0;
+}
+
+static void
+module_fini(void)
+{
+}
+
+static void
+init_complete(void)
+{
+ g_init_complete_called = true;
+}
+
+static void
+fini_start(void)
+{
+ g_fini_start_called = true;
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+ .module_init = module_init,
+ .module_fini = module_fini,
+ .async_init = true,
+ .init_complete = init_complete,
+ .fini_start = fini_start,
+};
+
+SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
+
+static void
+register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
+{
+ memset(ut_bdev, 0, sizeof(*ut_bdev));
+
+ ut_bdev->io_target = io_target;
+ ut_bdev->bdev.ctxt = ut_bdev;
+ ut_bdev->bdev.name = name;
+ ut_bdev->bdev.fn_table = &fn_table;
+ ut_bdev->bdev.module = &bdev_ut_if;
+ ut_bdev->bdev.blocklen = 4096;
+ ut_bdev->bdev.blockcnt = 1024;
+
+ spdk_bdev_register(&ut_bdev->bdev);
+}
+
+static void
+unregister_bdev(struct ut_bdev *ut_bdev)
+{
+ /* Handle any deferred messages. */
+ poll_threads();
+ spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
+}
+
+static void
+bdev_init_cb(void *done, int rc)
+{
+ CU_ASSERT(rc == 0);
+ *(bool *)done = true;
+}
+
+static void
+setup_test(void)
+{
+ bool done = false;
+
+ allocate_cores(BDEV_UT_NUM_THREADS);
+ allocate_threads(BDEV_UT_NUM_THREADS);
+ set_thread(0);
+ spdk_bdev_initialize(bdev_init_cb, &done);
+ spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
+ sizeof(struct ut_bdev_channel), NULL);
+ register_bdev(&g_bdev, "ut_bdev", &g_io_device);
+ spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
+}
+
+static void
+finish_cb(void *cb_arg)
+{
+ g_teardown_done = true;
+}
+
+static void
+teardown_test(void)
+{
+ set_thread(0);
+ g_teardown_done = false;
+ spdk_bdev_close(g_desc);
+ g_desc = NULL;
+ unregister_bdev(&g_bdev);
+ spdk_io_device_unregister(&g_io_device, NULL);
+ spdk_bdev_finish(finish_cb, NULL);
+ poll_threads();
+ memset(&g_bdev, 0, sizeof(g_bdev));
+ CU_ASSERT(g_teardown_done == true);
+ g_teardown_done = false;
+ free_threads();
+ free_cores();
+}
+
+static uint32_t
+bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
+{
+ struct spdk_bdev_io *io;
+ uint32_t cnt = 0;
+
+ TAILQ_FOREACH(io, tailq, internal.link) {
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void
+basic(void)
+{
+ g_init_complete_called = false;
+ setup_test();
+ CU_ASSERT(g_init_complete_called == true);
+
+ set_thread(0);
+
+ g_get_io_channel = false;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch == NULL);
+
+ g_get_io_channel = true;
+ g_create_ch = false;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch == NULL);
+
+ g_get_io_channel = true;
+ g_create_ch = true;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch != NULL);
+ spdk_put_io_channel(g_ut_threads[0].ch);
+
+ g_fini_start_called = false;
+ teardown_test();
+ CU_ASSERT(g_fini_start_called == true);
+}
+
+static void
+_bdev_removed(void *done)
+{
+ *(bool *)done = true;
+}
+
+static void
+_bdev_unregistered(void *done, int rc)
+{
+ CU_ASSERT(rc == 0);
+ *(bool *)done = true;
+}
+
+static void
+unregister_and_close(void)
+{
+ bool done, remove_notify;
+ struct spdk_bdev_desc *desc = NULL;
+
+ setup_test();
+ set_thread(0);
+
+ /* setup_test() automatically opens the bdev,
+ * but this test needs to do that in a different
+ * way. */
+ spdk_bdev_close(g_desc);
+ poll_threads();
+
+ /* Try hotremoving a bdev with descriptors which don't provide
+ * the notification callback */
+ spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &desc);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+
+ /* There is an open descriptor on the device. Unregister it,
+ * which can't proceed until the descriptor is closed. */
+ done = false;
+ spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
+
+ /* Poll the threads to allow all events to be processed */
+ poll_threads();
+
+ /* Make sure the bdev was not unregistered. We still have a
+ * descriptor open */
+ CU_ASSERT(done == false);
+
+ spdk_bdev_close(desc);
+ poll_threads();
+ desc = NULL;
+
+ /* The unregister should have completed */
+ CU_ASSERT(done == true);
+
+
+ /* Register the bdev again */
+ register_bdev(&g_bdev, "ut_bdev", &g_io_device);
+
+ remove_notify = false;
+ spdk_bdev_open(&g_bdev.bdev, true, _bdev_removed, &remove_notify, &desc);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ CU_ASSERT(remove_notify == false);
+
+ /* There is an open descriptor on the device. Unregister it,
+ * which can't proceed until the descriptor is closed. */
+ done = false;
+ spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
+ /* No polling has occurred, so neither of these should execute */
+ CU_ASSERT(remove_notify == false);
+ CU_ASSERT(done == false);
+
+ /* Prior to the unregister completing, close the descriptor */
+ spdk_bdev_close(desc);
+
+ /* Poll the threads to allow all events to be processed */
+ poll_threads();
+
+ /* Remove notify should not have been called because the
+ * descriptor is already closed. */
+ CU_ASSERT(remove_notify == false);
+
+ /* The unregister should have completed */
+ CU_ASSERT(done == true);
+
+ /* Restore the original g_bdev so that we can use teardown_test(). */
+ register_bdev(&g_bdev, "ut_bdev", &g_io_device);
+ spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
+ teardown_test();
+}
+
+static void
+reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ bool *done = cb_arg;
+
+ CU_ASSERT(success == true);
+ *done = true;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+put_channel_during_reset(void)
+{
+ struct spdk_io_channel *io_ch;
+ bool done = false;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch != NULL);
+
+ /*
+ * Start a reset, but then put the I/O channel before
+ * the deferred messages for the reset get a chance to
+ * execute.
+ */
+ spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
+ spdk_put_io_channel(io_ch);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+
+ teardown_test();
+}
+
+static void
+aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+aborted_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
+ status2 = SPDK_BDEV_IO_STATUS_PENDING;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch[0] != NULL);
+ spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
+ poll_threads();
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * First reset has been submitted on ch0. Now submit a second
+ * reset on ch1 which will get queued since there is already a
+ * reset in progress.
+ */
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch[1] != NULL);
+ spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
+ poll_threads();
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * Now destroy ch1. This will abort the queued reset. Check that
+ * the second reset was completed with failed status. Also check
+ * that bdev->internal.reset_in_progress != NULL, since the
+ * original reset has not been completed yet. This ensures that
+ * the bdev code is correctly noticing that the failed reset is
+ * *not* the one that had been submitted to the bdev module.
+ */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+ CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * Now complete the first reset, verify that it completed with SUCCESS
+ * status and that bdev->internal.reset_in_progress is also set back to NULL.
+ */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
+
+ teardown_test();
+}
+
+static void
+io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = bdev_io->internal.status;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+io_during_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ enum spdk_bdev_io_status status0, status1, status_reset;
+ int rc;
+
+ setup_test();
+
+ /*
+ * First test normal case - submit an I/O on each of two channels (with no resets)
+ * and verify they complete successfully.
+ */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+
+ poll_threads();
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /*
+ * Now submit a reset, and leave it pending while we submit I/O on two different
+ * channels. These I/O should be failed by the bdev layer since the reset is in
+ * progress.
+ */
+ set_thread(0);
+ status_reset = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+ poll_threads();
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
+
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ set_thread(1);
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+
+ /*
+	 * A reset is in progress, so these read I/Os should complete as aborted. Note that we
+	 * need to poll_threads() since I/Os completed inline have their completions deferred.
+ */
+ poll_threads();
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
+
+ /*
+ * Complete the reset
+ */
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+
+ /*
+ * Only poll thread 0. We should not get a completion.
+ */
+ poll_thread(0);
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /*
+ * Poll both thread 0 and 1 so the messages can propagate and we
+ * get a completion.
+ */
+ poll_threads();
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+basic_qos(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status, abort_status;
+ int rc;
+
+ setup_test();
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable read/write IOPS, read only byte per second and
+ * read/write byte per second rate limits.
+ * In this case, all rate limits will take equal effect.
+ */
+ /* 2000 read/write I/O per second, or 2 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
+ /* 8K read/write byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
+ /* 8K read only byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;
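+	/* Worked numbers for the limits above, given the 4K blocklen set in
+	 * register_bdev(): 2000 IOPS is 2 I/O per millisecond, and 8192000
+	 * bytes per second is 8192 bytes (two 4K blocks) per millisecond, so
+	 * every limit allows the same rate.
+	 */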
+
+ g_get_io_channel = true;
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /*
+ * Send an I/O on thread 0, which is where the QoS thread is running.
+ */
+ set_thread(0);
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Send an I/O on thread 1. The QoS thread is not running here. */
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ set_thread(1);
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ /* Complete I/O on thread 1. This should not complete the I/O we submitted */
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ /* Now complete I/O on thread 0 */
+ set_thread(0);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Reset rate limit for the next test cases. */
+ spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
+ poll_threads();
+
+ /*
+ * Test abort request when QoS is enabled.
+ */
+
+ /* Send an I/O on thread 0, which is where the QoS thread is running. */
+ set_thread(0);
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ /* Send an abort to the I/O on the same thread. */
+ abort_status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
+
+ /* Send an I/O on thread 1. The QoS thread is not running here. */
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ set_thread(1);
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ /* Send an abort to the I/O on the same thread. */
+ abort_status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ /* Complete the I/O with failure and the abort with success on thread 1. */
+ CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
+
+ set_thread(0);
+
+ /*
+	 * Close only the descriptor, which should stop the qos channel since
+	 * the last descriptor is being removed.
+ */
+ spdk_bdev_close(g_desc);
+ poll_threads();
+ CU_ASSERT(bdev->internal.qos->ch == NULL);
+
+ /*
+	 * Open the bdev again, which should set up the qos channel since the
+	 * channels are still valid.
+ */
+ spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
+ poll_threads();
+ CU_ASSERT(bdev->internal.qos->ch != NULL);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+ set_thread(0);
+
+ /* Close the descriptor, which should stop the qos channel */
+ spdk_bdev_close(g_desc);
+ poll_threads();
+ CU_ASSERT(bdev->internal.qos->ch == NULL);
+
+ /* Open the bdev again, no qos channel setup without valid channels. */
+ spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
+ poll_threads();
+ CU_ASSERT(bdev->internal.qos->ch == NULL);
+
+ /* Create the channels in reverse order. */
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Confirm that the qos thread is now thread 1 */
+ CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ set_thread(0);
+
+ teardown_test();
+}
+
+static void
+io_during_qos_queue(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status0, status1, status2;
+ int rc;
+
+ setup_test();
+ MOCK_SET(spdk_get_ticks, 0);
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable the read/write IOPS, read-only bytes per second, write-only
+ * bytes per second and read/write bytes per second rate limits.
+ * In this case, both the read-only and the write-only bytes per second
+ * rate limits will take effect.
+ */
+ /* 4000 read/write I/O per second, or 4 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
+ /* 8K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
+ /* 4K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
+ /* 4K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Send two read I/Os */
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+ /* Send one write I/O */
+ status2 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Complete any I/O that arrived at the disk */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ /* Only one of the two read I/Os should complete. (logical XOR) */
+ if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ } else {
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ }
+ /* The write I/O should complete. */
+ CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Advance in time by a millisecond */
+ spdk_delay_us(1000);
+
+ /* Complete more I/O */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ /* Now the second read I/O should be done */
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Tear down the channels */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+io_during_qos_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status0, status1, reset_status;
+ int rc;
+
+ setup_test();
+ MOCK_SET(spdk_get_ticks, 0);
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable the read/write IOPS, write-only bytes per second and
+ * read/write bytes per second rate limits.
+ * In this case, the read/write bytes per second rate limit will
+ * take effect first.
+ */
+ /* 2000 read/write I/O per second, or 2 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
+ /* 4K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
+ /* 8K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ poll_threads();
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Reset the bdev. */
+ reset_status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
+ CU_ASSERT(rc == 0);
+
+ /* Complete any I/O that arrived at the disk */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
+
+ /* Tear down the channels */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ poll_threads();
+
+ teardown_test();
+}
+
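+/*
+ * Completion callback for the ENOMEM tests: record success or failure in the
+ * caller's status variable and free the bdev_io.
+ */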
+static void
+enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+enomem(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct spdk_bdev_shared_resource *shared_resource;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
+ uint32_t nomem_cnt, i;
+ struct spdk_bdev_io *first_io;
+ int rc;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ shared_resource = bdev_ch->shared_resource;
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ /* First submit a number of IOs equal to what the channel can support. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /*
+ * Next, submit one additional I/O. This one should fail with ENOMEM and then go onto
+ * the nomem_io list.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
+ first_io = TAILQ_FIRST(&shared_resource->nomem_io);
+
+ /*
+ * Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind
+ * the first_io above.
+ */
+ for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+
+ /* Assert that first_io is still at the head of the list. */
+ CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
+ nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
+ CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
+
+ /*
+ * Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have
+ * changed since completing just 1 I/O should not trigger retrying the queued nomem_io
+ * list.
+ */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
+
+ /*
+ * Complete enough I/O to hit the nomem_threshold. This should trigger retrying nomem_io,
+ * and we should see I/O get resubmitted to the test bdev module.
+ */
+ stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
+ nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
+
+ /* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
+
+ /*
+ * Send a reset and confirm that all I/O are completed, including the ones that
+ * were queued on the nomem_io list.
+ */
+ status_reset = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
+ poll_threads();
+ CU_ASSERT(rc == 0);
+ /* This will complete the reset. */
+ stub_complete_io(g_bdev.io_target, 0);
+
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
+ CU_ASSERT(shared_resource->io_outstanding == 0);
+
+ spdk_put_io_channel(io_ch);
+ poll_threads();
+ teardown_test();
+}
+
+static void
+enomem_multi_bdev(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct spdk_bdev_shared_resource *shared_resource;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
+ uint32_t i;
+ struct ut_bdev *second_bdev;
+ struct spdk_bdev_desc *second_desc = NULL;
+ struct spdk_bdev_channel *second_bdev_ch;
+ struct spdk_io_channel *second_ch;
+ int rc;
+
+ setup_test();
+
+ /* Register second bdev with the same io_target */
+ second_bdev = calloc(1, sizeof(*second_bdev));
+ SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
+ register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
+ spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
+ SPDK_CU_ASSERT_FATAL(second_desc != NULL);
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ shared_resource = bdev_ch->shared_resource;
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ second_ch = spdk_bdev_get_io_channel(second_desc);
+ second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
+ SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
+
+ /* Saturate io_target through bdev A. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /*
+ * Now submit I/O through the second bdev. This should fail with ENOMEM
+ * and then go onto the nomem_io list.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /* Complete the first bdev's I/O. This should retry sending the second bdev's nomem_io. */
+ stub_complete_io(g_bdev.io_target, AVAIL);
+
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
+ CU_ASSERT(shared_resource->io_outstanding == 1);
+
+ /* Now complete our retried I/O */
+ stub_complete_io(g_bdev.io_target, 1);
+ SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_put_io_channel(second_ch);
+ spdk_bdev_close(second_desc);
+ unregister_bdev(second_bdev);
+ poll_threads();
+ free(second_bdev);
+ teardown_test();
+}
+
+static void
+enomem_multi_io_target(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
+ uint32_t i;
+ int new_io_device;
+ struct ut_bdev *second_bdev;
+ struct spdk_bdev_desc *second_desc = NULL;
+ struct spdk_bdev_channel *second_bdev_ch;
+ struct spdk_io_channel *second_ch;
+ int rc;
+
+ setup_test();
+
+ /* Create new io_target and a second bdev using it */
+ spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
+ sizeof(struct ut_bdev_channel), NULL);
+ second_bdev = calloc(1, sizeof(*second_bdev));
+ SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
+ register_bdev(second_bdev, "ut_bdev2", &new_io_device);
+ spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
+ SPDK_CU_ASSERT_FATAL(second_desc != NULL);
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ /* Different io_target should imply a different shared_resource */
+ second_ch = spdk_bdev_get_io_channel(second_desc);
+ second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
+ SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
+
+ /* Saturate io_target through bdev A. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+
+ /* Issue one more I/O to start filling the nomem_io list. */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+
+ /*
+ * Now submit I/O through the second bdev. This should go through and complete
+ * successfully because we're using a different io_device underneath.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
+ stub_complete_io(second_bdev->io_target, 1);
+
+ /* Cleanup; Complete outstanding I/O. */
+ stub_complete_io(g_bdev.io_target, AVAIL);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+ /* Complete the ENOMEM I/O */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
+
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+ CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
+ spdk_put_io_channel(io_ch);
+ spdk_put_io_channel(second_ch);
+ spdk_bdev_close(second_desc);
+ unregister_bdev(second_bdev);
+ spdk_io_device_unregister(&new_io_device, NULL);
+ poll_threads();
+ free(second_bdev);
+ teardown_test();
+}
+
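+/*
+ * Completion callback for spdk_bdev_set_qos_rate_limits(); it stores the
+ * reported status so the test can check it after polling.
+ */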
+static void
+qos_dynamic_enable_done(void *cb_arg, int status)
+{
+ int *rc = cb_arg;
+ *rc = status;
+}
+
+static void
+qos_dynamic_enable(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status bdev_io_status[2];
+ uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
+ int status, second_status, rc, i;
+
+ setup_test();
+ MOCK_SET(spdk_get_ticks, 0);
+
+ for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
+ limits[i] = UINT64_MAX;
+ }
+
+ bdev = &g_bdev.bdev;
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+
+ set_thread(0);
+
+ /*
+ * Enable QoS: read/write IOPS, read/write bytes per second,
+ * read-only bytes per second and write-only bytes per second
+ * rate limits.
+ * More than 10 I/Os are allowed per timeslice.
+ */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
+ limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
+ limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /*
+ * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
+ * Additional I/O will then be queued.
+ */
+ set_thread(0);
+ for (i = 0; i < 10; i++) {
+ bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
+ }
+
+ /*
+ * Send two more I/O. These I/O will be queued since the current timeslice allotment has
+ * already been filled. We want to test that when QoS is disabled, these two I/O:
+ * 1) are not aborted
+ * 2) are sent back to their original thread for resubmission
+ */
+ bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
+ set_thread(1);
+ bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+
+ /*
+ * Disable QoS: Read/Write IOPS, Read/Write byte,
+ * Read only byte rate limits
+ */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
+ limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Disable QoS: Write only Byte per second rate limit */
+ status = -1;
+ limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /*
+ * All I/O should have been resubmitted back on their original thread. Complete
+ * all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
+ */
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Disable QoS again */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0); /* This should succeed */
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /* Enable QoS on thread 0 */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Disable QoS on thread 1 */
+ set_thread(1);
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ /* Don't poll yet. This should leave the channels with QoS enabled */
+ CU_ASSERT(status == -1);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
+ second_status = 0;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
+ poll_threads();
+ CU_ASSERT(status == 0); /* The disable should succeed */
+ CU_ASSERT(second_status < 0); /* The enable should fail */
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /* Enable QoS on thread 1. This should succeed now that the disable has completed. */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ set_thread(0);
+ teardown_test();
+}
+
+static void
+histogram_status_cb(void *cb_arg, int status)
+{
+ g_status = status;
+}
+
+static void
+histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
+{
+ g_status = status;
+ g_histogram = histogram;
+}
+
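+/* Histogram iteration callback: accumulate the bucket counts into g_count. */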
+static void
+histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
+ uint64_t total, uint64_t so_far)
+{
+ g_count += count;
+}
+
+static void
+bdev_histograms_mt(void)
+{
+ struct spdk_io_channel *ch[2];
+ struct spdk_histogram_data *histogram;
+ uint8_t buf[4096];
+ int status = false;
+ int rc;
+
+ setup_test();
+
+ set_thread(0);
+ ch[0] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[0] != NULL);
+
+ set_thread(1);
+ ch[1] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[1] != NULL);
+
+ /* Enable histogram */
+ spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
+
+ /* Allocate histogram */
+ histogram = spdk_histogram_data_alloc();
+
+ /* Check if histogram is zeroed */
+ spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
+
+ g_count = 0;
+ spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
+
+ CU_ASSERT(g_count == 0);
+
+ set_thread(0);
+ rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+
+ spdk_delay_us(10);
+ stub_complete_io(g_bdev.io_target, 1);
+ poll_threads();
+ CU_ASSERT(status == true);
+
+ set_thread(1);
+ rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+
+ spdk_delay_us(10);
+ stub_complete_io(g_bdev.io_target, 1);
+ poll_threads();
+ CU_ASSERT(status == true);
+
+ set_thread(0);
+
+ /* Check if histogram gathered data from all I/O channels */
+ spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
+ SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
+
+ g_count = 0;
+ spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
+ CU_ASSERT(g_count == 2);
+
+ /* Disable histogram */
+ spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
+ poll_threads();
+ CU_ASSERT(g_status == 0);
+ CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
+
+ spdk_histogram_data_free(histogram);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(ch[1]);
+ poll_threads();
+ set_thread(0);
+ teardown_test();
+}
+
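+/* Captured by bdev_channel_io_timeout_cb() so the test can verify which I/O timed out. */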
+struct timeout_io_cb_arg {
+ struct iovec iov;
+ uint8_t type;
+};
+
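+/* Count the bdev I/O currently linked on the channel's io_submitted list. */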
+static int
+bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
+{
+ struct spdk_bdev_io *bdev_io;
+ int n = 0;
+
+ if (!ch) {
+ return -1;
+ }
+
+ TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
+ n++;
+ }
+
+ return n;
+}
+
+static void
+bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
+{
+ struct timeout_io_cb_arg *ctx = cb_arg;
+
+ ctx->type = bdev_io->type;
+ ctx->iov.iov_base = bdev_io->iov.iov_base;
+ ctx->iov.iov_len = bdev_io->iov.iov_len;
+}
+
+static bool g_io_done;
+
+static void
+io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_io_done = true;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+bdev_set_io_timeout_mt(void)
+{
+ struct spdk_io_channel *ch[3];
+ struct spdk_bdev_channel *bdev_ch[3];
+ struct timeout_io_cb_arg cb_arg;
+
+ setup_test();
+
+ g_bdev.bdev.optimal_io_boundary = 16;
+ g_bdev.bdev.split_on_optimal_io_boundary = true;
+
+ set_thread(0);
+ ch[0] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[0] != NULL);
+
+ set_thread(1);
+ ch[1] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[1] != NULL);
+
+ set_thread(2);
+ ch[2] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[2] != NULL);
+
+ /* Multi-thread mode
+ * 1. Check that the timeout poller was registered successfully.
+ * 2. Check the timed-out I/O and ensure it is the I/O that was submitted by the user.
+ * 3. Check that the I/O link in the bdev_ch works correctly.
+ * 4. Close the desc and put the io channel while the timeout poller is polling.
+ */
+
+ /* In desc thread set the timeout */
+ set_thread(0);
+ CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
+ CU_ASSERT(g_desc->io_timeout_poller != NULL);
+ CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
+ CU_ASSERT(g_desc->cb_arg == &cb_arg);
+
+ /* check the IO submitted list and timeout handler */
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
+ bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
+
+ set_thread(1);
+ CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
+ bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ set_thread(2);
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
+ bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
+
+ set_thread(0);
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ spdk_delay_us(3 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+ /* Now the elapsed time reaches the timeout limit */
+ spdk_delay_us(3 * spdk_get_ticks_hz());
+ poll_thread(0);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
+ CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
+
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(1);
+ poll_thread(1);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
+ CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
+
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(2);
+ poll_thread(2);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
+ CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
+
+ /* Run poll_timeout_done(), i.e. complete the timeout poller */
+ set_thread(0);
+ poll_thread(0);
+ CU_ASSERT(g_desc->refs == 0);
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
+ set_thread(1);
+ CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
+ set_thread(2);
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
+
+ /* Trigger the timeout poller to run again; desc->refs is incremented.
+ * On thread 0 we destroy the io channel before the timeout poller runs,
+ * so the timeout callback is not called on thread 0.
+ */
+ spdk_delay_us(6 * spdk_get_ticks_hz());
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 1);
+ spdk_put_io_channel(ch[0]);
+ poll_thread(0);
+ CU_ASSERT(g_desc->refs == 1);
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+ /* On thread 1 the timeout poller runs and then we destroy the io channel.
+ * The timeout callback is called on thread 1.
+ */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(1);
+ poll_thread(1);
+ stub_complete_io(g_bdev.io_target, 1);
+ spdk_put_io_channel(ch[1]);
+ poll_thread(1);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
+ CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
+
+ /* Close the desc.
+ * This unregisters the timeout poller first and then decrements desc->refs;
+ * the refcount is not zero yet, so the desc is not freed.
+ */
+ set_thread(0);
+ spdk_bdev_close(g_desc);
+ CU_ASSERT(g_desc->refs == 1);
+ CU_ASSERT(g_desc->io_timeout_poller == NULL);
+
+ /* The timeout poller runs on thread 2 and then we destroy the io channel.
+ * The desc is already closed, so the timeout poller exits immediately and
+ * the timeout callback is not called on thread 2.
+ */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(2);
+ poll_thread(2);
+ stub_complete_io(g_bdev.io_target, 1);
+ spdk_put_io_channel(ch[2]);
+ poll_thread(2);
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+ set_thread(0);
+ poll_thread(0);
+ g_teardown_done = false;
+ unregister_bdev(&g_bdev);
+ spdk_io_device_unregister(&g_io_device, NULL);
+ spdk_bdev_finish(finish_cb, NULL);
+ poll_threads();
+ memset(&g_bdev, 0, sizeof(g_bdev));
+ CU_ASSERT(g_teardown_done == true);
+ g_teardown_done = false;
+ free_threads();
+ free_cores();
+}
+
+static bool g_io_done2;
+static bool g_lock_lba_range_done;
+static bool g_unlock_lba_range_done;
+
+static void
+io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_io_done2 = true;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+lock_lba_range_done(void *ctx, int status)
+{
+ g_lock_lba_range_done = true;
+}
+
+static void
+unlock_lba_range_done(void *ctx, int status)
+{
+ g_unlock_lba_range_done = true;
+}
+
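+/* Return the stub module's outstanding I/O count for io_target on the current thread. */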
+static uint32_t
+stub_channel_outstanding_cnt(void *io_target)
+{
+ struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
+ struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+ uint32_t outstanding_cnt;
+
+ outstanding_cnt = ch->outstanding_cnt;
+
+ spdk_put_io_channel(_ch);
+ return outstanding_cnt;
+}
+
+static void
+lock_lba_range_then_submit_io(void)
+{
+ struct spdk_bdev_desc *desc = NULL;
+ void *io_target;
+ struct spdk_io_channel *io_ch[3];
+ struct spdk_bdev_channel *bdev_ch[3];
+ struct lba_range *range;
+ char buf[4096];
+ int ctx0, ctx1, ctx2;
+ int rc;
+
+ setup_test();
+
+ io_target = g_bdev.io_target;
+ desc = g_desc;
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(io_ch[0] != NULL);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(io_ch[1] != NULL);
+
+ set_thread(0);
+ g_lock_lba_range_done = false;
+ rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+
+ /* The lock should immediately become valid, since there are no outstanding
+ * write I/O.
+ */
+ CU_ASSERT(g_lock_lba_range_done == true);
+ range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges);
+ SPDK_CU_ASSERT_FATAL(range != NULL);
+ CU_ASSERT(range->offset == 20);
+ CU_ASSERT(range->length == 10);
+ CU_ASSERT(range->owner_ch == bdev_ch[0]);
+
+ g_io_done = false;
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
+ rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
+
+ stub_complete_io(io_target, 1);
+ poll_threads();
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
+
+ /* Try a write I/O. This should actually be allowed to execute, since the channel
+ * holding the lock is submitting the write I/O.
+ */
+ g_io_done = false;
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
+ rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
+
+ stub_complete_io(io_target, 1);
+ poll_threads();
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
+
+ /* Try a write I/O. This should get queued in the io_locked tailq. */
+ set_thread(1);
+ g_io_done = false;
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
+ rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked));
+ CU_ASSERT(g_io_done == false);
+
+ /* Try to unlock the lba range using thread 1's io_ch. This should fail. */
+ rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Now create a new channel and submit a write I/O with it. This should also be queued.
+ * The new channel should inherit the active locks from the bdev's internal list.
+ */
+ set_thread(2);
+ io_ch[2] = spdk_bdev_get_io_channel(desc);
+ bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
+ CU_ASSERT(io_ch[2] != NULL);
+
+ g_io_done2 = false;
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
+ rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked));
+ CU_ASSERT(g_io_done2 == false);
+
+ set_thread(0);
+ rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0);
+ CU_ASSERT(rc == 0);
+ poll_threads();
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges));
+
+ /* The LBA range is unlocked, so the write IOs should now have started execution. */
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
+
+ set_thread(1);
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
+ stub_complete_io(io_target, 1);
+ set_thread(2);
+ CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
+ stub_complete_io(io_target, 1);
+
+ poll_threads();
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_done2 == true);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ set_thread(2);
+ spdk_put_io_channel(io_ch[2]);
+ poll_threads();
+ set_thread(0);
+ teardown_test();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("bdev", NULL, NULL);
+
+ CU_ADD_TEST(suite, basic);
+ CU_ADD_TEST(suite, unregister_and_close);
+ CU_ADD_TEST(suite, basic_qos);
+ CU_ADD_TEST(suite, put_channel_during_reset);
+ CU_ADD_TEST(suite, aborted_reset);
+ CU_ADD_TEST(suite, io_during_reset);
+ CU_ADD_TEST(suite, io_during_qos_queue);
+ CU_ADD_TEST(suite, io_during_qos_reset);
+ CU_ADD_TEST(suite, enomem);
+ CU_ADD_TEST(suite, enomem_multi_bdev);
+ CU_ADD_TEST(suite, enomem_multi_io_target);
+ CU_ADD_TEST(suite, qos_dynamic_enable);
+ CU_ADD_TEST(suite, bdev_histograms_mt);
+ CU_ADD_TEST(suite, bdev_set_io_timeout_mt);
+ CU_ADD_TEST(suite, lock_lba_range_then_submit_io);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/part.c/.gitignore b/src/spdk/test/unit/lib/bdev/part.c/.gitignore
new file mode 100644
index 000000000..c8302779b
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/.gitignore
@@ -0,0 +1 @@
+part_ut
diff --git a/src/spdk/test/unit/lib/bdev/part.c/Makefile b/src/spdk/test/unit/lib/bdev/part.c/Makefile
new file mode 100644
index 000000000..9b9637dbb
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = part_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/part.c/part_ut.c b/src/spdk/test/unit/lib/bdev/part.c/part_ut.c
new file mode 100644
index 000000000..8bab15f48
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/part_ut.c
@@ -0,0 +1,173 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/ut_multithread.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "spdk_internal/thread.h"
+
+#include "bdev/bdev.c"
+#include "bdev/part.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
+DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
+
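+/* Release the name strings allocated for the part's bdev during construction. */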
+static void
+_part_cleanup(struct spdk_bdev_part *part)
+{
+ free(part->internal.bdev.name);
+ free(part->internal.bdev.product_name);
+}
+
+void
+spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+};
+
+static void vbdev_ut_examine(struct spdk_bdev *bdev);
+
+struct spdk_bdev_module vbdev_ut_if = {
+ .name = "vbdev_ut",
+ .examine_config = vbdev_ut_examine,
+};
+
+SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
+SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
+
+static void
+vbdev_ut_examine(struct spdk_bdev *bdev)
+{
+ spdk_bdev_module_examine_done(&vbdev_ut_if);
+}
+
+static int
+__destruct(void *ctx)
+{
+ return 0;
+}
+
+static struct spdk_bdev_fn_table base_fn_table = {
+ .destruct = __destruct,
+};
+static struct spdk_bdev_fn_table part_fn_table = {
+ .destruct = __destruct,
+};
+
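+/*
+ * Register a base bdev, construct two parts on top of it, then hot-remove the
+ * base and free everything again.
+ */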
+static void
+part_test(void)
+{
+ struct spdk_bdev_part_base *base;
+ struct spdk_bdev_part part1 = {};
+ struct spdk_bdev_part part2 = {};
+ struct spdk_bdev bdev_base = {};
+ SPDK_BDEV_PART_TAILQ tailq = TAILQ_HEAD_INITIALIZER(tailq);
+ int rc;
+
+ bdev_base.name = "base";
+ bdev_base.fn_table = &base_fn_table;
+ bdev_base.module = &bdev_ut_if;
+ rc = spdk_bdev_register(&bdev_base);
+ CU_ASSERT(rc == 0);
+ base = spdk_bdev_part_base_construct(&bdev_base, NULL, &vbdev_ut_if,
+ &part_fn_table, &tailq, NULL,
+ NULL, 0, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(base != NULL);
+
+ rc = spdk_bdev_part_construct(&part1, base, "test1", 0, 100, "test");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = spdk_bdev_part_construct(&part2, base, "test2", 100, 100, "test");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ spdk_bdev_part_base_hotremove(base, &tailq);
+
+ spdk_bdev_part_base_free(base);
+ _part_cleanup(&part1);
+ _part_cleanup(&part2);
+ spdk_bdev_unregister(&bdev_base, NULL, NULL);
+
+ poll_threads();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("bdev_part", NULL, NULL);
+
+ CU_ADD_TEST(suite, part_test);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/pmem/.gitignore b/src/spdk/test/unit/lib/bdev/pmem/.gitignore
new file mode 100644
index 000000000..b2e0df1eb
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/.gitignore
@@ -0,0 +1 @@
+bdev_pmem_ut
diff --git a/src/spdk/test/unit/lib/bdev/pmem/Makefile b/src/spdk/test/unit/lib/bdev/pmem/Makefile
new file mode 100644
index 000000000..cb601f1e0
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = bdev_pmem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c b/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c
new file mode 100644
index 000000000..8cd51e9f7
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c
@@ -0,0 +1,772 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/ut_multithread.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk_internal/thread.h"
+
+#include "bdev/pmem/bdev_pmem.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
+ (struct spdk_conf *cp, const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+
+static struct spdk_bdev_module *g_bdev_pmem_module;
+static int g_bdev_module_cnt;
+
+struct pmemblk {
+ const char *name;
+ bool is_open;
+ bool is_consistent;
+ size_t bsize;
+ long long nblock;
+
+ uint8_t *buffer;
+};
+
+static const char *g_bdev_name = "pmem0";
+
+/* PMEMblkpool is a typedef of struct pmemblk */
+static PMEMblkpool g_pool_ok = {
+ .name = "/pools/ok_pool",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 4096,
+ .nblock = 150
+};
+
+static PMEMblkpool g_pool_nblock_0 = {
+ .name = "/pools/nblock_0",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 4096,
+ .nblock = 0
+};
+
+static PMEMblkpool g_pool_bsize_0 = {
+ .name = "/pools/nblock_0",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 0,
+ .nblock = 100
+};
+
+static PMEMblkpool g_pool_inconsistent = {
+ .name = "/pools/inconsistent",
+ .is_open = false,
+ .is_consistent = false,
+ .bsize = 512,
+ .nblock = 1
+};
+
+static int g_opened_pools;
+static struct spdk_bdev *g_bdev;
+static const char *g_check_version_msg;
+static bool g_pmemblk_open_allow_open = true;
+
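+/* Look up one of the mocked pmemblk pools by path; sets errno to EINVAL or ENOENT on failure. */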
+static PMEMblkpool *
+find_pmemblk_pool(const char *path)
+{
+ if (path == NULL) {
+ errno = EINVAL;
+ return NULL;
+ } else if (strcmp(g_pool_ok.name, path) == 0) {
+ return &g_pool_ok;
+ } else if (strcmp(g_pool_nblock_0.name, path) == 0) {
+ return &g_pool_nblock_0;
+ } else if (strcmp(g_pool_bsize_0.name, path) == 0) {
+ return &g_pool_bsize_0;
+ } else if (strcmp(g_pool_inconsistent.name, path) == 0) {
+ return &g_pool_inconsistent;
+ }
+
+ errno = ENOENT;
+ return NULL;
+}
+
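+/* Mocked pmemblk_open(): track the pool's open state and the global count of opened pools. */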
+PMEMblkpool *
+pmemblk_open(const char *path, size_t bsize)
+{
+ PMEMblkpool *pool;
+
+ if (!g_pmemblk_open_allow_open) {
+ errno = EIO;
+ return NULL;
+ }
+
+ pool = find_pmemblk_pool(path);
+ if (!pool) {
+ errno = ENOENT;
+ return NULL;
+ }
+
+ CU_ASSERT_TRUE_FATAL(pool->is_consistent);
+ CU_ASSERT_FALSE(pool->is_open);
+ if (pool->is_open == false) {
+ pool->is_open = true;
+ g_opened_pools++;
+ } else {
+ errno = EBUSY;
+ pool = NULL;
+ }
+
+ return pool;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(NULL, bdev_io, true);
+}
+
+static void
+check_open_pool_fatal(PMEMblkpool *pool)
+{
+ SPDK_CU_ASSERT_FATAL(pool != NULL);
+ SPDK_CU_ASSERT_FATAL(find_pmemblk_pool(pool->name) == pool);
+ SPDK_CU_ASSERT_FATAL(pool->is_open == true);
+}
+
+void
+pmemblk_close(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ pool->is_open = false;
+ CU_ASSERT(g_opened_pools > 0);
+ g_opened_pools--;
+}
+
+size_t
+pmemblk_bsize(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ return pool->bsize;
+}
+
+size_t
+pmemblk_nblock(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ return pool->nblock;
+}
+
+int
+pmemblk_read(PMEMblkpool *pool, void *buf, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ memcpy(buf, &pool->buffer[blockno * pool->bsize], pool->bsize);
+ return 0;
+}
+
+int
+pmemblk_write(PMEMblkpool *pool, const void *buf, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ memcpy(&pool->buffer[blockno * pool->bsize], buf, pool->bsize);
+ return 0;
+}
+
+int
+pmemblk_set_zero(PMEMblkpool *pool, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+
+ errno = EINVAL;
+ return -1;
+ }
+
+ memset(&pool->buffer[blockno * pool->bsize], 0, pool->bsize);
+ return 0;
+}
+
+const char *
+pmemblk_errormsg(void)
+{
+ return strerror(errno);
+}
+
+const char *
+pmemblk_check_version(unsigned major_required, unsigned minor_required)
+{
+ return g_check_version_msg;
+}
+
+int
+pmemblk_check(const char *path, size_t bsize)
+{
+ PMEMblkpool *pool = find_pmemblk_pool(path);
+
+ if (!pool) {
+ errno = ENOENT;
+ return -1;
+ }
+
+ if (!pool->is_consistent) {
+ /* errno ? */
+ return 0;
+ }
+
+ if (bsize != 0 && pool->bsize != bsize) {
+ /* errno ? */
+ return 0;
+ }
+
+ return 1;
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+}
+
+int
+spdk_bdev_register(struct spdk_bdev *bdev)
+{
+ CU_ASSERT_PTR_NULL(g_bdev);
+ g_bdev = bdev;
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_bdev_module_finish_done(void)
+{
+}
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
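+/* Destruct the pmem bdev and clear the global pointer set by the spdk_bdev_register() stub. */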
+static void
+ut_bdev_pmem_destruct(struct spdk_bdev *bdev)
+{
+ SPDK_CU_ASSERT_FATAL(g_bdev != NULL);
+ CU_ASSERT_EQUAL(bdev_pmem_destruct(bdev->ctxt), 0);
+ g_bdev = NULL;
+}
+
+void
+spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
+{
+ g_bdev_pmem_module = bdev_module;
+ g_bdev_module_cnt++;
+}
+
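+/*
+ * Build a minimal spdk_bdev_io of the requested type, submit it to the pmem
+ * bdev and return the completion status recorded by the stubbed
+ * spdk_bdev_io_complete().
+ */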
+static int
+bdev_submit_request(struct spdk_bdev *bdev, int16_t io_type, uint64_t offset_blocks,
+ uint64_t num_blocks, struct iovec *iovs, size_t iov_cnt)
+{
+ struct spdk_bdev_io bio = { 0 };
+
+ switch (io_type) {
+ case SPDK_BDEV_IO_TYPE_READ:
+ bio.u.bdev.iovs = iovs;
+ bio.u.bdev.iovcnt = iov_cnt;
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_WRITE:
+ bio.u.bdev.iovs = iovs;
+ bio.u.bdev.iovcnt = iov_cnt;
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_FLUSH:
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_RESET:
+ break;
+ case SPDK_BDEV_IO_TYPE_UNMAP:
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ default:
+ CU_FAIL_FATAL("BUG:Unexpected IO type");
+ break;
+ }
+
+ /*
+ * Initialize the status to PENDING, a value that should never be
+ * returned once the request has been processed.
+ */
+ bio.type = io_type;
+ bio.internal.status = SPDK_BDEV_IO_STATUS_PENDING;
+ bio.bdev = bdev;
+ bdev_pmem_submit_request(NULL, &bio);
+ return bio.internal.status;
+}
+
+
+static int
+ut_pmem_blk_clean(void)
+{
+ free(g_pool_ok.buffer);
+ g_pool_ok.buffer = NULL;
+
+ /* Unload module to free IO channel */
+ g_bdev_pmem_module->module_fini();
+ poll_threads();
+
+ free_threads();
+
+ return 0;
+}
+
+static int
+ut_pmem_blk_init(void)
+{
+ errno = 0;
+
+ allocate_threads(1);
+ set_thread(0);
+
+ g_pool_ok.buffer = calloc(g_pool_ok.nblock, g_pool_ok.bsize);
+ if (g_pool_ok.buffer == NULL) {
+ ut_pmem_blk_clean();
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+ut_pmem_init(void)
+{
+ SPDK_CU_ASSERT_FATAL(g_bdev_pmem_module != NULL);
+ CU_ASSERT_EQUAL(g_bdev_module_cnt, 1);
+
+ /* Make pmemblk_check_version fail with provided error message */
+ g_check_version_msg = "TEST FAIL MESSAGE";
+ CU_ASSERT_NOT_EQUAL(g_bdev_pmem_module->module_init(), 0);
+
+ /* This init must succeed */
+ g_check_version_msg = NULL;
+ CU_ASSERT_EQUAL(g_bdev_pmem_module->module_init(), 0);
+}
+
+static void
+ut_pmem_open_close(void)
+{
+ struct spdk_bdev *bdev = NULL;
+ int pools_cnt;
+ int rc;
+
+ pools_cnt = g_opened_pools;
+
+ /* Try opening with NULL name */
+ rc = create_pmem_disk(NULL, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open non-existent pool */
+ rc = create_pmem_disk("non existent pool", NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open inconsistent pool */
+ rc = create_pmem_disk(g_pool_inconsistent.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Make the pool open fail for an unknown reason. */
+ g_pmemblk_open_allow_open = false;
+ rc = create_pmem_disk(g_pool_inconsistent.name, NULL, &bdev);
+ g_pmemblk_open_allow_open = true;
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open pool with nblocks = 0 */
+ rc = create_pmem_disk(g_pool_nblock_0.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open pool with bsize = 0 */
+ rc = create_pmem_disk(g_pool_bsize_0.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open a good pool but pass a NULL bdev name */
+ rc = create_pmem_disk(g_pool_ok.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open good pool */
+ rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ CU_ASSERT_TRUE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(pools_cnt + 1, g_opened_pools);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+}
+
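+/*
+ * Exercise writes and reads through the pmem bdev: out-of-range requests,
+ * IOVs whose lengths are not block aligned, and valid single- and multi-IOV
+ * transfers. Afterwards verify both the pool buffer and the read buffer.
+ */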
+static void
+ut_pmem_write_read(void)
+{
+ uint8_t *write_buf, *read_buf;
+ struct spdk_bdev *bdev;
+ int rc;
+ size_t unaligned_aligned_size = 100;
+ size_t buf_size = g_pool_ok.bsize * g_pool_ok.nblock;
+ size_t i;
+ const uint64_t nblock_offset = 10;
+ uint64_t offset;
+ size_t io_size, nblock, total_io_size, bsize;
+
+ bsize = 4096;
+ struct iovec iov[] = {
+ { 0, 2 * bsize },
+ { 0, 3 * bsize },
+ { 0, 4 * bsize },
+ };
+
+ rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ SPDK_CU_ASSERT_FATAL(g_pool_ok.nblock > 40);
+
+ write_buf = calloc(1, buf_size);
+ read_buf = calloc(1, buf_size);
+
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(write_buf != NULL);
+ SPDK_CU_ASSERT_FATAL(read_buf != NULL);
+
+ total_io_size = 0;
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < 3; i++) {
+ iov[i].iov_base = &write_buf[offset + total_io_size];
+ total_io_size += iov[i].iov_len;
+ }
+
+ for (i = 0; i < total_io_size + unaligned_aligned_size; i++) {
+ write_buf[offset + i] = 0x42 + i;
+ }
+
+ SPDK_CU_ASSERT_FATAL(total_io_size < buf_size);
+
+ /*
+ * Write outside pool.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, g_pool_ok.nblock, 1, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Write where the total IOV buffer length is too small for the request.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, 0, g_pool_ok.nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Try to write two IOVs where the first one has iov_len % bsize != 0.
+ */
+ io_size = iov[0].iov_len + iov[1].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[0].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, 0, nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+ iov[0].iov_len -= unaligned_aligned_size;
+
+ /*
+ * Try to write one IOV.
+ */
+ nblock = iov[0].iov_len / g_pool_ok.bsize;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, nblock_offset, nblock, &iov[0], 1);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /*
+ * Try to write 2 IOVs.
+ * The sum of the IOV lengths exceeds the IO size: the last IOV is oversized
+ * and its iov_len % bsize != 0.
+ */
+ offset = iov[0].iov_len / g_pool_ok.bsize;
+ io_size = iov[1].iov_len + iov[2].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[2].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, nblock_offset + offset, nblock,
+ &iov[1], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+ iov[2].iov_len -= unaligned_aligned_size;
+
+ /*
+ * Examine pool state:
+ * 1. Written area should have expected values.
+ * 2. Anything else should contain zeros.
+ */
+ offset = nblock_offset * g_pool_ok.bsize + total_io_size;
+ rc = memcmp(&g_pool_ok.buffer[0], write_buf, offset);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ for (i = offset; i < buf_size; i++) {
+ if (g_pool_ok.buffer[i] != 0) {
+ CU_ASSERT_EQUAL(g_pool_ok.buffer[i], 0);
+ break;
+ }
+ }
+
+ /* Setup IOV for reads */
+ memset(read_buf, 0xAB, buf_size);
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < 3; i++) {
+ iov[i].iov_base = &read_buf[offset];
+ offset += iov[i].iov_len;
+ }
+
+ /*
+ * Read outside pool.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, g_pool_ok.nblock, 1, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Read where the total IOV buffer length is too small for the request.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, g_pool_ok.nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Try to read two IOVs where the first one has iov_len % bsize != 0.
+ */
+ io_size = iov[0].iov_len + iov[1].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[0].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+ iov[0].iov_len -= unaligned_aligned_size;
+
+ /*
+ * Try to read one IOV.
+ */
+ nblock = iov[0].iov_len / g_pool_ok.bsize;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, nblock_offset, nblock, &iov[0], 1);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /*
+ * Try to read 2 IOVs.
+ * The sum of the IOV lengths exceeds the IO size: the last IOV is oversized
+ * and its iov_len % bsize != 0.
+ */
+ offset = iov[0].iov_len / g_pool_ok.bsize;
+ io_size = iov[1].iov_len + iov[2].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[2].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, nblock_offset + offset, nblock,
+ &iov[1], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+ iov[2].iov_len -= unaligned_aligned_size;
+
+
+ /*
+ * Examine the read buffer:
+ * 1. The area that was read back should match what was written.
+ * 2. Everything else should still hold the 0xAB fill pattern.
+ */
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < offset; i++) {
+ if (read_buf[i] != 0xAB) {
+ CU_ASSERT_EQUAL(read_buf[i], 0xAB);
+ break;
+ }
+ }
+
+ rc = memcmp(&read_buf[offset], &write_buf[offset], total_io_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ offset += total_io_size;
+ for (i = offset; i < buf_size; i++) {
+ if (read_buf[i] != 0xAB) {
+ CU_ASSERT_EQUAL(read_buf[i], 0xAB);
+ break;
+ }
+ }
+
+ memset(g_pool_ok.buffer, 0, g_pool_ok.bsize * g_pool_ok.nblock);
+ free(write_buf);
+ free(read_buf);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(g_opened_pools, 0);
+}
+
+static void
+ut_pmem_reset(void)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_RESET, 0, 0, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ ut_bdev_pmem_destruct(bdev);
+}
+
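+/*
+ * Common body for the UNMAP and WRITE_ZEROES tests: both IO types are expected
+ * to zero the requested block range and to fail for out-of-range requests.
+ */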
+static void
+ut_pmem_unmap_write_zero(int16_t io_type)
+{
+ struct spdk_bdev *bdev;
+ size_t buff_size = g_pool_ok.nblock * g_pool_ok.bsize;
+ size_t i;
+ uint8_t *buffer;
+ int rc;
+
+ CU_ASSERT(io_type == SPDK_BDEV_IO_TYPE_UNMAP || io_type == SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(g_pool_ok.nblock > 40);
+
+ buffer = calloc(1, buff_size);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ for (i = 10 * g_pool_ok.bsize; i < 30 * g_pool_ok.bsize; i++) {
+ buffer[i] = 0x30 + io_type + i;
+ }
+ memcpy(g_pool_ok.buffer, buffer, buff_size);
+
+ /*
+ * Block outside of pool.
+ */
+ rc = bdev_submit_request(bdev, io_type, g_pool_ok.nblock, 1, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /*
+ * Blocks 15 to 24 (10 blocks starting at block 15).
+ */
+ memset(&buffer[15 * g_pool_ok.bsize], 0, 10 * g_pool_ok.bsize);
+ rc = bdev_submit_request(bdev, io_type, 15, 10, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /*
+ * All blocks.
+ */
+ memset(buffer, 0, buff_size);
+ rc = bdev_submit_request(bdev, io_type, 0, g_pool_ok.nblock, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(g_opened_pools, 0);
+
+ free(buffer);
+}
+
+static void
+ut_pmem_write_zero(void)
+{
+ ut_pmem_unmap_write_zero(SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+}
+
+static void
+ut_pmem_unmap(void)
+{
+ ut_pmem_unmap_write_zero(SPDK_BDEV_IO_TYPE_UNMAP);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("bdev_pmem", ut_pmem_blk_init, ut_pmem_blk_clean);
+
+ CU_ADD_TEST(suite, ut_pmem_init);
+ CU_ADD_TEST(suite, ut_pmem_open_close);
+ CU_ADD_TEST(suite, ut_pmem_write_read);
+ CU_ADD_TEST(suite, ut_pmem_reset);
+ CU_ADD_TEST(suite, ut_pmem_write_zero);
+ CU_ADD_TEST(suite, ut_pmem_unmap);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/raid/Makefile b/src/spdk/test/unit/lib/bdev/raid/Makefile
new file mode 100644
index 000000000..0090a85ce
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/Makefile
@@ -0,0 +1,46 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev_raid.c
+
+DIRS-$(CONFIG_RAID5) += raid5.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore
new file mode 100644
index 000000000..98d1a166e
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore
@@ -0,0 +1 @@
+bdev_raid_ut
diff --git a/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile
new file mode 100644
index 000000000..da0ab94ba
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+
+TEST_FILE = bdev_raid_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c
new file mode 100644
index 000000000..6cf8e9f69
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c
@@ -0,0 +1,2258 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/env.h"
+#include "spdk_internal/mock.h"
+#include "bdev/raid/bdev_raid.c"
+#include "bdev/raid/bdev_raid_rpc.c"
+#include "bdev/raid/raid0.c"
+#include "common/lib/ut_multithread.c"
+
+#define MAX_BASE_DRIVES 32
+#define MAX_RAIDS 2
+#define INVALID_IO_SUBMIT 0xFFFF
+#define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5))
+#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)
+
+struct spdk_bdev_channel {
+ struct spdk_io_channel *channel;
+};
+
+/* Data structure to capture the output of IO for verification */
+struct io_output {
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *ch;
+ uint64_t offset_blocks;
+ uint64_t num_blocks;
+ spdk_bdev_io_completion_cb cb;
+ void *cb_arg;
+ enum spdk_bdev_io_type iotype;
+};
+
+struct raid_io_ranges {
+ uint64_t lba;
+ uint64_t nblocks;
+};
+
+/* Globals */
+int g_bdev_io_submit_status;
+struct io_output *g_io_output = NULL;
+uint32_t g_io_output_index;
+uint32_t g_io_comp_status;
+bool g_child_io_status_flag;
+void *g_rpc_req;
+uint32_t g_rpc_req_size;
+TAILQ_HEAD(bdev, spdk_bdev);
+struct bdev g_bdev_list;
+TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry);
+struct waitq g_io_waitq;
+uint32_t g_block_len;
+uint32_t g_strip_size;
+uint32_t g_max_io_size;
+uint8_t g_max_base_drives;
+uint8_t g_max_raids;
+uint8_t g_ignore_io_output;
+uint8_t g_rpc_err;
+char *g_get_raids_output[MAX_RAIDS];
+uint32_t g_get_raids_count;
+uint8_t g_json_decode_obj_err;
+uint8_t g_json_decode_obj_create;
+uint8_t g_config_level_create = 0;
+uint8_t g_test_multi_raids;
+struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE];
+uint32_t g_io_range_idx;
+uint64_t g_lba_offset;
+struct spdk_io_channel g_io_channel;
+
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), true);
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg), 0);
+DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp),
+ NULL);
+DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
+ uint32_t state_mask));
+DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias));
+DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
+ struct spdk_json_write_ctx *w));
+DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
+DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0);
+DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values,
+ spdk_json_decode_fn decode_func,
+ void *out, size_t max_size, size_t *out_size, size_t stride), 0);
+DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
+DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
+DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
+DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
+DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL);
+DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry), 0);
+
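+/* Stub: hand out the single global IO channel, bound to the calling thread. */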
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
+{
+ g_io_channel.thread = spdk_get_thread();
+
+ return &g_io_channel;
+}
+
+static void
+set_test_opts(void)
+{
+
+ g_max_base_drives = MAX_BASE_DRIVES;
+ g_max_raids = MAX_RAIDS;
+ g_block_len = 4096;
+ g_strip_size = 64;
+ g_max_io_size = 1024;
+
+ printf("Test Options\n");
+ printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, "
+ "g_max_raids = %u\n",
+ g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids);
+}
+
+/* Set globals before every test run */
+static void
+set_globals(void)
+{
+ uint32_t max_splits;
+
+ g_bdev_io_submit_status = 0;
+ if (g_max_io_size < g_strip_size) {
+ max_splits = 2;
+ } else {
+ max_splits = (g_max_io_size / g_strip_size) + 1;
+ }
+ if (max_splits < g_max_base_drives) {
+ max_splits = g_max_base_drives;
+ }
+
+ g_io_output = calloc(max_splits, sizeof(struct io_output));
+ SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
+ g_io_output_index = 0;
+ memset(g_get_raids_output, 0, sizeof(g_get_raids_output));
+ g_get_raids_count = 0;
+ g_io_comp_status = 0;
+ g_ignore_io_output = 0;
+ g_config_level_create = 0;
+ g_rpc_err = 0;
+ g_test_multi_raids = 0;
+ g_child_io_status_flag = true;
+ TAILQ_INIT(&g_bdev_list);
+ TAILQ_INIT(&g_io_waitq);
+ g_rpc_req = NULL;
+ g_rpc_req_size = 0;
+ g_json_decode_obj_err = 0;
+ g_json_decode_obj_create = 0;
+ g_lba_offset = 0;
+}
+
+static void
+base_bdevs_cleanup(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev *bdev_next;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
+ free(bdev->name);
+ TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
+ free(bdev);
+ }
+ }
+}
+
+static void
+check_and_remove_raid_bdev(struct raid_bdev_config *raid_cfg)
+{
+ struct raid_bdev *raid_bdev;
+ struct raid_base_bdev_info *base_info;
+
+ /* Get the raid bdev structure, if one was allocated */
+ raid_bdev = raid_cfg->raid_bdev;
+ if (raid_bdev == NULL) {
+ return;
+ }
+
+ assert(raid_bdev->base_bdev_info != NULL);
+
+ RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
+ if (base_info->bdev) {
+ raid_bdev_free_base_bdev_resource(raid_bdev, base_info);
+ }
+ }
+ assert(raid_bdev->num_base_bdevs_discovered == 0);
+ raid_bdev_cleanup(raid_bdev);
+}
+
+/* Reset globals */
+static void
+reset_globals(void)
+{
+ if (g_io_output) {
+ free(g_io_output);
+ g_io_output = NULL;
+ }
+ g_rpc_req = NULL;
+ g_rpc_req_size = 0;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
+ uint64_t len)
+{
+ cb(bdev_io->internal.ch->channel, bdev_io, true);
+}
+
+/* Store the IO completion status in a global variable so tests can verify it */
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false);
+}
+
+static void
+set_io_output(struct io_output *output,
+ struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg,
+ enum spdk_bdev_io_type iotype)
+{
+ output->desc = desc;
+ output->ch = ch;
+ output->offset_blocks = offset_blocks;
+ output->num_blocks = num_blocks;
+ output->cb = cb;
+ output->cb_arg = cb_arg;
+ output->iotype = iotype;
+}
+
+/* Cache the split IOs for later verification */
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ if (g_max_io_size < g_strip_size) {
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < 2);
+ } else {
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1);
+ }
+ if (g_bdev_io_submit_status == 0) {
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_WRITE);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ if (g_bdev_io_submit_status == 0) {
+ set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ if (g_bdev_io_submit_status == 0) {
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_UNMAP);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ bdev->fn_table->destruct(bdev->ctxt);
+
+ if (cb_fn) {
+ cb_fn(cb_arg, 0);
+ }
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc)
+{
+ *_desc = (void *)0x1;
+ return 0;
+}
+
+char *
+spdk_sprintf_alloc(const char *format, ...)
+{
+ return strdup(format);
+}
+
+int spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val)
+{
+ struct rpc_bdev_raid_create *req = g_rpc_req;
+ if (strcmp(name, "strip_size_kb") == 0) {
+ CU_ASSERT(req->strip_size_kb == val);
+ } else if (strcmp(name, "blocklen_shift") == 0) {
+ CU_ASSERT(spdk_u32log2(g_block_len) == val);
+ } else if (strcmp(name, "num_base_bdevs") == 0) {
+ CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
+ } else if (strcmp(name, "state") == 0) {
+ CU_ASSERT(val == RAID_BDEV_STATE_ONLINE);
+ } else if (strcmp(name, "destruct_called") == 0) {
+ CU_ASSERT(val == 0);
+ } else if (strcmp(name, "num_base_bdevs_discovered") == 0) {
+ CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
+ }
+ return 0;
+}
+
+int spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val)
+{
+ struct rpc_bdev_raid_create *req = g_rpc_req;
+ if (strcmp(name, "raid_level") == 0) {
+ CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0);
+ }
+ return 0;
+}
+
+void
+spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+ if (bdev_io) {
+ free(bdev_io);
+ }
+}
+
+/* Cache the split IOs for later verification */
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1);
+ if (g_bdev_io_submit_status == 0) {
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_READ);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+void
+spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
+{
+ CU_ASSERT(bdev->internal.claim_module != NULL);
+ bdev->internal.claim_module = NULL;
+}
+
+struct spdk_conf_section *
+spdk_conf_first_section(struct spdk_conf *cp)
+{
+ if (g_config_level_create) {
+ return (void *) 0x1;
+ }
+
+ return NULL;
+}
+
+bool
+spdk_conf_section_match_prefix(const struct spdk_conf_section *sp, const char *name_prefix)
+{
+ if (g_config_level_create) {
+ return true;
+ }
+
+ return false;
+}
+
+char *
+spdk_conf_section_get_val(struct spdk_conf_section *sp, const char *key)
+{
+ struct rpc_bdev_raid_create *req = g_rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "Name") == 0) {
+ return req->name;
+ } else if (strcmp(key, "RaidLevel") == 0) {
+ return (char *)raid_bdev_level_to_str(req->level);
+ }
+ }
+
+ return NULL;
+}
+
+int
+spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key)
+{
+ struct rpc_bdev_raid_create *req = g_rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "StripSize") == 0) {
+ return req->strip_size_kb;
+ } else if (strcmp(key, "NumDevices") == 0) {
+ return req->base_bdevs.num_base_bdevs;
+ }
+ }
+
+ return 0;
+}
+
+char *
+spdk_conf_section_get_nmval(struct spdk_conf_section *sp, const char *key, int idx1, int idx2)
+{
+ struct rpc_bdev_raid_create *req = g_rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "Devices") == 0) {
+ if (idx2 >= g_max_base_drives) {
+ return NULL;
+ }
+ return req->base_bdevs.base_bdevs[idx2];
+ }
+ }
+
+ return NULL;
+}
+
+int
+spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module)
+{
+ if (bdev->internal.claim_module != NULL) {
+ return -1;
+ }
+ bdev->internal.claim_module = module;
+ return 0;
+}
+
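+/*
+ * Stub JSON decoder: fail, deep-copy the prepared create request into the
+ * decoder output, or memcpy the raw request, depending on the test globals.
+ */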
+int
+spdk_json_decode_object(const struct spdk_json_val *values,
+ const struct spdk_json_object_decoder *decoders, size_t num_decoders,
+ void *out)
+{
+ struct rpc_bdev_raid_create *req, *_out;
+ size_t i;
+
+ if (g_json_decode_obj_err) {
+ return -1;
+ } else if (g_json_decode_obj_create) {
+ req = g_rpc_req;
+ _out = out;
+
+ _out->name = strdup(req->name);
+ SPDK_CU_ASSERT_FATAL(_out->name != NULL);
+ _out->strip_size_kb = req->strip_size_kb;
+ _out->level = req->level;
+ _out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs;
+ for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) {
+ _out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]);
+ SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]);
+ }
+ } else {
+ memcpy(out, g_rpc_req, g_rpc_req_size);
+ }
+
+ return 0;
+}
+
+struct spdk_json_write_ctx *
+spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
+{
+ return (void *)1;
+}
+
+int
+spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val)
+{
+ if (g_test_multi_raids) {
+ g_get_raids_output[g_get_raids_count] = strdup(val);
+ SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL);
+ g_get_raids_count++;
+ }
+
+ return 0;
+}
+
+void
+spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
+ int error_code, const char *msg)
+{
+ g_rpc_err = 1;
+}
+
+void
+spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
+ int error_code, const char *fmt, ...)
+{
+ g_rpc_err = 1;
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ struct spdk_bdev *bdev;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ if (strcmp(bdev_name, bdev->name) == 0) {
+ return bdev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void
+bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
+{
+ if (bdev_io->u.bdev.iovs) {
+ if (bdev_io->u.bdev.iovs->iov_base) {
+ free(bdev_io->u.bdev.iovs->iov_base);
+ }
+ free(bdev_io->u.bdev.iovs);
+ }
+ free(bdev_io);
+}
+
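+/* Fill in a bdev_io for the given LBA range and IO type, allocating a single
+ * IOV-backed buffer for read/write requests. */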
+static void
+bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
+ uint64_t lba, uint64_t blocks, int16_t iotype)
+{
+ struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
+
+ bdev_io->bdev = bdev;
+ bdev_io->u.bdev.offset_blocks = lba;
+ bdev_io->u.bdev.num_blocks = blocks;
+ bdev_io->type = iotype;
+
+ if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
+ return;
+ }
+
+ bdev_io->u.bdev.iovcnt = 1;
+ bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
+ bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * g_block_len);
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
+ bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * g_block_len;
+ bdev_io->internal.ch = channel;
+}
+
+static void
+verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
+ struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
+{
+ uint8_t index = 0;
+ struct io_output *output;
+
+ SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
+ SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT);
+ SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);
+
+ CU_ASSERT(g_io_output_index == num_base_drives);
+ for (index = 0; index < g_io_output_index; index++) {
+ output = &g_io_output[index];
+ CU_ASSERT(ch_ctx->base_channel[index] == output->ch);
+ CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc);
+ CU_ASSERT(bdev_io->type == output->iotype);
+ }
+ CU_ASSERT(g_io_comp_status == io_status);
+}
+
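+/*
+ * Verify a striped read/write: recompute, strip by strip, which base drive and
+ * LBA range each split IO should have hit and compare with the cached outputs.
+ */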
+static void
+verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
+ struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
+{
+ uint32_t strip_shift = spdk_u32log2(g_strip_size);
+ uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
+ uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
+ strip_shift;
+ uint32_t splits_reqd = (end_strip - start_strip + 1);
+ uint32_t strip;
+ uint64_t pd_strip;
+ uint8_t pd_idx;
+ uint32_t offset_in_strip;
+ uint64_t pd_lba;
+ uint64_t pd_blocks;
+ uint32_t index = 0;
+ uint8_t *buf = bdev_io->u.bdev.iovs->iov_base;
+ struct io_output *output;
+
+ if (io_status == INVALID_IO_SUBMIT) {
+ CU_ASSERT(g_io_comp_status == false);
+ return;
+ }
+ SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
+
+ CU_ASSERT(splits_reqd == g_io_output_index);
+ for (strip = start_strip; strip <= end_strip; strip++, index++) {
+ pd_strip = strip / num_base_drives;
+ pd_idx = strip % num_base_drives;
+ if (strip == start_strip) {
+ offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1);
+ pd_lba = (pd_strip << strip_shift) + offset_in_strip;
+ if (strip == end_strip) {
+ pd_blocks = bdev_io->u.bdev.num_blocks;
+ } else {
+ pd_blocks = g_strip_size - offset_in_strip;
+ }
+ } else if (strip == end_strip) {
+ pd_lba = pd_strip << strip_shift;
+ pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) &
+ (g_strip_size - 1)) + 1;
+ } else {
+ pd_lba = pd_strip << raid_bdev->strip_size_shift;
+ pd_blocks = raid_bdev->strip_size;
+ }
+ output = &g_io_output[index];
+ CU_ASSERT(pd_lba == output->offset_blocks);
+ CU_ASSERT(pd_blocks == output->num_blocks);
+ CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch);
+ CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc);
+ CU_ASSERT(bdev_io->type == output->iotype);
+ buf += (pd_blocks << spdk_u32log2(g_block_len));
+ }
+ CU_ASSERT(g_io_comp_status == io_status);
+}
+
+static void
+verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
+ struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev,
+ uint32_t io_status)
+{
+ uint32_t strip_shift = spdk_u32log2(g_strip_size);
+ uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size;
+ uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) %
+ g_strip_size;
+ uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
+ uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
+ strip_shift;
+ uint8_t n_disks_involved;
+ uint64_t start_strip_disk_idx;
+ uint64_t end_strip_disk_idx;
+ uint64_t nblocks_in_start_disk;
+ uint64_t offset_in_start_disk;
+ uint8_t disk_idx;
+ uint64_t base_io_idx;
+ uint64_t sum_nblocks = 0;
+ struct io_output *output;
+
+ if (io_status == INVALID_IO_SUBMIT) {
+ CU_ASSERT(g_io_comp_status == false);
+ return;
+ }
+ SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
+ SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ);
+ SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE);
+
+ n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives);
+ CU_ASSERT(n_disks_involved == g_io_output_index);
+
+ start_strip_disk_idx = start_strip % num_base_drives;
+ end_strip_disk_idx = end_strip % num_base_drives;
+
+ offset_in_start_disk = g_io_output[0].offset_blocks;
+ nblocks_in_start_disk = g_io_output[0].num_blocks;
+
+ for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved;
+ base_io_idx++, disk_idx++) {
+ uint64_t start_offset_in_disk;
+ uint64_t end_offset_in_disk;
+
+ output = &g_io_output[base_io_idx];
+
+ /* wrap disk_idx around the number of base drives */
+ if (disk_idx >= num_base_drives) {
+ disk_idx %= num_base_drives;
+ }
+
+ /* start_offset_in_disk aligned-in-strip check:
+ * The first base IO has the same start_offset_in_strip as the whole raid IO.
+ * Every other base IO should start on a strip boundary, i.e. its
+ * start_offset_in_strip is 0.
+ */
+ start_offset_in_disk = output->offset_blocks;
+ if (base_io_idx == 0) {
+ CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip);
+ } else {
+ CU_ASSERT(start_offset_in_disk % g_strip_size == 0);
+ }
+
+ /* end_offset_in_disk aligned-in-strip check:
+ * The base IO on the disk where end_strip is located has the same
+ * end_offset_in_strip as the whole raid IO.
+ * Every other base IO should end on a strip boundary.
+ */
+ end_offset_in_disk = output->offset_blocks + output->num_blocks - 1;
+ if (disk_idx == end_strip_disk_idx) {
+ CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip);
+ } else {
+ CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1);
+ }
+
+ /* start_offset_in_disk compared with the start disk:
+ * 1. For disk_idx greater than start_strip_disk_idx, its start_offset_in_disk
+ * must not exceed offset_in_start_disk, and the gap must be less than the
+ * strip size.
+ * 2. For disk_idx less than start_strip_disk_idx, its start_offset_in_disk
+ * must be greater than offset_in_start_disk, and the gap must not exceed
+ * the strip size.
+ */
+ if (disk_idx > start_strip_disk_idx) {
+ CU_ASSERT(start_offset_in_disk <= offset_in_start_disk);
+ CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size);
+ } else if (disk_idx < start_strip_disk_idx) {
+ CU_ASSERT(start_offset_in_disk > offset_in_start_disk);
+ CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size);
+ }
+
+ /* num_blocks compared with the start disk:
+ * the difference between them must be within one strip size.
+ */
+ if (output->num_blocks <= nblocks_in_start_disk) {
+ CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size);
+ } else {
+ CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size);
+ }
+
+ sum_nblocks += output->num_blocks;
+
+ CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch);
+ CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc);
+ CU_ASSERT(bdev_io->type == output->iotype);
+ }
+
+ /* The sum of the base IO block counts should equal the raid bdev_io num_blocks */
+ CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks);
+
+ CU_ASSERT(g_io_comp_status == io_status);
+}
+
+static void
+verify_raid_config_present(const char *name, bool presence)
+{
+ struct raid_bdev_config *raid_cfg;
+ bool cfg_found;
+
+ cfg_found = false;
+
+ TAILQ_FOREACH(raid_cfg, &g_raid_config.raid_bdev_config_head, link) {
+ if (raid_cfg->name != NULL) {
+ if (strcmp(name, raid_cfg->name) == 0) {
+ cfg_found = true;
+ break;
+ }
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(cfg_found == true);
+ } else {
+ CU_ASSERT(cfg_found == false);
+ }
+}
+
+static void
+verify_raid_bdev_present(const char *name, bool presence)
+{
+ struct raid_bdev *pbdev;
+ bool pbdev_found;
+
+ pbdev_found = false;
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+}
+
+static void
+verify_raid_config(struct rpc_bdev_raid_create *r, bool presence)
+{
+ struct raid_bdev_config *raid_cfg = NULL;
+ uint8_t i;
+ int val;
+
+ TAILQ_FOREACH(raid_cfg, &g_raid_config.raid_bdev_config_head, link) {
+ if (strcmp(r->name, raid_cfg->name) == 0) {
+ if (presence == false) {
+ break;
+ }
+ CU_ASSERT(raid_cfg->raid_bdev != NULL);
+ CU_ASSERT(raid_cfg->strip_size == r->strip_size_kb);
+ CU_ASSERT(raid_cfg->num_base_bdevs == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(raid_cfg->level == r->level);
+ if (raid_cfg->base_bdev != NULL) {
+ for (i = 0; i < raid_cfg->num_base_bdevs; i++) {
+ val = strcmp(raid_cfg->base_bdev[i].name,
+ r->base_bdevs.base_bdevs[i]);
+ CU_ASSERT(val == 0);
+ }
+ }
+ break;
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(raid_cfg != NULL);
+ } else {
+ CU_ASSERT(raid_cfg == NULL);
+ }
+}
+
+static void
+verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state)
+{
+ struct raid_bdev *pbdev;
+ struct raid_base_bdev_info *base_info;
+ struct spdk_bdev *bdev = NULL;
+ bool pbdev_found;
+ uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF;
+
+ pbdev_found = false;
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ if (presence == false) {
+ break;
+ }
+ CU_ASSERT(pbdev->config->raid_bdev == pbdev);
+ CU_ASSERT(pbdev->base_bdev_info != NULL);
+ CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len));
+ CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) /
+ g_block_len)));
+ CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len));
+ CU_ASSERT(pbdev->state == raid_state);
+ CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(pbdev->level == r->level);
+ CU_ASSERT(pbdev->destruct_called == false);
+ CU_ASSERT(pbdev->base_bdev_info != NULL);
+ RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) {
+ CU_ASSERT(base_info->bdev != NULL);
+ bdev = spdk_bdev_get_by_name(base_info->bdev->name);
+ CU_ASSERT(bdev != NULL);
+ CU_ASSERT(base_info->remove_scheduled == false);
+
+ if (bdev && bdev->blockcnt < min_blockcnt) {
+ min_blockcnt = bdev->blockcnt;
+ }
+ }
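+ /* RAID0 capacity: smallest base bdev rounded down to whole strips,
+ * multiplied by the number of base bdevs. */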
+ CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) *
+ (r->strip_size_kb * 1024 / g_block_len)) *
+ r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt);
+ CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0);
+ CU_ASSERT(pbdev->bdev.write_cache == 0);
+ CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
+ if (pbdev->num_base_bdevs > 1) {
+ CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size);
+ CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true);
+ } else {
+ CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0);
+ CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false);
+ }
+ CU_ASSERT(pbdev->bdev.ctxt == pbdev);
+ CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
+ CU_ASSERT(pbdev->bdev.module == &g_raid_if);
+ break;
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+ pbdev_found = false;
+ if (raid_state == RAID_BDEV_STATE_ONLINE) {
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_configured_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ } else if (raid_state == RAID_BDEV_STATE_CONFIGURING) {
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_configuring_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ } else if (raid_state == RAID_BDEV_STATE_OFFLINE) {
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_offline_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+}
+
+static void
+verify_get_raids(struct rpc_bdev_raid_create *construct_req,
+ uint8_t g_max_raids,
+ char **g_get_raids_output, uint32_t g_get_raids_count)
+{
+ uint8_t i, j;
+ bool found;
+
+ CU_ASSERT(g_max_raids == g_get_raids_count);
+ if (g_max_raids == g_get_raids_count) {
+ for (i = 0; i < g_max_raids; i++) {
+ found = false;
+ for (j = 0; j < g_max_raids; j++) {
+ if (construct_req[i].name &&
+ strcmp(construct_req[i].name, g_get_raids_output[j]) == 0) {
+ found = true;
+ break;
+ }
+ }
+ CU_ASSERT(found == true);
+ }
+ }
+}
+
+static void
+create_base_bdevs(uint32_t bbdev_start_idx)
+{
+ uint8_t i;
+ struct spdk_bdev *base_bdev;
+ char name[16];
+
+ for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) {
+ snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1");
+ base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
+ base_bdev->name = strdup(name);
+ SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
+ base_bdev->blocklen = g_block_len;
+ base_bdev->blockcnt = BLOCK_CNT;
+ TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
+ }
+}
+
+static void
+create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name,
+ uint8_t bbdev_start_idx, bool create_base_bdev)
+{
+ uint8_t i;
+ char name[16];
+ uint8_t bbdev_idx = bbdev_start_idx;
+
+ r->name = strdup(raid_name);
+ SPDK_CU_ASSERT_FATAL(r->name != NULL);
+ r->strip_size_kb = (g_strip_size * g_block_len) / 1024;
+ r->level = RAID0;
+ r->base_bdevs.num_base_bdevs = g_max_base_drives;
+ for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) {
+ snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1");
+ r->base_bdevs.base_bdevs[i] = strdup(name);
+ SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL);
+ }
+ if (create_base_bdev == true) {
+ create_base_bdevs(bbdev_start_idx);
+ }
+ g_rpc_req = r;
+ g_rpc_req_size = sizeof(*r);
+}
+
+static void
+create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name,
+ uint8_t bbdev_start_idx, bool create_base_bdev,
+ uint8_t json_decode_obj_err)
+{
+ create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev);
+
+ g_rpc_err = 0;
+ g_json_decode_obj_create = 1;
+ g_json_decode_obj_err = json_decode_obj_err;
+ g_config_level_create = 0;
+ g_test_multi_raids = 0;
+}
+
+static void
+create_raid_bdev_create_config(struct rpc_bdev_raid_create *r, const char *raid_name,
+ uint8_t bbdev_start_idx, bool create_base_bdev)
+{
+ create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev);
+
+ g_config_level_create = 1;
+ g_test_multi_raids = 0;
+}
+
+static void
+free_test_req(struct rpc_bdev_raid_create *r)
+{
+ uint8_t i;
+
+ free(r->name);
+ for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) {
+ free(r->base_bdevs.base_bdevs[i]);
+ }
+}
+
+static void
+create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name,
+ uint8_t json_decode_obj_err)
+{
+ r->name = strdup(raid_name);
+ SPDK_CU_ASSERT_FATAL(r->name != NULL);
+
+ g_rpc_req = r;
+ g_rpc_req_size = sizeof(*r);
+ g_rpc_err = 0;
+ g_json_decode_obj_create = 0;
+ g_json_decode_obj_err = json_decode_obj_err;
+ g_config_level_create = 0;
+ g_test_multi_raids = 0;
+}
+
+static void
+create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category,
+ uint8_t json_decode_obj_err)
+{
+ r->category = strdup(category);
+ SPDK_CU_ASSERT_FATAL(r->category != NULL);
+
+ g_rpc_req = r;
+ g_rpc_req_size = sizeof(*r);
+ g_rpc_err = 0;
+ g_json_decode_obj_create = 0;
+ g_json_decode_obj_err = json_decode_obj_err;
+ g_config_level_create = 0;
+ g_test_multi_raids = 1;
+ g_get_raids_count = 0;
+}
+
+static void
+test_create_raid(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete delete_req;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&req);
+
+ create_raid_bdev_delete_req(&delete_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_delete_raid(void)
+{
+ struct rpc_bdev_raid_create construct_req;
+ struct rpc_bdev_raid_delete delete_req;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req, true);
+ verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&construct_req);
+
+ create_raid_bdev_delete_req(&delete_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_create_raid_invalid_args(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev_config *raid_cfg;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ req.level = INVALID_RAID_LEVEL;
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, false, 1);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, false, 0);
+ req.strip_size_kb = 1231;
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, false, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&req);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, false, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+
+ create_raid_bdev_create_req(&req, "raid2", 0, false, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", false);
+ verify_raid_bdev_present("raid2", false);
+
+ create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0);
+ free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
+ req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1");
+ SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", false);
+ verify_raid_bdev_present("raid2", false);
+
+ create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0);
+ free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
+ req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1");
+ SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", true);
+ verify_raid_bdev_present("raid2", true);
+ raid_cfg = raid_bdev_config_find_by_name("raid2");
+ SPDK_CU_ASSERT_FATAL(raid_cfg != NULL);
+ check_and_remove_raid_bdev(raid_cfg);
+ raid_bdev_config_cleanup(raid_cfg);
+
+ create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", true);
+ verify_raid_bdev_present("raid2", true);
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_delete_raid_invalid_args(void)
+{
+ struct rpc_bdev_raid_create construct_req;
+ struct rpc_bdev_raid_delete destroy_req;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req, true);
+ verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&construct_req);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 1);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free(destroy_req.name);
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_io_channel(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch_ctx = calloc(1, sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free_test_req(&req);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ free(ch_ctx);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_write_io(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint64_t io_len;
+ uint64_t lba = 0;
+ struct spdk_io_channel *ch_b;
+ struct spdk_bdev_channel *ch_b_ctx;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel));
+ SPDK_CU_ASSERT_FATAL(ch_b != NULL);
+ ch_b_ctx = spdk_io_channel_get_ctx(ch_b);
+ ch_b_ctx->channel = ch;
+
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+
+ /* test 2 IO sizes based on global strip size set earlier */
+ for (i = 0; i < 2; i++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (g_strip_size / 2) << i;
+ bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ }
+
+ free_test_req(&req);
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ free(ch_b);
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
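+/* Test read IO submission with two IO sizes derived from the global strip size */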
+static void
+test_read_io(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint64_t io_len;
+ uint64_t lba;
+ struct spdk_io_channel *ch_b;
+ struct spdk_bdev_channel *ch_b_ctx;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel));
+ SPDK_CU_ASSERT_FATAL(ch_b != NULL);
+ ch_b_ctx = spdk_io_channel_get_ctx(ch_b);
+ ch_b_ctx->channel = ch;
+
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+ free_test_req(&req);
+
+ /* test 2 IO sizes based on global strip size set earlier */
+ lba = 0;
+ for (i = 0; i < 2; i++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (g_strip_size / 2) << i;
+ bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ }
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ free(ch_b);
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
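+/* Generate IO ranges spanning n_strips strips with varying start/end offsets and starting base bdevs */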
+static void
+raid_bdev_io_generate_by_strips(uint64_t n_strips)
+{
+ uint64_t lba;
+ uint64_t nblocks;
+ uint64_t start_offset;
+ uint64_t end_offset;
+ uint64_t offsets_in_strip[3];
+ uint64_t start_bdev_idx;
+ uint64_t start_bdev_offset;
+ uint64_t start_bdev_idxs[3];
+ int i, j, l;
+
+ /* three different offsets within a strip */
+ offsets_in_strip[0] = 0;
+ offsets_in_strip[1] = g_strip_size >> 1;
+ offsets_in_strip[2] = g_strip_size - 1;
+
+ /* three different starting base bdev indexes */
+ start_bdev_idxs[0] = 0;
+ start_bdev_idxs[1] = g_max_base_drives >> 1;
+ start_bdev_idxs[2] = g_max_base_drives - 1;
+
+ /* iterate over start and end offsets within a strip */
+ for (i = 0; i < 3; i++) {
+ start_offset = offsets_in_strip[i];
+ for (j = 0; j < 3; j++) {
+ end_offset = offsets_in_strip[j];
+ if (n_strips == 1 && start_offset > end_offset) {
+ continue;
+ }
+
+ /* consider which base bdev the LBA starts on */
+ for (l = 0; l < 3; l++) {
+ start_bdev_idx = start_bdev_idxs[l];
+ start_bdev_offset = start_bdev_idx * g_strip_size;
+ lba = g_lba_offset + start_bdev_offset + start_offset;
+ nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1;
+
+ g_io_ranges[g_io_range_idx].lba = lba;
+ g_io_ranges[g_io_range_idx].nblocks = nblocks;
+
+ SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE);
+ g_io_range_idx++;
+ }
+ }
+ }
+}
+
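+/* Populate g_io_ranges with ranges from a single strip up to multiples of the strips spanned by the base bdevs */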
+static void
+raid_bdev_io_generate(void)
+{
+ uint64_t n_strips;
+ uint64_t n_strips_span = g_max_base_drives;
+ uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1,
+ g_max_base_drives * 2, g_max_base_drives * 3,
+ g_max_base_drives * 4
+ };
+ uint32_t i;
+
+ g_io_range_idx = 0;
+
+ /* consider strip counts from 1 up to the number of strips spanning the base
+ * bdevs, and also multiples of that span
+ */
+ for (n_strips = 1; n_strips < n_strips_span; n_strips++) {
+ raid_bdev_io_generate_by_strips(n_strips);
+ }
+
+ for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) {
+ n_strips = n_strips_times[i];
+ raid_bdev_io_generate_by_strips(n_strips);
+ }
+}
+
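+/* Test unmap IO submission over the generated IO ranges */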
+static void
+test_unmap_io(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+
+ CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true);
+
+ raid_bdev_io_generate();
+ for (count = 0; count < g_io_range_idx; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = g_io_ranges[count].nblocks;
+ lba = g_io_ranges[count].lba;
+ bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP);
+ memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ }
+ free_test_req(&req);
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Test IO failures */
+static void
+test_io_failure(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+ free_test_req(&req);
+
+ lba = 0;
+ for (count = 0; count < 1; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (g_strip_size / 2) << count;
+ bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ INVALID_IO_SUBMIT);
+ bdev_io_cleanup(bdev_io);
+ }
+
+
+ lba = 0;
+ g_child_io_status_flag = false;
+ for (count = 0; count < 1; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (g_strip_size / 2) << count;
+ bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ }
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Test reset IO */
+static void
+test_reset_io(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint8_t i;
+ struct spdk_bdev_io *bdev_io;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ SPDK_CU_ASSERT_FATAL(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
+ }
+ free_test_req(&req);
+
+ g_bdev_io_submit_status = 0;
+ g_child_io_status_flag = true;
+
+ CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true);
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET);
+ memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ true);
+ bdev_io_cleanup(bdev_io);
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Create multiple raids, destroy them without submitting IO, and exercise the get_raids RPC */
+static void
+test_multi_raid_no_io(void)
+{
+ struct rpc_bdev_raid_create *construct_req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct rpc_bdev_raid_get_bdevs get_raids_req;
+ uint8_t i;
+ char name[16];
+ uint8_t bbdev_idx = 0;
+
+ set_globals();
+ construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create));
+ SPDK_CU_ASSERT_FATAL(construct_req != NULL);
+ CU_ASSERT(raid_bdev_init() == 0);
+ for (i = 0; i < g_max_raids; i++) {
+ snprintf(name, 16, "%s%u", "raid", i);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0);
+ bbdev_idx += g_max_base_drives;
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req[i], true);
+ verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
+ }
+
+ create_get_raids_req(&get_raids_req, "all", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
+ for (i = 0; i < g_get_raids_count; i++) {
+ free(g_get_raids_output[i]);
+ }
+
+ create_get_raids_req(&get_raids_req, "online", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
+ for (i = 0; i < g_get_raids_count; i++) {
+ free(g_get_raids_output[i]);
+ }
+
+ create_get_raids_req(&get_raids_req, "configuring", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ create_get_raids_req(&get_raids_req, "offline", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ create_get_raids_req(&get_raids_req, "invalid_category", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ create_get_raids_req(&get_raids_req, "all", 1);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free(get_raids_req.category);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ create_get_raids_req(&get_raids_req, "all", 0);
+ rpc_bdev_raid_get_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ CU_ASSERT(g_get_raids_count == g_max_raids);
+ for (i = 0; i < g_get_raids_count; i++) {
+ free(g_get_raids_output[i]);
+ }
+
+ for (i = 0; i < g_max_raids; i++) {
+ SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
+ snprintf(name, 16, "%s", construct_req[i].name);
+ create_raid_bdev_delete_req(&destroy_req, name, 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ }
+ raid_bdev_exit();
+ for (i = 0; i < g_max_raids; i++) {
+ free_test_req(&construct_req[i]);
+ }
+ free(construct_req);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Create multiple raids and submit IO to each of them */
+static void
+test_multi_raid_with_io(void)
+{
+ struct rpc_bdev_raid_create *construct_req;
+ struct rpc_bdev_raid_delete destroy_req;
+ uint8_t i, j;
+ char name[16];
+ uint8_t bbdev_idx = 0;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx = NULL;
+ struct spdk_bdev_io *bdev_io;
+ uint64_t io_len;
+ uint64_t lba = 0;
+ int16_t iotype;
+ struct spdk_io_channel *ch_b;
+ struct spdk_bdev_channel *ch_b_ctx;
+
+ set_globals();
+ construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create));
+ SPDK_CU_ASSERT_FATAL(construct_req != NULL);
+ CU_ASSERT(raid_bdev_init() == 0);
+ ch = calloc(g_max_raids, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel));
+ SPDK_CU_ASSERT_FATAL(ch_b != NULL);
+ ch_b_ctx = spdk_io_channel_get_ctx(ch_b);
+ ch_b_ctx->channel = ch;
+
+ for (i = 0; i < g_max_raids; i++) {
+ snprintf(name, 16, "%s%u", "raid", i);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0);
+ bbdev_idx += g_max_base_drives;
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req[i], true);
+ verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);
+ for (j = 0; j < construct_req[i].base_bdevs.num_base_bdevs; j++) {
+ CU_ASSERT(ch_ctx->base_channel[j] == &g_io_channel);
+ }
+ }
+
+ /* This performs a read on the first raid and a write on the second. It can be
+ * expanded in the future to perform r/w on each raid device in the event that
+ * multiple raid levels are supported.
+ */
+ for (i = 0; i < g_max_raids; i++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = g_strip_size;
+ iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
+ break;
+ }
+ }
+ bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, iotype);
+ CU_ASSERT(pbdev != NULL);
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ }
+
+ for (i = 0; i < g_max_raids; i++) {
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ snprintf(name, 16, "%s", construct_req[i].name);
+ create_raid_bdev_delete_req(&destroy_req, name, 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ }
+ raid_bdev_exit();
+ for (i = 0; i < g_max_raids; i++) {
+ free_test_req(&construct_req[i]);
+ }
+ free(construct_req);
+ free(ch);
+ free(ch_b);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
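+/* Test which IO types the raid bdev reports as supported */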
+static void
+test_io_type_supported(void)
+{
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
+}
+
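+/* Test creating a raid bdev from a config section and examining the base bdevs */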
+static void
+test_create_raid_from_config(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct spdk_bdev *bdev;
+ struct rpc_bdev_raid_delete destroy_req;
+ bool can_claim;
+ struct raid_bdev_config *raid_cfg;
+ uint8_t base_bdev_slot;
+
+ set_globals();
+ create_raid_bdev_create_config(&req, "raid1", 0, true);
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ raid_bdev_examine(bdev);
+ }
+
+ can_claim = raid_bdev_can_claim_bdev("Invalid", &raid_cfg, &base_bdev_slot);
+ CU_ASSERT(can_claim == false);
+
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ free_test_req(&req);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
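+/* Test that raid bdev creation from config fails for invalid parameters */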
+static void
+test_create_raid_from_config_invalid_params(void)
+{
+ struct rpc_bdev_raid_create req;
+
+ set_globals();
+
+ create_raid_bdev_create_config(&req, "raid1", 0, true);
+ free(req.name);
+ req.name = NULL;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ req.strip_size_kb = 1234;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ req.level = INVALID_RAID_LEVEL;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ req.level = INVALID_RAID_LEVEL;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ req.base_bdevs.num_base_bdevs++;
+ CU_ASSERT(raid_bdev_init() != 0);
+ req.base_bdevs.num_base_bdevs--;
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ req.base_bdevs.num_base_bdevs--;
+ CU_ASSERT(raid_bdev_init() != 0);
+ req.base_bdevs.num_base_bdevs++;
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ if (g_max_base_drives > 1) {
+ create_raid_bdev_create_config(&req, "raid1", 0, false);
+ snprintf(req.base_bdevs.base_bdevs[g_max_base_drives - 1], 15, "%s", "Nvme0n1");
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ }
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
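+/* Test dumping raid bdev info to JSON */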
+static void
+test_raid_json_dump_info(void)
+{
+ struct rpc_bdev_raid_create req;
+ struct rpc_bdev_raid_delete destroy_req;
+ struct raid_bdev *pbdev;
+
+ set_globals();
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ create_raid_bdev_create_req(&req, "raid1", 0, true, 0);
+ rpc_bdev_raid_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, "raid1") == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+
+ CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);
+
+ free_test_req(&req);
+
+ create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
+ rpc_bdev_raid_delete(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
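+/* Test that the reported bdev IO context size matches struct raid_bdev_io */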
+static void
+test_context_size(void)
+{
+ CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
+}
+
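+/* Test conversion between raid level strings and enum values */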
+static void
+test_raid_level_conversions(void)
+{
+ const char *raid_str;
+
+ CU_ASSERT(raid_bdev_parse_raid_level("abcd123") == INVALID_RAID_LEVEL);
+ CU_ASSERT(raid_bdev_parse_raid_level("0") == RAID0);
+ CU_ASSERT(raid_bdev_parse_raid_level("raid0") == RAID0);
+ CU_ASSERT(raid_bdev_parse_raid_level("RAID0") == RAID0);
+
+ raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL);
+ CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
+ raid_str = raid_bdev_level_to_str(1234);
+ CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
+ raid_str = raid_bdev_level_to_str(RAID0);
+ CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("raid", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_create_raid);
+ CU_ADD_TEST(suite, test_delete_raid);
+ CU_ADD_TEST(suite, test_create_raid_invalid_args);
+ CU_ADD_TEST(suite, test_delete_raid_invalid_args);
+ CU_ADD_TEST(suite, test_io_channel);
+ CU_ADD_TEST(suite, test_reset_io);
+ CU_ADD_TEST(suite, test_write_io);
+ CU_ADD_TEST(suite, test_read_io);
+ CU_ADD_TEST(suite, test_unmap_io);
+ CU_ADD_TEST(suite, test_io_failure);
+ CU_ADD_TEST(suite, test_multi_raid_no_io);
+ CU_ADD_TEST(suite, test_multi_raid_with_io);
+ CU_ADD_TEST(suite, test_io_type_supported);
+ CU_ADD_TEST(suite, test_create_raid_from_config);
+ CU_ADD_TEST(suite, test_create_raid_from_config_invalid_params);
+ CU_ADD_TEST(suite, test_raid_json_dump_info);
+ CU_ADD_TEST(suite, test_context_size);
+ CU_ADD_TEST(suite, test_raid_level_conversions);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ set_test_opts();
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore b/src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore
new file mode 100644
index 000000000..946026bf5
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore
@@ -0,0 +1 @@
+raid5_ut
diff --git a/src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile b/src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile
new file mode 100644
index 000000000..ddb733333
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+
+TEST_FILE = raid5_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c b/src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c
new file mode 100644
index 000000000..ba30f327b
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c
@@ -0,0 +1,214 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/env.h"
+#include "spdk_internal/mock.h"
+
+#include "bdev/raid/raid5.c"
+
+DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
+DEFINE_STUB_V(raid_bdev_io_complete, (struct raid_bdev_io *raid_io,
+ enum spdk_bdev_io_status status));
+
+struct raid5_params {
+ uint8_t num_base_bdevs;
+ uint64_t base_bdev_blockcnt;
+ uint32_t base_bdev_blocklen;
+ uint32_t strip_size;
+};
+
+static struct raid5_params *g_params;
+static size_t g_params_count;
+
+#define ARRAY_FOR_EACH(a, e) \
+ for (e = a; e < a + SPDK_COUNTOF(a); e++)
+
+#define RAID5_PARAMS_FOR_EACH(p) \
+ for (p = g_params; p < g_params + g_params_count; p++)
+
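+/* Build the matrix of raid5 test parameters, skipping combinations with an invalid strip size */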
+static int
+test_setup(void)
+{
+ uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
+ uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
+ uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
+ uint32_t strip_size_kb_values[] = { 1, 4, 128 };
+ uint8_t *num_base_bdevs;
+ uint64_t *base_bdev_blockcnt;
+ uint32_t *base_bdev_blocklen;
+ uint32_t *strip_size_kb;
+ struct raid5_params *params;
+
+ g_params_count = SPDK_COUNTOF(num_base_bdevs_values) *
+ SPDK_COUNTOF(base_bdev_blockcnt_values) *
+ SPDK_COUNTOF(base_bdev_blocklen_values) *
+ SPDK_COUNTOF(strip_size_kb_values);
+ g_params = calloc(g_params_count, sizeof(*g_params));
+ if (!g_params) {
+ return -ENOMEM;
+ }
+
+ params = g_params;
+
+ ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
+ ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
+ ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
+ ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
+ params->num_base_bdevs = *num_base_bdevs;
+ params->base_bdev_blockcnt = *base_bdev_blockcnt;
+ params->base_bdev_blocklen = *base_bdev_blocklen;
+ params->strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
+ if (params->strip_size == 0 ||
+ params->strip_size > *base_bdev_blockcnt) {
+ g_params_count--;
+ continue;
+ }
+ params++;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+test_cleanup(void)
+{
+ free(g_params);
+ return 0;
+}
+
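+/* Allocate a raid_bdev with stub base bdevs for the given parameters */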
+static struct raid_bdev *
+create_raid_bdev(struct raid5_params *params)
+{
+ struct raid_bdev *raid_bdev;
+ struct raid_base_bdev_info *base_info;
+
+ raid_bdev = calloc(1, sizeof(*raid_bdev));
+ SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
+
+ raid_bdev->module = &g_raid5_module;
+ raid_bdev->num_base_bdevs = params->num_base_bdevs;
+ raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs,
+ sizeof(struct raid_base_bdev_info));
+ SPDK_CU_ASSERT_FATAL(raid_bdev->base_bdev_info != NULL);
+
+ RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
+ base_info->bdev = calloc(1, sizeof(*base_info->bdev));
+ SPDK_CU_ASSERT_FATAL(base_info->bdev != NULL);
+
+ base_info->bdev->blockcnt = params->base_bdev_blockcnt;
+ base_info->bdev->blocklen = params->base_bdev_blocklen;
+ }
+
+ raid_bdev->strip_size = params->strip_size;
+ raid_bdev->strip_size_shift = spdk_u32log2(raid_bdev->strip_size);
+ raid_bdev->bdev.blocklen = params->base_bdev_blocklen;
+
+ return raid_bdev;
+}
+
+static void
+delete_raid_bdev(struct raid_bdev *raid_bdev)
+{
+ struct raid_base_bdev_info *base_info;
+
+ RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
+ free(base_info->bdev);
+ }
+ free(raid_bdev->base_bdev_info);
+ free(raid_bdev);
+}
+
+static struct raid5_info *
+create_raid5(struct raid5_params *params)
+{
+ struct raid_bdev *raid_bdev = create_raid_bdev(params);
+
+ SPDK_CU_ASSERT_FATAL(raid5_start(raid_bdev) == 0);
+
+ return raid_bdev->module_private;
+}
+
+static void
+delete_raid5(struct raid5_info *r5info)
+{
+ struct raid_bdev *raid_bdev = r5info->raid_bdev;
+
+ raid5_stop(raid_bdev);
+
+ delete_raid_bdev(raid_bdev);
+}
+
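+/* Verify the raid5 geometry computed by raid5_start() for every parameter set */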
+static void
+test_raid5_start(void)
+{
+ struct raid5_params *params;
+
+ RAID5_PARAMS_FOR_EACH(params) {
+ struct raid5_info *r5info;
+
+ r5info = create_raid5(params);
+
+ CU_ASSERT_EQUAL(r5info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
+ CU_ASSERT_EQUAL(r5info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
+ CU_ASSERT_EQUAL(r5info->raid_bdev->bdev.blockcnt,
+ (params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
+ (params->num_base_bdevs - 1));
+ CU_ASSERT_EQUAL(r5info->raid_bdev->bdev.optimal_io_boundary, r5info->stripe_blocks);
+
+ delete_raid5(r5info);
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("raid5", test_setup, test_cleanup);
+ CU_ADD_TEST(suite, test_raid5_start);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore
new file mode 100644
index 000000000..75800527d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore
@@ -0,0 +1 @@
+scsi_nvme_ut
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile
new file mode 100644
index 000000000..0dbe788db
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile
@@ -0,0 +1,37 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = scsi_nvme_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c
new file mode 100644
index 000000000..ef27d7c09
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c
@@ -0,0 +1,131 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "bdev/scsi_nvme.c"
+
+static int
+null_init(void)
+{
+ return 0;
+}
+
+static int
+null_clean(void)
+{
+ return 0;
+}
+
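+/* Verify translation of NVMe status codes to SCSI status/sense data for each status code type */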
+static void
+scsi_nvme_translate_test(void)
+{
+ struct spdk_bdev_io bdev_io;
+ int sc, sk, asc, ascq;
+
+ /* SPDK_NVME_SCT_GENERIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_GENERIC;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_TASK_ABORTED);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ABORTED_COMMAND);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_WARNING);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_COMMAND_SPECIFIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_INVALID_FORMAT;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_FORMAT_COMMAND_FAILED);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_FORMAT_COMMAND_FAILED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_OVERLAPPING_RANGE;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_MEDIA_ERROR */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_GUARD_CHECK_ERROR;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_MEDIUM_ERROR);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_LOGICAL_BLOCK_GUARD_CHECK_FAILED);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_LOGICAL_BLOCK_GUARD_CHECK_FAILED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_DEALLOCATED_OR_UNWRITTEN_BLOCK;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_VENDOR_SPECIFIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ bdev_io.internal.error.nvme.sc = 0xff;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("scsi_nvme_suite", null_init, null_clean);
+
+ CU_ADD_TEST(suite, scsi_nvme_translate_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore
new file mode 100644
index 000000000..5f2f6fdff
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore
@@ -0,0 +1 @@
+vbdev_lvol_ut
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile
new file mode 100644
index 000000000..a44f51372
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../)
+
+TEST_FILE = vbdev_lvol_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c
new file mode 100644
index 000000000..a963bd3b7
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c
@@ -0,0 +1,1440 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk/string.h"
+
+#include "bdev/lvol/vbdev_lvol.c"
+
+#include "unit/lib/json_mock.c"
+
+#define SPDK_BS_PAGE_SIZE 0x1000
+
+int g_lvolerrno;
+int g_lvserrno;
+int g_cluster_size;
+int g_registered_bdevs;
+int g_num_lvols = 0;
+struct spdk_lvol_store *g_lvs = NULL;
+struct spdk_lvol *g_lvol = NULL;
+struct lvol_store_bdev *g_lvs_bdev = NULL;
+struct spdk_bdev *g_base_bdev = NULL;
+struct spdk_bdev_io *g_io = NULL;
+struct spdk_io_channel *g_ch = NULL;
+
+static struct spdk_bdev g_bdev = {};
+static struct spdk_lvol_store *g_lvol_store = NULL;
+bool lvol_store_initialize_fail = false;
+bool lvol_store_initialize_cb_fail = false;
+bool lvol_already_opened = false;
+bool g_examine_done = false;
+bool g_bdev_alias_already_exists = false;
+bool g_lvs_with_name_already_exists = false;
+
+int
+spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias)
+{
+ struct spdk_bdev_alias *tmp;
+
+ CU_ASSERT(alias != NULL);
+ CU_ASSERT(bdev != NULL);
+ if (g_bdev_alias_already_exists) {
+ return -EEXIST;
+ }
+
+ tmp = calloc(1, sizeof(*tmp));
+ SPDK_CU_ASSERT_FATAL(tmp != NULL);
+
+ tmp->alias = strdup(alias);
+ SPDK_CU_ASSERT_FATAL(tmp->alias != NULL);
+
+ TAILQ_INSERT_TAIL(&bdev->aliases, tmp, tailq);
+
+ return 0;
+}
+
+int
+spdk_bdev_alias_del(struct spdk_bdev *bdev, const char *alias)
+{
+ struct spdk_bdev_alias *tmp;
+
+ CU_ASSERT(bdev != NULL);
+
+ TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
+ SPDK_CU_ASSERT_FATAL(alias != NULL);
+ if (strncmp(alias, tmp->alias, SPDK_LVOL_NAME_MAX) == 0) {
+ TAILQ_REMOVE(&bdev->aliases, tmp, tailq);
+ free(tmp->alias);
+ free(tmp);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+void
+spdk_bdev_alias_del_all(struct spdk_bdev *bdev)
+{
+ struct spdk_bdev_alias *p, *tmp;
+
+ TAILQ_FOREACH_SAFE(p, &bdev->aliases, tailq, tmp) {
+ TAILQ_REMOVE(&bdev->aliases, p, tailq);
+ free(p->alias);
+ free(p);
+ }
+}
+
+void
+spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
+{
+}
+
+void
+spdk_lvs_rename(struct spdk_lvol_store *lvs, const char *new_name,
+ spdk_lvs_op_complete cb_fn, void *cb_arg)
+{
+ if (g_lvs_with_name_already_exists) {
+ g_lvolerrno = -EEXIST;
+ } else {
+ snprintf(lvs->name, sizeof(lvs->name), "%s", new_name);
+ g_lvolerrno = 0;
+ }
+
+ cb_fn(cb_arg, g_lvolerrno);
+}
+
+void
+spdk_lvol_rename(struct spdk_lvol *lvol, const char *new_name,
+ spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *tmp;
+
+ if (strncmp(lvol->name, new_name, SPDK_LVOL_NAME_MAX) == 0) {
+ cb_fn(cb_arg, 0);
+ return;
+ }
+
+ TAILQ_FOREACH(tmp, &lvol->lvol_store->lvols, link) {
+ if (strncmp(tmp->name, new_name, SPDK_LVOL_NAME_MAX) == 0) {
+ SPDK_ERRLOG("Lvol %s already exists in lvol store %s\n", new_name, lvol->lvol_store->name);
+ cb_fn(cb_arg, -EEXIST);
+ return;
+ }
+ }
+
+ snprintf(lvol->name, sizeof(lvol->name), "%s", new_name);
+
+ cb_fn(cb_arg, g_lvolerrno);
+}
+
+void
+spdk_lvol_open(struct spdk_lvol *lvol, spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, lvol, g_lvolerrno);
+}
+
+uint64_t
+spdk_blob_get_num_clusters(struct spdk_blob *b)
+{
+ return 0;
+}
+
+int
+spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
+ size_t *count)
+{
+ *count = 0;
+ return 0;
+}
+
+spdk_blob_id
+spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid)
+{
+ return 0;
+}
+
+bool g_blob_is_read_only = false;
+
+bool
+spdk_blob_is_read_only(struct spdk_blob *blob)
+{
+ return g_blob_is_read_only;
+}
+
+bool
+spdk_blob_is_snapshot(struct spdk_blob *blob)
+{
+ return false;
+}
+
+bool
+spdk_blob_is_clone(struct spdk_blob *blob)
+{
+ return false;
+}
+
+bool
+spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
+{
+ return false;
+}
+
+static struct spdk_lvol *_lvol_create(struct spdk_lvol_store *lvs);
+
+void
+spdk_lvs_load(struct spdk_bs_dev *dev,
+ spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol_store *lvs = NULL;
+ int i;
+ int lvserrno = g_lvserrno;
+
+ if (lvserrno != 0) {
+ /* On error the blobstore destroys the bs_dev itself by putting back the
+ * io channels. This operation is asynchronous and completes after the
+ * lvol callback has been called. */
+ cb_fn(cb_arg, g_lvol_store, lvserrno);
+ dev->destroy(dev);
+ return;
+ }
+
+ lvs = calloc(1, sizeof(*lvs));
+ SPDK_CU_ASSERT_FATAL(lvs != NULL);
+ TAILQ_INIT(&lvs->lvols);
+ TAILQ_INIT(&lvs->pending_lvols);
+ spdk_uuid_generate(&lvs->uuid);
+ lvs->bs_dev = dev;
+ for (i = 0; i < g_num_lvols; i++) {
+ _lvol_create(lvs);
+ }
+
+ cb_fn(cb_arg, lvs, lvserrno);
+}
+
+int
+spdk_bs_bdev_claim(struct spdk_bs_dev *bs_dev, struct spdk_bdev_module *module)
+{
+ if (lvol_already_opened == true) {
+ return -1;
+ }
+
+ lvol_already_opened = true;
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *vbdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ int rc;
+
+ SPDK_CU_ASSERT_FATAL(vbdev != NULL);
+ rc = vbdev->fn_table->destruct(vbdev->ctxt);
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, rc);
+}
+
+void
+spdk_bdev_module_finish_done(void)
+{
+ return;
+}
+
+uint64_t
+spdk_bs_get_page_size(struct spdk_blob_store *bs)
+{
+ return SPDK_BS_PAGE_SIZE;
+}
+
+uint64_t
+spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
+{
+ return SPDK_BS_PAGE_SIZE;
+}
+
+static void
+bdev_blob_destroy(struct spdk_bs_dev *bs_dev)
+{
+ CU_ASSERT(bs_dev != NULL);
+ free(bs_dev);
+ lvol_already_opened = false;
+}
+
+struct spdk_bs_dev *
+spdk_bdev_create_bs_dev(struct spdk_bdev *bdev, spdk_bdev_remove_cb_t remove_cb, void *remove_ctx)
+{
+ struct spdk_bs_dev *bs_dev;
+
+ if (lvol_already_opened == true || bdev == NULL) {
+ return NULL;
+ }
+
+ bs_dev = calloc(1, sizeof(*bs_dev));
+ SPDK_CU_ASSERT_FATAL(bs_dev != NULL);
+ bs_dev->destroy = bdev_blob_destroy;
+
+ return bs_dev;
+}
+
+void
+spdk_lvs_opts_init(struct spdk_lvs_opts *opts)
+{
+}
+
+int
+spdk_lvs_init(struct spdk_bs_dev *bs_dev, struct spdk_lvs_opts *o,
+ spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol_store *lvs;
+ int error = 0;
+
+ if (lvol_store_initialize_fail) {
+ return -1;
+ }
+
+ if (lvol_store_initialize_cb_fail) {
+ bs_dev->destroy(bs_dev);
+ lvs = NULL;
+ error = -1;
+ } else {
+ lvs = calloc(1, sizeof(*lvs));
+ SPDK_CU_ASSERT_FATAL(lvs != NULL);
+ TAILQ_INIT(&lvs->lvols);
+ TAILQ_INIT(&lvs->pending_lvols);
+ spdk_uuid_generate(&lvs->uuid);
+ snprintf(lvs->name, sizeof(lvs->name), "%s", o->name);
+ lvs->bs_dev = bs_dev;
+ error = 0;
+ }
+ cb_fn(cb_arg, lvs, error);
+
+ return 0;
+}
+
+int
+spdk_lvs_unload(struct spdk_lvol_store *lvs, spdk_lvs_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *lvol, *tmp;
+
+ TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) {
+ TAILQ_REMOVE(&lvs->lvols, lvol, link);
+ free(lvol);
+ }
+ g_lvol_store = NULL;
+
+ lvs->bs_dev->destroy(lvs->bs_dev);
+ free(lvs);
+
+ if (cb_fn != NULL) {
+ cb_fn(cb_arg, 0);
+ }
+
+ return 0;
+}
+
+int
+spdk_lvs_destroy(struct spdk_lvol_store *lvs, spdk_lvs_op_complete cb_fn,
+ void *cb_arg)
+{
+ struct spdk_lvol *lvol, *tmp;
+ char *alias;
+
+ TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) {
+ TAILQ_REMOVE(&lvs->lvols, lvol, link);
+
+ alias = spdk_sprintf_alloc("%s/%s", lvs->name, lvol->name);
+ if (alias == NULL) {
+ SPDK_ERRLOG("Cannot alloc memory for alias\n");
+ return -1;
+ }
+ spdk_bdev_alias_del(lvol->bdev, alias);
+
+ free(alias);
+ free(lvol);
+ }
+ g_lvol_store = NULL;
+
+ lvs->bs_dev->destroy(lvs->bs_dev);
+ free(lvs);
+
+ if (cb_fn != NULL) {
+ cb_fn(cb_arg, 0);
+ }
+
+ return 0;
+}
+
+void
+spdk_lvol_resize(struct spdk_lvol *lvol, size_t sz, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_lvol_set_read_only(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
+uint64_t
+spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
+{
+ return g_cluster_size;
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ if (!strcmp(g_base_bdev->name, bdev_name)) {
+ return g_base_bdev;
+ }
+
+ return NULL;
+}
+
+void
+spdk_lvol_close(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ lvol->ref_count--;
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, 0);
+}
+
+bool
+spdk_lvol_deletable(struct spdk_lvol *lvol)
+{
+ return true;
+}
+
+void
+spdk_lvol_destroy(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ if (lvol->ref_count != 0) {
+ cb_fn(cb_arg, -ENODEV);
+ }
+
+ TAILQ_REMOVE(&lvol->lvol_store->lvols, lvol, link);
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, 0);
+
+ g_lvol = NULL;
+ free(lvol);
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+}
+
+struct spdk_io_channel *spdk_lvol_get_io_channel(struct spdk_lvol *lvol)
+{
+ CU_ASSERT(lvol == g_lvol);
+ return g_ch;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ CU_ASSERT(cb == lvol_get_buf_cb);
+}
+
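+/* The blobstore I/O stubs below do no data transfer; they only verify that the
+ * channel, offset and length passed down match the request prepared by the test
+ * (g_ch and g_io) and then complete the operation successfully. */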
+void
+spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ void *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ void *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+int
+spdk_bdev_register(struct spdk_bdev *vbdev)
+{
+ TAILQ_INIT(&vbdev->aliases);
+
+ g_registered_bdevs++;
+ return 0;
+}
+
+void
+spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
+{
+ SPDK_CU_ASSERT_FATAL(g_examine_done != true);
+ g_examine_done = true;
+}
+
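+/* Local helper shared by the create/snapshot/clone stubs: fakes lvol allocation,
+ * takes a reference, assigns a fixed unique_id and links the lvol into the
+ * store's lvol list. */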
+static struct spdk_lvol *
+_lvol_create(struct spdk_lvol_store *lvs)
+{
+ struct spdk_lvol *lvol = calloc(1, sizeof(*lvol));
+
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+
+ lvol->lvol_store = lvs;
+ lvol->ref_count++;
+ snprintf(lvol->unique_id, sizeof(lvol->unique_id), "%s", "UNIT_TEST_UUID");
+
+ TAILQ_INSERT_TAIL(&lvol->lvol_store->lvols, lvol, link);
+
+ return lvol;
+}
+
+int
+spdk_lvol_create(struct spdk_lvol_store *lvs, const char *name, size_t sz,
+ bool thin_provision, enum lvol_clear_method clear_method, spdk_lvol_op_with_handle_complete cb_fn,
+ void *cb_arg)
+{
+ struct spdk_lvol *lvol;
+
+ lvol = _lvol_create(lvs);
+ snprintf(lvol->name, sizeof(lvol->name), "%s", name);
+ cb_fn(cb_arg, lvol, 0);
+
+ return 0;
+}
+
+void
+spdk_lvol_create_snapshot(struct spdk_lvol *lvol, const char *snapshot_name,
+ spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *snap;
+
+ snap = _lvol_create(lvol->lvol_store);
+ snprintf(snap->name, sizeof(snap->name), "%s", snapshot_name);
+ cb_fn(cb_arg, snap, 0);
+}
+
+void
+spdk_lvol_create_clone(struct spdk_lvol *lvol, const char *clone_name,
+ spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *clone;
+
+ clone = _lvol_create(lvol->lvol_store);
+ snprintf(clone->name, sizeof(clone->name), "%s", clone_name);
+ cb_fn(cb_arg, clone, 0);
+}
+
+static void
+lvol_store_op_complete(void *cb_arg, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+ return;
+}
+
+static void
+lvol_store_op_with_handle_complete(void *cb_arg, struct spdk_lvol_store *lvs, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+ g_lvol_store = lvs;
+ return;
+}
+
+static void
+vbdev_lvol_create_complete(void *cb_arg, struct spdk_lvol *lvol, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+ g_lvol = lvol;
+}
+
+static void
+vbdev_lvol_resize_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
+static void
+vbdev_lvol_set_read_only_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
+static void
+vbdev_lvol_rename_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
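+/* The test cases below share a common pattern: create an lvol store on g_bdev,
+ * optionally create lvols on it, exercise the operation under test, and then
+ * unload or destruct the store while checking g_lvserrno/g_lvolerrno. */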
+static void
+ut_lvs_destroy(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ spdk_uuid_generate(&lvs->uuid);
+
+ /* Successfully create lvol, which should be unloaded with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Unload lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_init(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_snapshot(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+ struct spdk_lvol *lvol = NULL;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ lvol = g_lvol;
+
+ /* Successful snap create */
+ vbdev_lvol_create_snapshot(lvol, "snap", vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Successful snap destroy */
+ g_lvol = lvol;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_clone(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+ struct spdk_lvol *lvol = NULL;
+ struct spdk_lvol *snap = NULL;
+ struct spdk_lvol *clone = NULL;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ lvol = g_lvol;
+
+ /* Successful snap create */
+ vbdev_lvol_create_snapshot(lvol, "snap", vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ snap = g_lvol;
+
+ /* Successful clone create */
+ vbdev_lvol_create_clone(snap, "clone", vbdev_lvol_create_complete, NULL);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ clone = g_lvol;
+
+ /* Successful lvol destroy */
+ g_lvol = lvol;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Successful clone destroy */
+ g_lvol = clone;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Successful lvol destroy */
+ g_lvol = snap;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_hotremove(void)
+{
+ int rc = 0;
+
+ lvol_store_initialize_fail = false;
+ lvol_store_initialize_cb_fail = false;
+ lvol_already_opened = false;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ /* Hot remove callback with NULL - stability check */
+ vbdev_lvs_hotremove_cb(NULL);
+
+ /* Hot remove lvs on bdev removal */
+ vbdev_lvs_hotremove_cb(&g_bdev);
+
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_lvol_pairs));
+
+}
+
+static void
+ut_lvs_examine_check(bool success)
+{
+ struct lvol_store_bdev *lvs_bdev;
+
+ /* Examine was finished regardless of result */
+ CU_ASSERT(g_examine_done == true);
+ g_examine_done = false;
+
+ if (success) {
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_spdk_lvol_pairs));
+ lvs_bdev = TAILQ_FIRST(&g_spdk_lvol_pairs);
+ SPDK_CU_ASSERT_FATAL(lvs_bdev != NULL);
+ g_lvol_store = lvs_bdev->lvs;
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ } else {
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_spdk_lvol_pairs));
+ g_lvol_store = NULL;
+ }
+}
+
+static void
+ut_lvol_examine(void)
+{
+ /* Examine unsuccessfully - bdev already opened */
+ g_lvserrno = -1;
+ lvol_already_opened = true;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(false);
+
+ /* Examine unsuccessfully - fail on lvol store */
+ g_lvserrno = -1;
+ lvol_already_opened = false;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(false);
+
+ /* Examine successfully
+ * - one lvol fails to load
+ * - lvs is loaded with no lvols present */
+ g_lvserrno = 0;
+ g_lvolerrno = -1;
+ g_num_lvols = 1;
+ lvol_already_opened = false;
+ g_registered_bdevs = 0;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(true);
+ CU_ASSERT(g_registered_bdevs == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_store->lvols));
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Examine successfully */
+ g_lvserrno = 0;
+ g_lvolerrno = 0;
+ g_registered_bdevs = 0;
+ lvol_already_opened = false;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(true);
+ CU_ASSERT(g_registered_bdevs != 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_lvol_store->lvols));
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+}
+
+static void
+ut_lvol_rename(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *lvol2;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvols create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol = g_lvol;
+
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol2", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol2 = g_lvol;
+
+ /* Successful rename lvol */
+ vbdev_lvol_rename(lvol, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "new_lvol_name");
+
+	/* Renaming lvol to a name that already exists */
+ g_bdev_alias_already_exists = true;
+ vbdev_lvol_rename(lvol2, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ g_bdev_alias_already_exists = false;
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno != 0);
+ CU_ASSERT_STRING_NOT_EQUAL(lvol2->name, "new_lvol_name");
+
+	/* Renaming lvol with its own name */
+ vbdev_lvol_rename(lvol, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "new_lvol_name");
+
+ /* Successful lvols destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ vbdev_lvol_destroy(lvol2, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_destroy(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *lvol2;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvols create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol = g_lvol;
+
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol2", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol2 = g_lvol;
+
+ /* Successful lvols destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Hot remove lvol bdev */
+ vbdev_lvol_unregister(lvol2);
+
+ /* Unload lvol store */
+ vbdev_lvs_unload(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_resize(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ int sz = 10;
+ int rc = 0;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ /* Successful lvol resize */
+ g_lvolerrno = -1;
+ vbdev_lvol_resize(lvol, 20, vbdev_lvol_resize_complete, NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ CU_ASSERT(lvol->bdev->blockcnt == 20 * g_cluster_size / lvol->bdev->blocklen);
+
+ /* Resize with NULL lvol */
+ vbdev_lvol_resize(NULL, 20, vbdev_lvol_resize_complete, NULL);
+ CU_ASSERT(g_lvolerrno != 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_set_read_only(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ int sz = 10;
+ int rc = 0;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ /* Successful set lvol as read only */
+ g_lvolerrno = -1;
+ vbdev_lvol_set_read_only(lvol, vbdev_lvol_set_read_only_complete, NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvs_unload(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ spdk_uuid_generate(&lvs->uuid);
+
+ /* Successfully create lvol, which should be destroyed with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Unload lvol store */
+ vbdev_lvs_unload(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(g_lvol != NULL);
+}
+
+static void
+ut_lvs_init(void)
+{
+ int rc = 0;
+ struct spdk_lvol_store *lvs;
+
+ /* spdk_lvs_init() fails */
+ lvol_store_initialize_fail = true;
+
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ lvol_store_initialize_fail = false;
+
+ /* spdk_lvs_init_cb() fails */
+ lvol_store_initialize_cb_fail = true;
+
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ lvol_store_initialize_cb_fail = false;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ /* Bdev with lvol store already claimed */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Destruct lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_vbdev_lvol_get_io_channel(void)
+{
+ struct spdk_io_channel *ch;
+
+ g_lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ ch = vbdev_lvol_get_io_channel(g_lvol);
+ CU_ASSERT(ch == g_ch);
+
+ free(g_lvol);
+}
+
+static void
+ut_vbdev_lvol_io_type_supported(void)
+{
+ struct spdk_lvol *lvol;
+ bool ret;
+
+ lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+
+ g_blob_is_read_only = false;
+
+ /* Supported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_RESET);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_UNMAP);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(ret == true);
+
+ /* Unsupported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_FLUSH);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_ADMIN);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_IO);
+ CU_ASSERT(ret == false);
+
+ g_blob_is_read_only = true;
+
+ /* Supported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_RESET);
+ CU_ASSERT(ret == true);
+
+ /* Unsupported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_UNMAP);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_FLUSH);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_ADMIN);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_IO);
+ CU_ASSERT(ret == false);
+
+ free(lvol);
+}
+
+static void
+ut_lvol_read_write(void)
+{
+ g_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(g_io != NULL);
+ g_base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+ g_lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ g_io->bdev = g_base_bdev;
+ g_io->bdev->ctxt = g_lvol;
+ g_io->u.bdev.offset_blocks = 20;
+ g_io->u.bdev.num_blocks = 20;
+
+ lvol_read(g_ch, g_io);
+	CU_ASSERT(g_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ lvol_write(g_lvol, g_ch, g_io);
+	CU_ASSERT(g_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ free(g_io);
+ free(g_base_bdev);
+ free(g_lvol);
+}
+
+static void
+ut_vbdev_lvol_submit_request(void)
+{
+ struct spdk_lvol request_lvol = {};
+ g_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(g_io != NULL);
+ g_base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+ g_io->bdev = g_base_bdev;
+
+ g_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_base_bdev->ctxt = &request_lvol;
+ vbdev_lvol_submit_request(g_ch, g_io);
+
+ free(g_io);
+ free(g_base_bdev);
+}
+
+static void
+ut_lvs_rename(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "old_lvs_name", 0, LVS_CLEAR_WITH_UNMAP,
+ lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ g_base_bdev = calloc(1, sizeof(*g_base_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+
+ /* Successfully create lvol, which should be destroyed with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Trying to rename lvs with lvols created */
+ vbdev_lvs_rename(lvs, "new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(TAILQ_FIRST(&g_lvol->bdev->aliases)->alias, "new_lvs_name/lvol");
+
+ /* Trying to rename lvs with name already used by another lvs */
+	/* This is a bdev_lvol test, so g_lvs_with_name_already_exists simulates an
+	 * existing lvs named 'another_new_lvs_name'; the name itself is never actually compared */
+ g_lvs_with_name_already_exists = true;
+ vbdev_lvs_rename(lvs, "another_new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(TAILQ_FIRST(&g_lvol->bdev->aliases)->alias, "new_lvs_name/lvol");
+ g_lvs_with_name_already_exists = false;
+
+ /* Unload lvol store */
+ g_lvol_store = lvs;
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ free(g_base_bdev->name);
+ free(g_base_bdev);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("lvol", NULL, NULL);
+
+ CU_ADD_TEST(suite, ut_lvs_init);
+ CU_ADD_TEST(suite, ut_lvol_init);
+ CU_ADD_TEST(suite, ut_lvol_snapshot);
+ CU_ADD_TEST(suite, ut_lvol_clone);
+ CU_ADD_TEST(suite, ut_lvs_destroy);
+ CU_ADD_TEST(suite, ut_lvs_unload);
+ CU_ADD_TEST(suite, ut_lvol_resize);
+ CU_ADD_TEST(suite, ut_lvol_set_read_only);
+ CU_ADD_TEST(suite, ut_lvol_hotremove);
+ CU_ADD_TEST(suite, ut_vbdev_lvol_get_io_channel);
+ CU_ADD_TEST(suite, ut_vbdev_lvol_io_type_supported);
+ CU_ADD_TEST(suite, ut_lvol_read_write);
+ CU_ADD_TEST(suite, ut_vbdev_lvol_submit_request);
+ CU_ADD_TEST(suite, ut_lvol_examine);
+ CU_ADD_TEST(suite, ut_lvol_rename);
+ CU_ADD_TEST(suite, ut_lvol_destroy);
+ CU_ADD_TEST(suite, ut_lvs_rename);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore
new file mode 100644
index 000000000..a1d7547aa
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore
@@ -0,0 +1 @@
+vbdev_zone_block_ut
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile
new file mode 100644
index 000000000..81a9575d5
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = vbdev_zone_block_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c
new file mode 100644
index 000000000..d0ee553e3
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c
@@ -0,0 +1,1502 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/env.h"
+#include "spdk_internal/mock.h"
+#include "spdk/thread.h"
+#include "common/lib/test_env.c"
+#include "bdev/zone_block/vbdev_zone_block.c"
+#include "bdev/zone_block/vbdev_zone_block_rpc.c"
+
+#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)
+#define BLOCK_SIZE 4096
+
+/* Globals */
+uint64_t g_block_cnt;
+struct io_output *g_io_output = NULL;
+uint32_t g_max_io_size;
+uint32_t g_io_output_index;
+uint32_t g_io_comp_status;
+uint8_t g_rpc_err;
+uint8_t g_json_decode_obj_construct;
+static TAILQ_HEAD(, spdk_bdev) g_bdev_list = TAILQ_HEAD_INITIALIZER(g_bdev_list);
+void *g_rpc_req = NULL;
+static struct spdk_thread *g_thread;
+
+struct io_output {
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *ch;
+ uint64_t offset_blocks;
+ uint64_t num_blocks;
+ spdk_bdev_io_completion_cb cb;
+ void *cb_arg;
+ enum spdk_bdev_io_type iotype;
+};
+
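+/* DEFINE_STUB()/DEFINE_STUB_V() come from spdk_internal/mock.h (included above);
+ * they expand to trivial mock implementations of the named functions, returning
+ * the supplied value (or nothing for the _V variants). */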
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
+DEFINE_STUB(spdk_json_decode_uint64, int, (const struct spdk_json_val *val, void *out), 0);
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
+DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_named_string, int, (struct spdk_json_write_ctx *w,
+ const char *name, const char *val), 0);
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), true);
+DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
+DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
+DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
+ uint32_t state_mask));
+DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
+ struct spdk_json_write_ctx *w));
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
+ (void *)0);
+
+static void
+set_test_opts(void)
+{
+ g_max_io_size = 1024;
+}
+
+static void
+init_test_globals(uint64_t block_cnt)
+{
+ g_io_output = calloc(g_max_io_size, sizeof(struct io_output));
+ SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
+ g_io_output_index = 0;
+ g_block_cnt = block_cnt;
+}
+
+static void
+free_test_globals(void)
+{
+ free(g_io_output);
+ g_io_output = NULL;
+}
+
+void
+spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+ free(bdev_io);
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc)
+{
+ *_desc = (void *)bdev;
+ return 0;
+}
+
+struct spdk_bdev *
+spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
+{
+ return (void *)desc;
+}
+
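+/* spdk_bdev_register()/spdk_bdev_unregister() are stubbed to maintain a fake
+ * bdev registry in g_bdev_list, so spdk_bdev_get_by_name() can look bdevs up by
+ * name and unregister can invoke the bdev's destruct callback. */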
+int
+spdk_bdev_register(struct spdk_bdev *bdev)
+{
+ CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(bdev->name));
+ TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link);
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT_EQUAL(spdk_bdev_get_by_name(bdev->name), bdev);
+ TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
+
+ bdev->fn_table->destruct(bdev->ctxt);
+
+ if (cb_fn) {
+ cb_fn(cb_arg, 0);
+ }
+}
+
+int spdk_json_write_named_uint64(struct spdk_json_write_ctx *w, const char *name, uint64_t val)
+{
+ struct rpc_construct_zone_block *req = g_rpc_req;
+ if (strcmp(name, "zone_capacity") == 0) {
+ CU_ASSERT(req->zone_capacity == val);
+ } else if (strcmp(name, "optimal_open_zones") == 0) {
+ CU_ASSERT(req->optimal_open_zones == val);
+ }
+
+ return 0;
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return bdev->name;
+}
+
+bool
+spdk_bdev_is_zoned(const struct spdk_bdev *bdev)
+{
+ return bdev->zoned;
+}
+
+int
+spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val)
+{
+ return 0;
+}
+
+int
+spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module)
+{
+ if (bdev->internal.claim_module != NULL) {
+ return -1;
+ }
+ bdev->internal.claim_module = module;
+ return 0;
+}
+
+void
+spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
+{
+ CU_ASSERT(bdev->internal.claim_module != NULL);
+ bdev->internal.claim_module = NULL;
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+	g_io_comp_status = (status == SPDK_BDEV_IO_STATUS_SUCCESS);
+}
+
+int
+spdk_json_decode_object(const struct spdk_json_val *values,
+ const struct spdk_json_object_decoder *decoders, size_t num_decoders,
+ void *out)
+{
+ struct rpc_construct_zone_block *construct, *_construct;
+ struct rpc_delete_zone_block *delete, *_delete;
+
+ if (g_json_decode_obj_construct) {
+ construct = g_rpc_req;
+ _construct = out;
+
+ _construct->name = strdup(construct->name);
+ SPDK_CU_ASSERT_FATAL(_construct->name != NULL);
+ _construct->base_bdev = strdup(construct->base_bdev);
+ SPDK_CU_ASSERT_FATAL(_construct->base_bdev != NULL);
+ _construct->zone_capacity = construct->zone_capacity;
+ _construct->optimal_open_zones = construct->optimal_open_zones;
+ } else {
+ delete = g_rpc_req;
+ _delete = out;
+
+ _delete->name = strdup(delete->name);
+ SPDK_CU_ASSERT_FATAL(_delete->name != NULL);
+ }
+
+ return 0;
+}
+
+struct spdk_json_write_ctx *
+spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
+{
+ return (void *)1;
+}
+
+static struct spdk_bdev *
+create_nvme_bdev(void)
+{
+ struct spdk_bdev *base_bdev;
+ char *name = "Nvme0n1";
+ base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
+ base_bdev->name = strdup(name);
+ SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
+ base_bdev->blocklen = BLOCK_SIZE;
+ base_bdev->blockcnt = g_block_cnt;
+ base_bdev->write_unit_size = 1;
+ TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
+
+ return base_bdev;
+}
+
+static void
+base_bdevs_cleanup(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev *bdev_next;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
+ free(bdev->name);
+ TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
+ free(bdev);
+ }
+ }
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ struct spdk_bdev *bdev;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ if (strcmp(bdev_name, bdev->name) == 0) {
+ return bdev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+void
+spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
+ int error_code, const char *msg)
+{
+ g_rpc_err = 1;
+}
+
+void
+spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
+ int error_code, const char *fmt, ...)
+{
+ g_rpc_err = 1;
+}
+
+static void
+set_io_output(struct io_output *output,
+ struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg,
+ enum spdk_bdev_io_type iotype)
+{
+ output->desc = desc;
+ output->ch = ch;
+ output->offset_blocks = offset_blocks;
+ output->num_blocks = num_blocks;
+ output->cb = cb;
+ output->cb_arg = cb_arg;
+ output->iotype = iotype;
+}
+
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_UNMAP);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, true, cb_arg);
+
+ return 0;
+}
+
+int
+spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, void *md,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < g_max_io_size);
+
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_WRITE);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ child_io->internal.desc = desc;
+ child_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ child_io->u.bdev.iovs = iov;
+ child_io->u.bdev.iovcnt = iovcnt;
+ child_io->u.bdev.md_buf = md;
+ child_io->u.bdev.num_blocks = num_blocks;
+ child_io->u.bdev.offset_blocks = offset_blocks;
+ cb(child_io, true, cb_arg);
+
+ return 0;
+}
+
+
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+
+ return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks,
+ cb, cb_arg);
+}
+
+int
+spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, void *md,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *output = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < g_max_io_size);
+ set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
+ SPDK_BDEV_IO_TYPE_READ);
+ g_io_output_index++;
+
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, true, cb_arg);
+
+ return 0;
+}
+
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+
+ return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks,
+ cb, cb_arg);
+}
+
+static void
+verify_config_present(const char *name, bool presence)
+{
+ struct bdev_zone_block_config *cfg;
+ bool cfg_found;
+
+ cfg_found = false;
+
+ TAILQ_FOREACH(cfg, &g_bdev_configs, link) {
+ if (cfg->vbdev_name != NULL) {
+ if (strcmp(name, cfg->vbdev_name) == 0) {
+ cfg_found = true;
+ break;
+ }
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(cfg_found == true);
+ } else {
+ CU_ASSERT(cfg_found == false);
+ }
+}
+
+static void
+verify_bdev_present(const char *name, bool presence)
+{
+ struct bdev_zone_block *bdev;
+ bool bdev_found = false;
+
+ TAILQ_FOREACH(bdev, &g_bdev_nodes, link) {
+ if (strcmp(bdev->bdev.name, name) == 0) {
+ bdev_found = true;
+ break;
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(bdev_found == true);
+ } else {
+ CU_ASSERT(bdev_found == false);
+ }
+}
+
+static void
+initialize_create_req(const char *vbdev_name, const char *base_name,
+ uint64_t zone_capacity, uint64_t optimal_open_zones, bool create_base_bdev)
+{
+ struct rpc_construct_zone_block *r;
+
+ r = g_rpc_req = calloc(1, sizeof(struct rpc_construct_zone_block));
+ SPDK_CU_ASSERT_FATAL(r != NULL);
+
+ r->name = strdup(vbdev_name);
+ SPDK_CU_ASSERT_FATAL(r->name != NULL);
+ r->base_bdev = strdup(base_name);
+ SPDK_CU_ASSERT_FATAL(r->base_bdev != NULL);
+ r->zone_capacity = zone_capacity;
+ r->optimal_open_zones = optimal_open_zones;
+
+ if (create_base_bdev == true) {
+ create_nvme_bdev();
+ }
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+}
+
+static void
+free_create_req(void)
+{
+ struct rpc_construct_zone_block *r = g_rpc_req;
+
+ free(r->name);
+ free(r->base_bdev);
+ free(r);
+ g_rpc_req = NULL;
+}
+
+static void
+initialize_delete_req(const char *vbdev_name)
+{
+ struct rpc_delete_zone_block *r;
+
+ r = g_rpc_req = calloc(1, sizeof(struct rpc_delete_zone_block));
+ SPDK_CU_ASSERT_FATAL(r != NULL);
+ r->name = strdup(vbdev_name);
+ SPDK_CU_ASSERT_FATAL(r->name != NULL);
+
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+}
+
+static void
+free_delete_req(void)
+{
+ struct rpc_delete_zone_block *r = g_rpc_req;
+
+ free(r->name);
+ free(r);
+ g_rpc_req = NULL;
+}
+
+static void
+verify_zone_config(bool presence)
+{
+ struct rpc_construct_zone_block *r = g_rpc_req;
+ struct bdev_zone_block_config *cfg = NULL;
+
+ TAILQ_FOREACH(cfg, &g_bdev_configs, link) {
+ if (strcmp(r->name, cfg->vbdev_name) == 0) {
+ if (presence == false) {
+ break;
+ }
+ CU_ASSERT(strcmp(r->base_bdev, cfg->bdev_name) == 0);
+ CU_ASSERT(r->zone_capacity == cfg->zone_capacity);
+ CU_ASSERT(spdk_max(r->optimal_open_zones, 1) == cfg->optimal_open_zones);
+ break;
+ }
+ }
+
+ if (presence) {
+ CU_ASSERT(cfg != NULL);
+ } else {
+ CU_ASSERT(cfg == NULL);
+ }
+}
+
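+/* The expected geometry checked below mirrors what the vbdev is assumed to do:
+ * the zone size is the requested zone capacity rounded up to a power of two
+ * (spdk_align64pow2), and an optimal_open_zones of 0 is normalized to 1 via
+ * spdk_max(). */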
+static void
+verify_zone_bdev(bool presence)
+{
+ struct rpc_construct_zone_block *r = g_rpc_req;
+ struct block_zone *zone;
+ struct bdev_zone_block *bdev;
+ bool bdev_found = false;
+ uint32_t i;
+ uint64_t expected_num_zones;
+ uint64_t expected_optimal_open_zones;
+
+ TAILQ_FOREACH(bdev, &g_bdev_nodes, link) {
+ if (strcmp(bdev->bdev.name, r->name) == 0) {
+ bdev_found = true;
+ if (presence == false) {
+ break;
+ }
+
+ expected_optimal_open_zones = spdk_max(r->optimal_open_zones, 1);
+ expected_num_zones = g_block_cnt / spdk_align64pow2(r->zone_capacity) / expected_optimal_open_zones;
+ expected_num_zones *= expected_optimal_open_zones;
+
+ CU_ASSERT(bdev->num_zones == expected_num_zones);
+ CU_ASSERT(bdev->bdev.zoned == true);
+ CU_ASSERT(bdev->bdev.blockcnt == expected_num_zones * spdk_align64pow2(r->zone_capacity));
+ CU_ASSERT(bdev->bdev.blocklen == BLOCK_SIZE);
+ CU_ASSERT(bdev->bdev.ctxt == bdev);
+ CU_ASSERT(bdev->bdev.fn_table == &zone_block_fn_table);
+ CU_ASSERT(bdev->bdev.module == &bdev_zoned_if);
+ CU_ASSERT(bdev->bdev.write_unit_size == 1);
+ CU_ASSERT(bdev->bdev.zone_size == spdk_align64pow2(r->zone_capacity));
+ CU_ASSERT(bdev->bdev.optimal_open_zones == expected_optimal_open_zones);
+ CU_ASSERT(bdev->bdev.max_open_zones == 0);
+
+ for (i = 0; i < bdev->num_zones; i++) {
+ zone = &bdev->zones[i];
+ CU_ASSERT(zone->zone_info.state == SPDK_BDEV_ZONE_STATE_FULL);
+ CU_ASSERT(zone->zone_info.capacity == r->zone_capacity);
+ }
+ break;
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(bdev_found == true);
+ } else {
+ CU_ASSERT(bdev_found == false);
+ }
+}
+
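+/* RPC helpers: g_rpc_err is reset to 0 when a request is initialized and set to
+ * 1 by the error-response stubs, so CU_ASSERT(g_rpc_err != success) passes
+ * exactly when the RPC outcome matches the expected success flag. */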
+static void
+send_create_vbdev(char *vdev_name, char *name, uint64_t zone_capacity, uint64_t optimal_open_zones,
+ bool create_bdev, bool success)
+{
+ initialize_create_req(vdev_name, name, zone_capacity, optimal_open_zones, create_bdev);
+ rpc_zone_block_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err != success);
+ verify_zone_config(success);
+ verify_zone_bdev(success);
+ free_create_req();
+}
+
+static void
+send_delete_vbdev(char *name, bool success)
+{
+ initialize_delete_req(name);
+ rpc_zone_block_delete(NULL, NULL);
+ verify_config_present(name, false);
+ verify_bdev_present(name, false);
+ CU_ASSERT(g_rpc_err != success);
+ free_delete_req();
+}
+
+static void
+test_cleanup(void)
+{
+ CU_ASSERT(spdk_thread_is_idle(g_thread));
+ zone_block_finish();
+ base_bdevs_cleanup();
+ free_test_globals();
+}
+
+static void
+test_zone_block_create(void)
+{
+ struct spdk_bdev *bdev;
+ char *name = "Nvme0n1";
+ size_t num_zones = 16;
+ size_t zone_capacity = BLOCK_CNT / num_zones;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zoned virtual device before nvme device */
+ verify_config_present("zone_dev1", false);
+ verify_bdev_present("zone_dev1", false);
+ initialize_create_req("zone_dev1", name, zone_capacity, 1, false);
+ rpc_zone_block_create(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_zone_config(true);
+ verify_zone_bdev(false);
+ bdev = create_nvme_bdev();
+ zone_block_examine(bdev);
+ verify_zone_bdev(true);
+ free_create_req();
+
+ /* Delete bdev */
+ send_delete_vbdev("zone_dev1", true);
+
+ /* Create zoned virtual device and verify its correctness */
+ verify_config_present("zone_dev1", false);
+ send_create_vbdev("zone_dev1", name, zone_capacity, 1, false, true);
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ test_cleanup();
+}
+
+static void
+test_zone_block_create_invalid(void)
+{
+ char *name = "Nvme0n1";
+ size_t num_zones = 8;
+ size_t zone_capacity = BLOCK_CNT / num_zones;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zoned virtual device and verify its correctness */
+ verify_config_present("zone_dev1", false);
+ verify_bdev_present("zone_dev1", false);
+ send_create_vbdev("zone_dev1", name, zone_capacity, 1, true, true);
+
+ /* Try to create another zoned virtual device on the same bdev */
+ send_create_vbdev("zone_dev2", name, zone_capacity, 1, false, false);
+
+ /* Try to create zoned virtual device on the zoned bdev */
+ send_create_vbdev("zone_dev2", "zone_dev1", zone_capacity, 1, false, false);
+
+ /* Unclaim the base bdev */
+ send_delete_vbdev("zone_dev1", true);
+
+ /* Try to create zoned virtual device with 0 zone size */
+ send_create_vbdev("zone_dev1", name, 0, 1, false, false);
+
+ /* Try to create zoned virtual device with 0 optimal number of zones */
+ send_create_vbdev("zone_dev1", name, zone_capacity, 0, false, false);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ test_cleanup();
+}
+
+static void
+bdev_io_zone_info_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
+ uint64_t zone_id, uint32_t num_zones)
+{
+ bdev_io->bdev = bdev;
+ bdev_io->type = SPDK_BDEV_IO_TYPE_GET_ZONE_INFO;
+
+ bdev_io->u.zone_mgmt.zone_id = zone_id;
+
+ bdev_io->u.zone_mgmt.num_zones = num_zones;
+ if (num_zones) {
+ bdev_io->u.zone_mgmt.buf = calloc(num_zones, sizeof(struct spdk_bdev_zone_info));
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.zone_mgmt.buf != NULL);
+ }
+}
+
+static void
+bdev_io_zone_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
+ uint64_t zone_id, uint32_t num_zones, uint8_t zone_action)
+{
+ bdev_io->bdev = bdev;
+ bdev_io->type = SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT;
+
+ bdev_io->u.zone_mgmt.zone_action = zone_action;
+ bdev_io->u.zone_mgmt.zone_id = zone_id;
+}
+
+static void
+bdev_io_zone_cleanup(struct spdk_bdev_io *bdev_io)
+{
+ free(bdev_io->u.zone_mgmt.buf);
+ free(bdev_io);
+}
+
+static void
+bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
+ uint64_t lba, uint64_t blocks, int16_t iotype)
+{
+ bdev_io->bdev = bdev;
+ bdev_io->u.bdev.offset_blocks = lba;
+ bdev_io->u.bdev.num_blocks = blocks;
+ bdev_io->type = iotype;
+
+ if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
+ return;
+ }
+
+ bdev_io->u.bdev.iovcnt = 1;
+ bdev_io->u.bdev.iovs = &bdev_io->iov;
+ bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * BLOCK_SIZE);
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
+ bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_SIZE;
+}
+
+static void
+bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
+{
+ free(bdev_io->iov.iov_base);
+ free(bdev_io);
+}
+
+static struct bdev_zone_block *
+create_and_get_vbdev(char *vdev_name, char *name, uint64_t num_zones, uint64_t optimal_open_zones,
+ bool create_bdev)
+{
+ size_t zone_size = g_block_cnt / num_zones;
+ struct bdev_zone_block *bdev = NULL;
+
+ send_create_vbdev(vdev_name, name, zone_size, optimal_open_zones, create_bdev, true);
+
+ TAILQ_FOREACH(bdev, &g_bdev_nodes, link) {
+ if (strcmp(bdev->bdev.name, vdev_name) == 0) {
+ break;
+ }
+ }
+
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ return bdev;
+}
+
+static void
+test_supported_io_types(void)
+{
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 8;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT) == true);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_APPEND) == true);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_READ) == true);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE) == true);
+
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_RESET) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) == false);
+ CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY) == false);
+
+ send_delete_vbdev("zone_dev1", true);
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ test_cleanup();
+}
+
+static void
+send_zone_info(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint64_t wp,
+ enum spdk_bdev_zone_state state, uint32_t output_index, bool success)
+{
+ struct spdk_bdev_io *bdev_io;
+ struct spdk_bdev_zone_info *info;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, zone_id, 1);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = output_index;
+
+ g_io_comp_status = !success;
+ zone_block_submit_request(ch, bdev_io);
+ CU_ASSERT(g_io_comp_status == success);
+
+ if (success) {
+ info = (struct spdk_bdev_zone_info *)bdev_io->u.zone_mgmt.buf;
+ CU_ASSERT(info->zone_id == zone_id);
+ CU_ASSERT(info->capacity == bdev->zone_capacity);
+ CU_ASSERT(info->write_pointer == wp);
+ CU_ASSERT(info->state == state);
+ }
+
+ bdev_io_zone_cleanup(bdev_io);
+}
+
+static void
+test_get_zone_info(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ struct spdk_bdev_io *bdev_io;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 8, i;
+ struct spdk_bdev_zone_info *info;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Get info about each zone */
+ for (i = 0; i < num_zones; i++) {
+ send_zone_info(bdev, ch, i * bdev->bdev.zone_size,
+ i * bdev->bdev.zone_size + bdev->zone_capacity, SPDK_BDEV_ZONE_STATE_FULL, 0, true);
+ }
+
+ /* Send info asking for 0 zones */
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, 0);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = 0;
+ zone_block_submit_request(ch, bdev_io);
+ CU_ASSERT(g_io_comp_status);
+ bdev_io_zone_cleanup(bdev_io);
+
+ /* Send info asking for all zones */
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, num_zones);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = 0;
+ zone_block_submit_request(ch, bdev_io);
+ CU_ASSERT(g_io_comp_status);
+
+ for (i = 0; i < num_zones; i++) {
+ info = &(((struct spdk_bdev_zone_info *)bdev_io->u.zone_mgmt.buf)[i]);
+ CU_ASSERT(info->zone_id == i * bdev->bdev.zone_size);
+ CU_ASSERT(info->capacity == bdev->zone_capacity);
+ CU_ASSERT(info->write_pointer == i * bdev->bdev.zone_size + bdev->zone_capacity);
+ CU_ASSERT(info->state == SPDK_BDEV_ZONE_STATE_FULL);
+ }
+ bdev_io_zone_cleanup(bdev_io);
+
+ /* Send info asking for too many zones */
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, num_zones + 1);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = 0;
+ zone_block_submit_request(ch, bdev_io);
+ CU_ASSERT(!g_io_comp_status);
+ bdev_io_zone_cleanup(bdev_io);
+
+ /* Send info with misaligned start LBA */
+ send_zone_info(bdev, ch, 1, 0, SPDK_BDEV_ZONE_STATE_FULL, 0, false);
+
+ /* Send info with too high LBA */
+ send_zone_info(bdev, ch, num_zones * bdev->bdev.zone_size, 0, SPDK_BDEV_ZONE_STATE_FULL, 0,
+ false);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
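+/* Zone management helpers: send_zone_management() builds a ZONE_MANAGEMENT
+ * bdev_io for a single zone, submits it and checks g_io_comp_status against the
+ * expected result; the reset/open/close/finish wrappers below just select the
+ * corresponding zone action. */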
+static void
+send_zone_management(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint32_t output_index, enum spdk_bdev_zone_action action, bool success)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_zone_initialize(bdev_io, &bdev->bdev, zone_id, 1, action);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = output_index;
+
+ g_io_comp_status = !success;
+ zone_block_submit_request(ch, bdev_io);
+
+ CU_ASSERT(g_io_comp_status == success);
+ bdev_io_zone_cleanup(bdev_io);
+}
+
+static void
+send_reset_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint32_t output_index, bool success)
+{
+ send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_RESET, success);
+}
+
+static void
+send_open_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint32_t output_index, bool success)
+{
+ send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_OPEN, success);
+}
+
+static void
+send_close_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint32_t output_index, bool success)
+{
+ send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_CLOSE, success);
+}
+
+static void
+send_finish_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
+ uint32_t output_index, bool success)
+{
+ send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_FINISH, success);
+}
+
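+/*
+ * Reset the first, last and an already-reset zone and verify they become EMPTY;
+ * resets with a misaligned or out-of-range LBA must fail.
+ */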
+static void
+test_reset_zone(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 16;
+ uint64_t zone_id;
+ uint32_t output_index = 0;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Send reset to zone 0 */
+ zone_id = 0;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+
+ /* Send reset to last zone */
+ zone_id = (num_zones - 1) * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+
+ /* Send reset with misaligned LBA */
+ zone_id = 1;
+ send_reset_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send reset to non-existing zone */
+ zone_id = num_zones * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send reset to an already-reset zone */
+ zone_id = 0;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
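+/*
+ * Submit a WRITE of 'blocks' blocks starting at 'lba' and verify that the
+ * completion status matches the expected result.
+ */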
+static void
+send_write_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
+ uint64_t blocks, uint32_t output_index, bool success)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = output_index;
+
+ g_io_comp_status = !success;
+ zone_block_submit_request(ch, bdev_io);
+
+ CU_ASSERT(g_io_comp_status == success);
+ bdev_io_cleanup(bdev_io);
+}
+
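+/*
+ * Submit a READ of 'blocks' blocks starting at 'lba' and verify that the
+ * completion status matches the expected result.
+ */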
+static void
+send_read_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
+ uint64_t blocks, uint32_t output_index, bool success)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = output_index;
+
+ g_io_comp_status = !success;
+ zone_block_submit_request(ch, bdev_io);
+
+ CU_ASSERT(g_io_comp_status == success);
+ bdev_io_cleanup(bdev_io);
+}
+
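+/*
+ * Submit a ZONE_APPEND of 'blocks' blocks to the zone starting at 'lba' and,
+ * on success, verify that the returned offset matches the expected write
+ * pointer 'wp'.
+ */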
+static void
+send_append_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
+ uint64_t blocks, uint32_t output_index, bool success, uint64_t wp)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_ZONE_APPEND);
+ memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+ g_io_output_index = output_index;
+
+ g_io_comp_status = !success;
+ zone_block_submit_request(ch, bdev_io);
+
+ CU_ASSERT(g_io_comp_status == success);
+ if (success) {
+ CU_ASSERT(bdev_io->u.bdev.offset_blocks == wp);
+ }
+ bdev_io_cleanup(bdev_io);
+}
+
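+/*
+ * Opening a full zone must fail; opening empty or already-open zones succeeds,
+ * while a misaligned or out-of-range LBA fails.
+ */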
+static void
+test_open_zone(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 16;
+ uint64_t zone_id;
+ uint32_t output_index = 0, i;
+
+ init_test_globals(BLOCK_CNT);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Try to open full zone */
+ zone_id = 0;
+ send_open_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Open all zones */
+ for (i = 0; i < num_zones; i++) {
+ zone_id = i * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+ }
+ for (i = 0; i < num_zones; i++) {
+ zone_id = i * bdev->bdev.zone_size;
+ send_open_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+ }
+
+ /* Reset one of the zones and open it again */
+ zone_id = 0;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+ send_open_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Send open with misaligned LBA */
+ zone_id = 0;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ zone_id = 1;
+ send_open_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send open to non-existing zone */
+ zone_id = num_zones * bdev->bdev.zone_size;
+ send_open_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send open to already opened zone */
+ zone_id = bdev->bdev.zone_size;
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+ send_open_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
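+/*
+ * Writes to full zones, out-of-range LBAs, wrong write pointers or across a
+ * zone boundary must fail; sequential writes open a zone and eventually move
+ * it to the FULL state.
+ */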
+static void
+test_zone_write(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 20;
+ uint64_t zone_id, lba, block_len;
+ uint32_t output_index = 0, i;
+
+ init_test_globals(20 * 1024ul);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Write to full zone */
+ lba = 0;
+ send_write_zone(bdev, ch, lba, 1, output_index, false);
+
+ /* Write out of device range */
+ lba = g_block_cnt;
+ send_write_zone(bdev, ch, lba, 1, output_index, false);
+
+ /* Write 1 sector to zone 0 */
+ lba = 0;
+ send_reset_zone(bdev, ch, lba, output_index, true);
+ send_write_zone(bdev, ch, lba, 1, output_index, true);
+ send_zone_info(bdev, ch, lba, 1, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Write to another zone */
+ lba = bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, lba, output_index, true);
+ send_write_zone(bdev, ch, lba, 5, output_index, true);
+ send_zone_info(bdev, ch, lba, lba + 5, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Fill zone 0 and verify zone state change */
+ block_len = 15;
+ send_write_zone(bdev, ch, 1, block_len, output_index, true);
+ block_len = 16;
+ for (i = block_len; i < bdev->bdev.zone_size; i += block_len) {
+ send_write_zone(bdev, ch, i, block_len, output_index, true);
+ }
+ send_zone_info(bdev, ch, 0, bdev->bdev.zone_size, SPDK_BDEV_ZONE_STATE_FULL, output_index,
+ true);
+
+ /* Write to wrong write pointer */
+ lba = bdev->bdev.zone_size;
+ send_write_zone(bdev, ch, lba + 7, 1, output_index, false);
+ /* Write to already written sectors */
+ send_write_zone(bdev, ch, lba, 1, output_index, false);
+
+ /* Write to two zones at once */
+ for (i = 0; i < num_zones; i++) {
+ zone_id = i * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+ }
+ block_len = 16;
+ for (i = 0; i < bdev->bdev.zone_size - block_len; i += block_len) {
+ send_write_zone(bdev, ch, i, block_len, output_index, true);
+ }
+ send_write_zone(bdev, ch, bdev->bdev.zone_size - block_len, 32, output_index, false);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
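+/*
+ * Reads from full, empty and partially written zones succeed; reads outside
+ * the device or spanning two zones must fail.
+ */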
+static void
+test_zone_read(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 20;
+ uint64_t lba, block_len;
+ uint32_t output_index = 0;
+
+ init_test_globals(20 * 1024ul);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Read out of device range */
+ block_len = 16;
+ lba = g_block_cnt - block_len / 2;
+ send_read_zone(bdev, ch, lba, block_len, output_index, false);
+
+ block_len = 1;
+ lba = g_block_cnt;
+ send_read_zone(bdev, ch, lba, block_len, output_index, false);
+
+ /* Read from full zone */
+ lba = 0;
+ send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+ /* Read from empty zone */
+ send_reset_zone(bdev, ch, lba, output_index, true);
+ send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+ /* Read written sectors from open zone */
+ send_write_zone(bdev, ch, lba, 1, output_index, true);
+ send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+ /* Read partially written sectors from open zone */
+ send_read_zone(bdev, ch, lba, 2, output_index, true);
+
+ /* Read unwritten sectors from open zone */
+ lba = 2;
+ send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+ /* Read from two zones at once */
+ block_len = 16;
+ lba = bdev->bdev.zone_size - block_len / 2;
+ send_read_zone(bdev, ch, lba, block_len, output_index, false);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+ test_cleanup();
+}
+
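+/*
+ * Closing a full or empty zone must fail; closing open (or already closed)
+ * zones succeeds, while a misaligned or out-of-range LBA fails.
+ */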
+static void
+test_close_zone(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 20;
+ uint64_t zone_id;
+ uint32_t output_index = 0;
+
+ init_test_globals(20 * 1024ul);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Try to close a full zone */
+ zone_id = 0;
+ send_close_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Try to close an empty zone */
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_close_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Close an open zone */
+ send_open_zone(bdev, ch, zone_id, output_index, true);
+ send_close_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
+
+ /* Close a closed zone */
+ send_close_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
+
+ /* Send close to last zone */
+ zone_id = (num_zones - 1) * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_open_zone(bdev, ch, zone_id, output_index, true);
+ send_close_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
+
+ /* Send close with misaligned LBA */
+ zone_id = 1;
+ send_close_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send close to non-existing zone */
+ zone_id = num_zones * bdev->bdev.zone_size;
+ send_close_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+ test_cleanup();
+}
+
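+/*
+ * Finishing a full, empty or open zone moves it to the FULL state with the
+ * write pointer at the end of the zone; a misaligned or out-of-range LBA fails.
+ */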
+static void
+test_finish_zone(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 20;
+ uint64_t zone_id, wp;
+ uint32_t output_index = 0;
+
+ init_test_globals(20 * 1024ul);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Reset an unused zone */
+ send_reset_zone(bdev, ch, bdev->bdev.zone_size, output_index, true);
+
+ /* Finish a full zone */
+ zone_id = 0;
+ wp = bdev->bdev.zone_size;
+ send_finish_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
+
+ /* Finish an empty zone */
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_finish_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
+
+ /* Finish an open zone */
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_write_zone(bdev, ch, zone_id, 1, output_index, true);
+ send_finish_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
+
+ /* Send finish with misaligned LBA */
+ zone_id = 1;
+ send_finish_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Send finish to non-existing zone */
+ zone_id = num_zones * bdev->bdev.zone_size;
+ send_finish_zone(bdev, ch, zone_id, output_index, false);
+
+ /* Make sure unused zone wasn't written to */
+ zone_id = bdev->bdev.zone_size;
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
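+/*
+ * Appends land at the current write pointer: appends to a full zone, outside
+ * the device or across a zone boundary must fail, and successful appends
+ * report the pre-append write pointer.
+ */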
+static void
+test_append_zone(void)
+{
+ struct spdk_io_channel *ch;
+ struct bdev_zone_block *bdev;
+ char *name = "Nvme0n1";
+ uint32_t num_zones = 20;
+ uint64_t zone_id, block_len, i;
+ uint32_t output_index = 0;
+
+ init_test_globals(20 * 1024ul);
+ CU_ASSERT(zone_block_init() == 0);
+
+ /* Create zone dev */
+ bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ /* Append to full zone */
+ zone_id = 0;
+ send_append_zone(bdev, ch, zone_id, 1, output_index, false, 0);
+
+ /* Append out of device range */
+ zone_id = g_block_cnt;
+ send_append_zone(bdev, ch, zone_id, 1, output_index, false, 0);
+
+ /* Append 1 sector to zone 0 */
+ zone_id = 0;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_append_zone(bdev, ch, zone_id, 1, output_index, true, zone_id);
+ send_zone_info(bdev, ch, zone_id, 1, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Append to another zone */
+ zone_id = bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_append_zone(bdev, ch, zone_id, 5, output_index, true, zone_id);
+ send_zone_info(bdev, ch, zone_id, zone_id + 5, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+ /* Fill zone 0 and verify zone state change */
+ zone_id = 0;
+ block_len = 15;
+ send_append_zone(bdev, ch, zone_id, block_len, output_index, true, 1);
+ block_len++;
+ for (i = block_len; i < bdev->zone_capacity; i += block_len) {
+ send_append_zone(bdev, ch, zone_id, block_len, output_index, true, i);
+ }
+ send_zone_info(bdev, ch, zone_id, bdev->bdev.zone_size, SPDK_BDEV_ZONE_STATE_FULL, output_index,
+ true);
+
+ /* Append to two zones at once */
+ for (i = 0; i < num_zones; i++) {
+ zone_id = i * bdev->bdev.zone_size;
+ send_reset_zone(bdev, ch, zone_id, output_index, true);
+ send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+ }
+
+ zone_id = 0;
+ block_len = 16;
+ for (i = 0; i < bdev->zone_capacity - block_len; i += block_len) {
+ send_append_zone(bdev, ch, zone_id, block_len, output_index, true, zone_id + i);
+ }
+ send_append_zone(bdev, ch, zone_id, 32, output_index, false, 0);
+ /* Delete zone dev */
+ send_delete_vbdev("zone_dev1", true);
+
+ while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+ free(ch);
+
+ test_cleanup();
+}
+
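+/*
+ * Register the zone_block test suite, run it on a dedicated SPDK thread and
+ * return the number of CUnit failures as the exit code.
+ */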
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("zone_block", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_zone_block_create);
+ CU_ADD_TEST(suite, test_zone_block_create_invalid);
+ CU_ADD_TEST(suite, test_get_zone_info);
+ CU_ADD_TEST(suite, test_supported_io_types);
+ CU_ADD_TEST(suite, test_reset_zone);
+ CU_ADD_TEST(suite, test_open_zone);
+ CU_ADD_TEST(suite, test_zone_write);
+ CU_ADD_TEST(suite, test_zone_read);
+ CU_ADD_TEST(suite, test_close_zone);
+ CU_ADD_TEST(suite, test_finish_zone);
+ CU_ADD_TEST(suite, test_append_zone);
+
+ g_thread = spdk_thread_create("test", NULL);
+ spdk_set_thread(g_thread);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ set_test_opts();
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+
+ spdk_thread_exit(g_thread);
+ while (!spdk_thread_is_exited(g_thread)) {
+ spdk_thread_poll(g_thread, 0, 0);
+ }
+ spdk_thread_destroy(g_thread);
+
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blob/Makefile b/src/spdk/test/unit/lib/blob/Makefile
new file mode 100644
index 000000000..a039a423e
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/Makefile
@@ -0,0 +1,49 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+CUNIT_VERSION = $(shell echo "\#include <CUnit/CUnit.h>" | $(CC) -E -dM - | sed -n -e 's/.*VERSION "\([0-9\.\-]*\).*/\1/p')
+ifeq ($(CUNIT_VERSION),2.1-3)
+DIRS-y = blob.c
+else
+$(warning "blob_ut.c compilation skipped, only CUnit version 2.1-3 is supported")
+endif
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/blob/blob.c/.gitignore b/src/spdk/test/unit/lib/blob/blob.c/.gitignore
new file mode 100644
index 000000000..553f54655
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/blob.c/.gitignore
@@ -0,0 +1 @@
+blob_ut
diff --git a/src/spdk/test/unit/lib/blob/blob.c/Makefile b/src/spdk/test/unit/lib/blob/blob.c/Makefile
new file mode 100644
index 000000000..fc449a5c8
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/blob.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = blob_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c b/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c
new file mode 100644
index 000000000..6e51842e3
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c
@@ -0,0 +1,6693 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk/blob.h"
+#include "spdk/string.h"
+#include "spdk_internal/thread.h"
+
+#include "common/lib/ut_multithread.c"
+#include "../bs_dev_common.c"
+#include "blob/blobstore.c"
+#include "blob/request.c"
+#include "blob/zeroes.c"
+#include "blob/blob_bs_dev.c"
+
+struct spdk_blob_store *g_bs;
+spdk_blob_id g_blobid;
+struct spdk_blob *g_blob;
+int g_bserrno;
+struct spdk_xattr_names *g_names;
+int g_done;
+char *g_xattr_names[] = {"first", "second", "third"};
+char *g_xattr_values[] = {"one", "two", "three"};
+uint64_t g_ctx = 1729;
+bool g_use_extent_table = false;
+
+struct spdk_bs_super_block_ver1 {
+ uint8_t signature[8];
+ uint32_t version;
+ uint32_t length;
+ uint32_t clean; /* If there was a clean shutdown, this is 1. */
+ spdk_blob_id super_blob;
+
+ uint32_t cluster_size; /* In bytes */
+
+ uint32_t used_page_mask_start; /* Offset from beginning of disk, in pages */
+ uint32_t used_page_mask_len; /* Count, in pages */
+
+ uint32_t used_cluster_mask_start; /* Offset from beginning of disk, in pages */
+ uint32_t used_cluster_mask_len; /* Count, in pages */
+
+ uint32_t md_start; /* Offset from beginning of disk, in pages */
+ uint32_t md_len; /* Count, in pages */
+
+ uint8_t reserved[4036];
+ uint32_t crc;
+} __attribute__((packed));
+SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
+
+static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
+ struct spdk_blob_opts *blob_opts);
+static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
+static void suite_blob_setup(void);
+static void suite_blob_cleanup(void);
+
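+/*
+ * Test xattr callback: return the entry of g_xattr_values that corresponds to
+ * 'name' in g_xattr_names.
+ */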
+static void
+_get_xattr_value(void *arg, const char *name,
+ const void **value, size_t *value_len)
+{
+ uint64_t i;
+
+ SPDK_CU_ASSERT_FATAL(value_len != NULL);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(arg == &g_ctx);
+
+ for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
+ if (!strcmp(name, g_xattr_names[i])) {
+ *value_len = strlen(g_xattr_values[i]);
+ *value = g_xattr_values[i];
+ break;
+ }
+ }
+}
+
+static void
+_get_xattr_value_null(void *arg, const char *name,
+ const void **value, size_t *value_len)
+{
+ SPDK_CU_ASSERT_FATAL(value_len != NULL);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(arg == NULL);
+
+ *value_len = 0;
+ *value = NULL;
+}
+
+static int
+_get_snapshots_count(struct spdk_blob_store *bs)
+{
+ struct spdk_blob_list *snapshot = NULL;
+ int count = 0;
+
+ TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
+ count += 1;
+ }
+
+ return count;
+}
+
+static void
+ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
+{
+ spdk_blob_opts_init(opts);
+ opts->use_extent_table = g_use_extent_table;
+}
+
+static void
+bs_op_complete(void *cb_arg, int bserrno)
+{
+ g_bserrno = bserrno;
+}
+
+static void
+bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
+ int bserrno)
+{
+ g_bs = bs;
+ g_bserrno = bserrno;
+}
+
+static void
+blob_op_complete(void *cb_arg, int bserrno)
+{
+ g_bserrno = bserrno;
+}
+
+static void
+blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
+{
+ g_blobid = blobid;
+ g_bserrno = bserrno;
+}
+
+static void
+blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
+{
+ g_blob = blb;
+ g_bserrno = bserrno;
+}
+
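+/* Cleanly unload the blobstore and load it back from the test device. */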
+static void
+ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
+{
+ struct spdk_bs_dev *dev;
+
+ /* Unload the blob store */
+ spdk_bs_unload(*bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ dev = init_dev();
+ /* Load an existing blob store */
+ spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ *bs = g_bs;
+
+ g_bserrno = -1;
+}
+
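+/*
+ * Simulate a dirty shutdown by freeing the blobstore without unloading it,
+ * then load it back from the test device.
+ */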
+static void
+ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
+{
+ struct spdk_bs_dev *dev;
+
+ /* Dirty shutdown */
+ bs_free(*bs);
+
+ dev = init_dev();
+ /* Load an existing blob store */
+ spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ *bs = g_bs;
+
+ g_bserrno = -1;
+}
+
+static void
+blob_init(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+
+ /* should fail for an unsupported blocklen */
+ dev->blocklen = 500;
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ dev = init_dev();
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_super(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ spdk_blob_id blobid;
+ struct spdk_blob_opts blob_opts;
+
+ /* Get the super blob without having set one */
+ spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOENT);
+ CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
+
+ /* Create a blob */
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ /* Set the blob as the super blob */
+ spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Get the super blob */
+ spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(blobid == g_blobid);
+}
+
+static void
+blob_open(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts blob_opts;
+ spdk_blob_id blobid, blobid2;
+
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ blobid2 = spdk_blob_get_id(blob);
+ CU_ASSERT(blobid == blobid2);
+
+ /* Try to open the blob again. It should return success. */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(blob == g_blob);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+ * Close the blob a second time, releasing the second reference. This
+ * should succeed.
+ */
+ blob = g_blob;
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+ * Try to open the blob again. It should succeed. This tests the case
+ * where the blob is opened, closed, then re-opened.
+ */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_create(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+
+ /* Create blob with 10 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create blob with 0 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 0;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create blob with default options (opts == NULL) */
+
+ spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to create blob with size larger than blobstore */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = bs->total_clusters + 1;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOSPC);
+}
+
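+/*
+ * Creating a blob with xattr names but a NULL get_value callback must fail
+ * without leaking blob IDs or metadata pages, even across a reload.
+ */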
+static void
+blob_create_fail(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
+ uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
+
+ /* NULL callback */
+ ut_spdk_blob_opts_init(&opts);
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = NULL;
+ opts.xattrs.count = 1;
+ opts.xattrs.ctx = &g_ctx;
+
+ blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
+ CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOENT);
+ SPDK_CU_ASSERT_FATAL(g_blob == NULL);
+
+ ut_bs_reload(&bs, NULL);
+ CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
+ CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
+
+ spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_blob == NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+}
+
+static void
+blob_create_internal(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ struct spdk_blob_xattr_opts internal_xattrs;
+ const void *value;
+ size_t value_len;
+ spdk_blob_id blobid;
+ int rc;
+
+ /* Create blob with custom xattrs */
+
+ ut_spdk_blob_opts_init(&opts);
+ blob_xattrs_init(&internal_xattrs);
+ internal_xattrs.count = 3;
+ internal_xattrs.names = g_xattr_names;
+ internal_xattrs.get_value = _get_xattr_value;
+ internal_xattrs.ctx = &g_ctx;
+
+ bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create blob with NULL internal options */
+
+ bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
+
+ blob = g_blob;
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+}
+
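+/*
+ * Create a thin-provisioned blob and verify that the SPDK_BLOB_THIN_PROV flag
+ * survives a dirty shutdown and reload.
+ */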
+static void
+blob_thin_provision(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ struct spdk_bs_opts bs_opts;
+ spdk_blob_id blobid;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ bs = g_bs;
+
+ /* Create blob with thin provisioning enabled */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Do not shut down cleanly. This makes sure that when we load again
+ * and try to recover a valid used_cluster map, the blobstore ignores
+ * clusters with index 0, since these are unallocated clusters.
+ */
+ ut_bs_dirty_load(&bs, &bs_opts);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
+
+ ut_blob_close_and_delete(bs, blob);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
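+/*
+ * Create a snapshot chain (the blob is backed by snapshot2, which is backed by
+ * snapshot), verify the backing-device and clone relationships, and check that
+ * taking a snapshot of a snapshot fails.
+ */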
+static void
+blob_snapshot(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob *snapshot, *snapshot2;
+ struct spdk_blob_bs_dev *blob_bs_dev;
+ struct spdk_blob_opts opts;
+ struct spdk_blob_xattr_opts xattrs;
+ spdk_blob_id blobid;
+ spdk_blob_id snapshotid;
+ spdk_blob_id snapshotid2;
+ const void *value;
+ size_t value_len;
+ int rc;
+ spdk_blob_id ids[2];
+ size_t count;
+
+ /* Create blob with 10 clusters */
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ /* Create snapshot from blob */
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+ CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
+ CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
+ blob->active.num_clusters * sizeof(blob->active.clusters[0])));
+
+ /* Try to create snapshot from clone with xattrs */
+ xattrs.names = g_xattr_names;
+ xattrs.get_value = _get_xattr_value;
+ xattrs.count = 3;
+ xattrs.ctx = &g_ctx;
+ spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
+ snapshotid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+ CU_ASSERT(snapshot2->data_ro == true);
+ CU_ASSERT(snapshot2->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
+
+ /* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
+ CU_ASSERT(snapshot->back_bs_dev == NULL);
+ SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
+ SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
+
+ blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
+ CU_ASSERT(blob_bs_dev->blob == snapshot2);
+
+ blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
+ CU_ASSERT(blob_bs_dev->blob == snapshot);
+
+ rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+ /* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == snapshotid2);
+
+ /* Try to create snapshot from snapshot */
+ spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
+
+ /* Delete blob and confirm that it is no longer on snapshot2 clone list */
+ ut_blob_close_and_delete(bs, blob);
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
+ CU_ASSERT(count == 0);
+
+ /* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
+ ut_blob_close_and_delete(bs, snapshot2);
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
+ CU_ASSERT(count == 0);
+
+ ut_blob_close_and_delete(bs, snapshot);
+ CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
+}
+
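+/*
+ * Verify that blob I/O submitted while a snapshot is being created is queued
+ * (frozen) and completes correctly once the snapshot operation finishes.
+ */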
+static void
+blob_snapshot_freeze_io(void)
+{
+ struct spdk_io_channel *channel;
+ struct spdk_bs_channel *bs_channel;
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint32_t num_of_pages = 10;
+ uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
+ uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
+ uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ memset(payload_read, 0x00, sizeof(payload_read));
+ memset(payload_zero, 0x00, sizeof(payload_zero));
+
+ /* Test freeze I/O during snapshot */
+ channel = spdk_bs_alloc_io_channel(bs);
+ bs_channel = spdk_io_channel_get_ctx(channel);
+
+ /* Create blob with 10 clusters */
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+ opts.thin_provision = false;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+
+ /* This is implementation specific.
+ * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
+ * Four async I/O operations happen before that. */
+ poll_thread_times(0, 3);
+
+ CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
+
+ /* Blob I/O should be frozen here */
+ CU_ASSERT(blob->frozen_refcnt == 1);
+
+ /* Write to the blob */
+ spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
+
+ /* Verify that I/O is queued */
+ CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
+ /* Verify that payload is not written to disk */
+ CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0]*SPDK_BS_PAGE_SIZE],
+ SPDK_BS_PAGE_SIZE) == 0);
+
+ /* Finish all operations including spdk_bs_create_snapshot */
+ poll_threads();
+
+ /* Verify snapshot */
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ /* Verify that blob has unset frozen_io */
+ CU_ASSERT(blob->frozen_refcnt == 0);
+
+ /* Verify that postponed I/O completed successfully by comparing payload */
+ spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_clone(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot, *clone;
+ spdk_blob_id blobid, cloneid, snapshotid;
+ struct spdk_blob_xattr_opts xattrs;
+ const void *value;
+ size_t value_len;
+ int rc;
+
+ /* Create blob with 10 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ /* Create snapshot */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create clone from snapshot with xattrs */
+ xattrs.names = g_xattr_names;
+ xattrs.get_value = _get_xattr_value;
+ xattrs.count = 3;
+ xattrs.ctx = &g_ctx;
+
+ spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+ CU_ASSERT(clone->data_ro == false);
+ CU_ASSERT(clone->md_ro == false);
+ CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
+
+ rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to create a clone from a blob that is not read-only */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
+
+ /* Mark blob as read only */
+ spdk_blob_set_read_only(blob);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create clone from read only blob */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+ CU_ASSERT(clone->data_ro == false);
+ CU_ASSERT(clone->md_ro == false);
+ CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
+
+ ut_blob_close_and_delete(bs, clone);
+ ut_blob_close_and_delete(bs, blob);
+}
+
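+/*
+ * Exercise spdk_bs_inflate_blob() and spdk_bs_blob_decouple_parent() on a
+ * thin-provisioned blob, both with and without a parent snapshot.
+ */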
+static void
+_blob_inflate(bool decouple_parent)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ spdk_blob_id blobid, snapshotid;
+ struct spdk_io_channel *channel;
+ uint64_t free_clusters;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob with 10 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+ opts.thin_provision = true;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
+
+ /* 1) Blob with no parent */
+ if (decouple_parent) {
+ /* Decouple parent of blob with no parent (should fail) */
+ spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+ } else {
+ /* Inflating a thin blob with no parent should make it thick */
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
+ }
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ /* 2) Blob with parent */
+ if (!decouple_parent) {
+ /* Do full blob inflation */
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* all 10 clusters should be allocated */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
+ } else {
+ /* Decouple parent of blob */
+ spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* when only the parent is removed, none of the clusters should be allocated */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
+ }
+
+ /* Now, it should be possible to delete snapshot */
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_inflate(void)
+{
+ _blob_inflate(false);
+ _blob_inflate(true);
+}
+
+static void
+blob_delete(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts blob_opts;
+ spdk_blob_id blobid;
+
+ /* Create a blob and then delete it. */
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid > 0);
+ blobid = g_blobid;
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to open the blob */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOENT);
+}
+
+static void
+blob_resize_test(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ uint64_t free_clusters;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ blob = ut_blob_create_and_open(bs, NULL);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ /* Confirm that resize fails if blob is marked read-only. */
+ blob->md_ro = true;
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EPERM);
+ blob->md_ro = false;
+
+ /* The blob started at 0 clusters. Resize it to be 5. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
+
+ /* Shrink the blob to 3 clusters. This will not actually release
+ * the old clusters until the blob is synced.
+ */
+ spdk_blob_resize(blob, 3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Verify there are still 5 clusters in use */
+ CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Now there are only 3 clusters in use */
+ CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
+
+ /* Resize the blob to be 10 clusters. Growth takes effect immediately. */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
+
+ /* Try to resize the blob to size larger than blobstore. */
+ spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOSPC);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_read_only(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_bs_opts opts;
+ spdk_blob_id blobid;
+ int rc;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ blob = ut_blob_create_and_open(bs, NULL);
+ blobid = spdk_blob_get_id(blob);
+
+ rc = spdk_blob_set_read_only(blob);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(blob->data_ro == false);
+ CU_ASSERT(blob->md_ro == false);
+
+ spdk_blob_sync_md(blob, bs_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(blob->data_ro == true);
+ CU_ASSERT(blob->md_ro == true);
+ CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->data_ro == true);
+ CU_ASSERT(blob->md_ro == true);
+ CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, &opts);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->data_ro == true);
+ CU_ASSERT(blob->md_ro == true);
+ CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
+
+ ut_blob_close_and_delete(bs, blob);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+}
+
+static void
+channel_ops(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_io_channel *channel;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+blob_write(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ struct spdk_io_channel *channel;
+ uint64_t pages_per_cluster;
+ uint8_t payload[10 * 4096];
+
+ pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ /* Write to a blob with 0 size */
+ spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Confirm that write fails if blob is marked read-only. */
+ blob->data_ro = true;
+ spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EPERM);
+ blob->data_ro = false;
+
+ /* Write to the blob */
+ spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Write starting beyond the end */
+ spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
+ NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Write starting at a valid location but going off the end */
+ spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+blob_read(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ struct spdk_io_channel *channel;
+ uint64_t pages_per_cluster;
+ uint8_t payload[10 * 4096];
+
+ pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ /* Read from a blob with 0 size */
+ spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Confirm that read passes if blob is marked read-only. */
+ blob->data_ro = true;
+ spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob->data_ro = false;
+
+ /* Read from the blob */
+ spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read starting beyond the end */
+ spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
+ NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Read starting at a valid location but going off the end */
+ spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+blob_rw_verify(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ struct spdk_io_channel *channel;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_resize(blob, 32, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+blob_rw_verify_iov(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_read[3];
+ struct iovec iov_write[3];
+ void *buf;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ blob = ut_blob_create_and_open(bs, NULL);
+
+ spdk_blob_resize(blob, 2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+ * Manually adjust the offset of the blob's second cluster. This lets us
+ * make sure that the readv/writev code correctly accounts for I/O that
+ * crosses cluster boundaries. Start by asserting that the allocated
+ * clusters are where we expect before modifying the second cluster.
+ */
+ CU_ASSERT(blob->active.clusters[0] == 1 * 256);
+ CU_ASSERT(blob->active.clusters[1] == 2 * 256);
+ blob->active.clusters[1] = 3 * 256;
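+ /* The cluster entries hold device LBAs (4096-byte blocks in this test), so
+ * 2 * 256 is the start of device cluster 2; remapping blob cluster 1 to 3 * 256
+ * lets the memcmp against g_dev_buffer below confirm that the old location was
+ * never written.
+ */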
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
+ /*
+ * Choose a page offset just before the cluster boundary. The first 6 pages of payload
+ * will get written to the first cluster, the last 4 to the second cluster.
+ */
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ buf = calloc(1, 256 * 4096);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ /* Check that cluster 2 on "disk" was not modified. */
+ CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
+ free(buf);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
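+/* Count the request sets currently queued on a blobstore channel. The nomem test
+ * below uses this to check that a writev which fails to allocate memory does not
+ * leak requests from the channel.
+ */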
+static uint32_t
+bs_channel_get_req_count(struct spdk_io_channel *_channel)
+{
+ struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
+ struct spdk_bs_request_set *set;
+ uint32_t count = 0;
+
+ TAILQ_FOREACH(set, &channel->reqs, link) {
+ count++;
+ }
+
+ return count;
+}
+
+static void
+blob_rw_verify_iov_nomem(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ struct spdk_io_channel *channel;
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_write[3];
+ uint32_t req_count;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_resize(blob, 2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+ * Choose a page offset just before the cluster boundary. The first 6 pages of payload
+ * will get written to the first cluster, the last 4 to the second cluster.
+ */
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
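+ /* Force every calloc inside the blobstore to return NULL so the writev cannot
+ * build its request, then verify below that the channel's request count is
+ * unchanged.
+ */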
+ MOCK_SET(calloc, NULL);
+ req_count = bs_channel_get_req_count(channel);
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOMEM);
+ CU_ASSERT(req_count == bs_channel_get_req_count(channel));
+ MOCK_CLEAR(calloc);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+blob_rw_iov_read_only(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ struct spdk_io_channel *channel;
+ uint8_t payload_read[4096];
+ uint8_t payload_write[4096];
+ struct iovec iov_read;
+ struct iovec iov_write;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_resize(blob, 2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Verify that writev fails if the data_ro flag is set. */
+ blob->data_ro = true;
+ iov_write.iov_base = payload_write;
+ iov_write.iov_len = sizeof(payload_write);
+ spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EPERM);
+
+ /* Verify that reads pass if data_ro flag is set. */
+ iov_read.iov_base = payload_read;
+ iov_read.iov_len = sizeof(payload_read);
+ spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint8_t *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ uint64_t i;
+ uint8_t *buf;
+ uint64_t page_size = spdk_bs_get_page_size(blob->bs);
+
+ /* To be sure that the operation is NOT split, read one page at a time */
+ buf = payload;
+ for (i = 0; i < length; i++) {
+ spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
+ poll_threads();
+ if (g_bserrno != 0) {
+ /* Pass the error code up */
+ break;
+ }
+ buf += page_size;
+ }
+
+ cb_fn(cb_arg, g_bserrno);
+}
+
+static void
+_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint8_t *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ uint64_t i;
+ uint8_t *buf;
+ uint64_t page_size = spdk_bs_get_page_size(blob->bs);
+
+ /* To be sure that the operation is NOT split, write one page at a time */
+ buf = payload;
+ for (i = 0; i < length; i++) {
+ spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
+ poll_threads();
+ if (g_bserrno != 0) {
+ /* Pass the error code up */
+ break;
+ }
+ buf += page_size;
+ }
+
+ cb_fn(cb_arg, g_bserrno);
+}
+
+static void
+blob_operation_split_rw(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ uint64_t cluster_size;
+
+ uint64_t payload_size;
+ uint8_t *payload_read;
+ uint8_t *payload_write;
+ uint8_t *payload_pattern;
+
+ uint64_t page_size;
+ uint64_t pages_per_cluster;
+ uint64_t pages_per_payload;
+
+ uint64_t i;
+
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ page_size = spdk_bs_get_page_size(bs);
+ pages_per_cluster = cluster_size / page_size;
+ pages_per_payload = pages_per_cluster * 5;
+ payload_size = cluster_size * 5;
+
+ payload_read = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_read != NULL);
+
+ payload_write = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_write != NULL);
+
+ payload_pattern = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
+
+ /* Prepare a pattern to write: fill with 0xFF and tag the start of each page with its index */
+ memset(payload_pattern, 0xFF, payload_size);
+ for (i = 0; i < pages_per_payload; i++) {
+ *((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
+ }
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = false;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
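+ /* The large I/Os below span several clusters and have to be split internally by
+ * the blobstore; the _blob_io_*_no_split() helpers are used later to produce a
+ * page-by-page reference for comparison.
+ */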
+
+ /* Initial read should return zeroed payload */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
+
+ /* Fill whole blob except last page */
+ spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Write last page with a pattern */
+ spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
+
+ /* Fill whole blob except first page */
+ spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Write first page with a pattern */
+ spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
+
+
+ /* Fill whole blob with a pattern (5 clusters) */
+
+ /* 1. Read test: write page by page with the no-split helper, then read the whole blob in one call and verify it. */
+ _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ /* 2. Write test: write the whole blob in one call, then read it back page by page with the no-split helper and verify it. */
+ spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ g_blob = NULL;
+ g_blobid = 0;
+
+ free(payload_read);
+ free(payload_write);
+ free(payload_pattern);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_operation_split_rw_iov(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ uint64_t cluster_size;
+
+ uint64_t payload_size;
+ uint8_t *payload_read;
+ uint8_t *payload_write;
+ uint8_t *payload_pattern;
+
+ uint64_t page_size;
+ uint64_t pages_per_cluster;
+ uint64_t pages_per_payload;
+
+ struct iovec iov_read[2];
+ struct iovec iov_write[2];
+
+ uint64_t i, j;
+
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ page_size = spdk_bs_get_page_size(bs);
+ pages_per_cluster = cluster_size / page_size;
+ pages_per_payload = pages_per_cluster * 5;
+ payload_size = cluster_size * 5;
+
+ payload_read = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_read != NULL);
+
+ payload_write = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_write != NULL);
+
+ payload_pattern = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
+
+ /* Prepare a pattern to write: fill every uint64_t in page i with the value i + 1 */
+ for (i = 0; i < pages_per_payload; i++) {
+ for (j = 0; j < page_size / sizeof(uint64_t); j++) {
+ uint64_t *tmp;
+
+ tmp = (uint64_t *)payload_pattern;
+ tmp += ((page_size * i) / sizeof(uint64_t)) + j;
+ *tmp = i + 1;
+ }
+ }
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = false;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Initial read should return a zeroed payload */
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size * 3;
+ iov_read[1].iov_base = payload_read + cluster_size * 3;
+ iov_read[1].iov_len = cluster_size * 2;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
+
+ /* The first iov fills the whole blob except the last page and the second iov
+ * writes the last page with the pattern. */
+ iov_write[0].iov_base = payload_pattern;
+ iov_write[0].iov_len = payload_size - page_size;
+ iov_write[1].iov_base = payload_pattern;
+ iov_write[1].iov_len = page_size;
+ spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size * 2;
+ iov_read[1].iov_base = payload_read + cluster_size * 2;
+ iov_read[1].iov_len = cluster_size * 3;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
+
+ /* The first iov fills only the first page and the second iov writes the whole
+ * blob except the first page with the pattern. */
+ iov_write[0].iov_base = payload_pattern;
+ iov_write[0].iov_len = page_size;
+ iov_write[1].iov_base = payload_pattern;
+ iov_write[1].iov_len = payload_size - page_size;
+ spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size * 4;
+ iov_read[1].iov_base = payload_read + cluster_size * 4;
+ iov_read[1].iov_len = cluster_size;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
+
+
+ /* Fill whole blob with a pattern (5 clusters) */
+
+ /* 1. Read test: write page by page with the no-split helper, then readv the whole blob and verify it. */
+ _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size;
+ iov_read[1].iov_base = payload_read + cluster_size;
+ iov_read[1].iov_len = cluster_size * 4;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ /* 2. Write test: writev the whole blob in one call, then read it back page by page with the no-split helper and verify it. */
+ iov_write[0].iov_base = payload_read;
+ iov_write[0].iov_len = cluster_size * 2;
+ iov_write[1].iov_base = payload_read + cluster_size * 2;
+ iov_write[1].iov_len = cluster_size * 3;
+ spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ g_blob = NULL;
+ g_blobid = 0;
+
+ free(payload_read);
+ free(payload_write);
+ free(payload_pattern);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_unmap(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ uint8_t payload[4096];
+ int i;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload, 0, sizeof(payload));
+ payload[0] = 0xFF;
+
+ /*
+ * Set the first byte of every cluster to 0xFF directly in the device buffer.
+ * The first cluster on the device is reserved, so start from cluster number 1.
+ */
+ for (i = 1; i < 11; i++) {
+ g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
+ }
+
+ /* Confirm that the 0xFF markers are visible when reading through the blob */
+ for (i = 0; i < 10; i++) {
+ payload[0] = 0;
+ spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(payload[0] == 0xFF);
+ }
+
+ /* Mark some clusters as unallocated */
+ blob->active.clusters[1] = 0;
+ blob->active.clusters[2] = 0;
+ blob->active.clusters[3] = 0;
+ blob->active.clusters[6] = 0;
+ blob->active.clusters[8] = 0;
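+ /* Blob cluster i is expected to map to device cluster i + 1 here (cluster 0
+ * holds metadata), so clearing entries 1, 2, 3, 6 and 8 means device clusters
+ * 2, 3, 4, 7 and 9 should keep their 0xFF marker after the unmap below.
+ */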
+
+ /* Unmap clusters by resizing to 0 */
+ spdk_blob_resize(blob, 0, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Confirm that only 'allocated' clusters were unmapped */
+ for (i = 1; i < 11; i++) {
+ switch (i) {
+ case 2:
+ case 3:
+ case 4:
+ case 7:
+ case 9:
+ CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
+ break;
+ default:
+ CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
+ break;
+ }
+ }
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_iter(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid;
+ struct spdk_blob_opts blob_opts;
+
+ spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_blob == NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_blob != NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_id(blob) == blobid);
+
+ spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_blob == NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+}
+
+static void
+blob_xattr(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob = g_blob;
+ spdk_blob_id blobid = spdk_blob_get_id(blob);
+ uint64_t length;
+ int rc;
+ const char *name1, *name2;
+ const void *value;
+ size_t value_len;
+ struct spdk_xattr_names *names;
+
+ /* Test that set_xattr fails if md_ro flag is set. */
+ blob->md_ro = true;
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == -EPERM);
+
+ blob->md_ro = false;
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 2345;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Overwrite "length" xattr. */
+ length = 3456;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* get_xattr should still work even if md_ro flag is set. */
+ value = NULL;
+ blob->md_ro = true;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+ blob->md_ro = false;
+
+ rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
+ CU_ASSERT(rc == -ENOENT);
+
+ names = NULL;
+ rc = spdk_blob_get_xattr_names(blob, &names);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(names != NULL);
+ CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
+ name1 = spdk_xattr_names_get_name(names, 0);
+ SPDK_CU_ASSERT_FATAL(name1 != NULL);
+ CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
+ name2 = spdk_xattr_names_get_name(names, 1);
+ SPDK_CU_ASSERT_FATAL(name2 != NULL);
+ CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
+ CU_ASSERT(strcmp(name1, name2));
+ spdk_xattr_names_free(names);
+
+ /* Confirm that remove_xattr fails if md_ro is set to true. */
+ blob->md_ro = true;
+ rc = spdk_blob_remove_xattr(blob, "name");
+ CU_ASSERT(rc == -EPERM);
+
+ blob->md_ro = false;
+ rc = spdk_blob_remove_xattr(blob, "name");
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_blob_remove_xattr(blob, "foobar");
+ CU_ASSERT(rc == -ENOENT);
+
+ /* Set internal xattr */
+ length = 7898;
+ rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
+ CU_ASSERT(rc == 0);
+ rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(*(uint64_t *)value == length);
+ /* Try to get a public xattr with the same name */
+ rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
+ CU_ASSERT(rc != 0);
+ rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
+ CU_ASSERT(rc != 0);
+ /* Check if SPDK_BLOB_INTERNAL_XATTR is set */
+ CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
+ SPDK_BLOB_INTERNAL_XATTR);
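+ /* Recording internal xattrs in invalid_flags presumably ensures that a blobstore
+ * which does not understand them refuses to load the blob instead of silently
+ * dropping the metadata.
+ */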
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+
+ /* Check if xattrs are persisted */
+ ut_bs_reload(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(*(uint64_t *)value == length);
+
+ /* Try to get the internal xattr through the public call */
+ rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ rc = blob_remove_xattr(blob, "internal", true);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
+}
+
+static void
+bs_load(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ spdk_blob_id blobid;
+ struct spdk_blob *blob;
+ struct spdk_bs_super_block *super_block;
+ uint64_t length;
+ int rc;
+ const void *value;
+ size_t value_len;
+ struct spdk_bs_opts opts;
+ struct spdk_blob_opts blob_opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Try to open a blobid that does not exist */
+ spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOENT);
+ CU_ASSERT(g_blob == NULL);
+
+ /* Create a blob */
+ blob = ut_blob_create_and_open(bs, NULL);
+ blobid = spdk_blob_get_id(blob);
+
+ /* Try again to open valid blob but without the upper bit set */
+ spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOENT);
+ CU_ASSERT(g_blob == NULL);
+
+ /* Set some xattrs */
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 2345;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+
+ /* Load should fail for device with an unsupported blocklen */
+ dev = init_dev();
+ dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Load should fail when max_md_ops is set to zero */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.max_md_ops = 0;
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Load should fail when max_channel_ops is set to zero */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.max_channel_ops = 0;
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+ CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Start the first metadata sync; without polling the threads, the super block on disk still reads clean */
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(super_block->clean == 1);
+
+ /* Get the xattrs */
+ value = NULL;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+
+ rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
+ CU_ASSERT(rc == -ENOENT);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Load should fail: bdev size < saved size */
+ dev = init_dev();
+ dev->blockcnt /= 2;
+
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(g_bserrno == -EILSEQ);
+
+ /* Load should succeed: bdev size > saved size */
+ dev = init_dev();
+ dev->blockcnt *= 4;
+
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(g_bserrno == 0);
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+
+
+ /* Test compatibility mode */
+
+ dev = init_dev();
+ super_block->size = 0;
+ super_block->crc = blob_md_page_calc_crc(super_block);
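+ /* A size of 0 presumably mimics a super block written before the size field was
+ * added; the load is expected to succeed, and the assert further below checks
+ * that the field is filled in with the device size once the blobstore is used.
+ */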
+
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create a blob */
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ /* Blobstore should update number of blocks in super_block */
+ CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
+ CU_ASSERT(super_block->clean == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(super_block->clean == 1);
+ g_bs = NULL;
+
+}
+
+static void
+bs_load_pending_removal(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ spdk_blob_id blobid, snapshotid;
+ const void *value;
+ size_t value_len;
+ int rc;
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ /* Create snapshot */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+
+ /* Set SNAPSHOT_PENDING_REMOVAL xattr */
+ snapshot->md_ro = false;
+ rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
+ CU_ASSERT(rc == 0);
+ snapshot->md_ro = true;
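+ /* SNAPSHOT_PENDING_REMOVAL marks a snapshot whose deletion was interrupted; the
+ * reloads below check that the load path either clears the marker (while a clone
+ * still references the snapshot) or finishes removing the snapshot.
+ */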
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Reload blobstore */
+ ut_bs_reload(&bs, NULL);
+
+ /* Snapshot should not be removed as blob is still pointing to it */
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+
+ /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
+ rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ /* Set SNAPSHOT_PENDING_REMOVAL xattr again */
+ snapshot->md_ro = false;
+ rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
+ CU_ASSERT(rc == 0);
+ snapshot->md_ro = true;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ /* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
+ blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Reload blobstore */
+ ut_bs_reload(&bs, NULL);
+
+ /* Snapshot should be removed as blob is not pointing to it anymore */
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+}
+
+static void
+bs_load_custom_cluster_size(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super_block;
+ struct spdk_bs_opts opts;
+ uint32_t custom_cluster_size = 4194304; /* 4MiB */
+ uint32_t cluster_sz;
+ uint64_t total_clusters;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = custom_cluster_size;
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ cluster_sz = bs->cluster_sz;
+ total_clusters = bs->total_clusters;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ /* Compare cluster size and number to one after initialization */
+ CU_ASSERT(cluster_sz == bs->cluster_sz);
+ CU_ASSERT(total_clusters == bs->total_clusters);
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+ CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(super_block->clean == 1);
+ g_bs = NULL;
+}
+
+static void
+bs_type(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ /* Load a non-existent blobstore type */
+ dev = init_dev();
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Load with empty blobstore type */
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Initialize a new blob store with empty bstype */
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Load a non-existent blobstore type */
+ dev = init_dev();
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Load with empty blobstore type */
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+bs_super_block(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super_block;
+ struct spdk_bs_opts opts;
+ struct spdk_bs_super_block_ver1 super_block_v1;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ /* Load an existing blob store with version newer than supported */
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ super_block->version++;
+
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Create a new blob store with super block version 1 */
+ dev = init_dev();
+ super_block_v1.version = 1;
+ memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
+ super_block_v1.length = 0x1000;
+ super_block_v1.clean = 1;
+ super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
+ super_block_v1.cluster_size = 0x100000;
+ super_block_v1.used_page_mask_start = 0x01;
+ super_block_v1.used_page_mask_len = 0x01;
+ super_block_v1.used_cluster_mask_start = 0x02;
+ super_block_v1.used_cluster_mask_len = 0x01;
+ super_block_v1.md_start = 0x03;
+ super_block_v1.md_len = 0x40;
+ memset(super_block_v1.reserved, 0, 4036);
+ super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
+ memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
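+ /* Loading the hand-built version 1 super block is expected to succeed; the
+ * blobstore should accept the older layout transparently.
+ */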
+
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+/*
+ * Verify that unloading the blobstore fails while a blob is still open and
+ * succeeds once the blob has been closed.
+ */
+static void
+bs_unload(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+
+ /* Create a blob and open it. */
+ blob = ut_blob_create_and_open(bs, NULL);
+
+ /* Try to unload blobstore, should fail with open blob */
+ g_bserrno = -1;
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EBUSY);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Close the blob, then successfully unload blobstore */
+ g_bserrno = -1;
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+}
+
+/*
+ * Create a blobstore with a cluster size different from the default and ensure it
+ * is persisted; also verify that invalid cluster sizes are rejected.
+ */
+static void
+bs_cluster_sz(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ uint32_t cluster_sz;
+
+ /* Set cluster size to zero */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = 0;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ SPDK_CU_ASSERT_FATAL(g_bs == NULL);
+
+ /*
+ * Set the cluster size equal to the blobstore page size. To work, the cluster
+ * size must be at least twice the blobstore page size.
+ */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = SPDK_BS_PAGE_SIZE;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -ENOMEM);
+ SPDK_CU_ASSERT_FATAL(g_bs == NULL);
+
+ /*
+ * Set the cluster size lower than the blobstore page size. To work, the cluster
+ * size must be at least twice the blobstore page size.
+ */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ SPDK_CU_ASSERT_FATAL(g_bs == NULL);
+
+ /* Set cluster size to twice the default */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz *= 2;
+ cluster_sz = opts.cluster_sz;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
+
+ ut_bs_reload(&bs, &opts);
+
+ CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+/*
+ * Create a blobstore, reload it and ensure total usable cluster count
+ * stays the same.
+ */
+static void
+bs_usable_clusters(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ uint32_t clusters;
+ int i;
+
+
+ clusters = spdk_bs_total_data_cluster_count(bs);
+
+ ut_bs_reload(&bs, NULL);
+
+ CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
+
+ /* Create and resize blobs to make sure that the usable cluster count won't change */
+ for (i = 0; i < 4; i++) {
+ g_bserrno = -1;
+ g_blobid = SPDK_BLOBID_INVALID;
+ blob = ut_blob_create_and_open(bs, NULL);
+
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bserrno = -1;
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
+ }
+
+ /* Reload the blob store to make sure that nothing changed */
+ ut_bs_reload(&bs, NULL);
+
+ CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
+}
+
+/*
+ * Test resizing of the metadata blob. This requires creating enough blobs
+ * so that one cluster is not enough to fit the metadata for those blobs.
+ * To induce this condition more quickly, we reduce the cluster size to 16KB,
+ * which means only four 4KB blob metadata pages fit in each cluster.
+ */
+static void
+bs_resize_md(void)
+{
+ struct spdk_blob_store *bs;
+ const int CLUSTER_PAGE_COUNT = 4;
+ const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts blob_opts;
+ uint32_t cluster_sz;
+ spdk_blob_id blobids[NUM_BLOBS];
+ int i;
+
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
+ cluster_sz = opts.cluster_sz;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
+
+ ut_spdk_blob_opts_init(&blob_opts);
+
+ for (i = 0; i < NUM_BLOBS; i++) {
+ g_bserrno = -1;
+ g_blobid = SPDK_BLOBID_INVALID;
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobids[i] = g_blobid;
+ }
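+ /* With 16 blobs and only 4 metadata pages per cluster, the metadata blob must
+ * have grown beyond its first cluster; reloading and reopening every blob below
+ * verifies that the grown metadata was persisted correctly.
+ */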
+
+ ut_bs_reload(&bs, &opts);
+
+ CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
+
+ for (i = 0; i < NUM_BLOBS; i++) {
+ g_bserrno = -1;
+ g_blob = NULL;
+ spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+ g_bserrno = -1;
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+bs_destroy(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+
+ /* Initialize a new blob store */
+ dev = init_dev();
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Destroy the blob store */
+ g_bserrno = -1;
+ spdk_bs_destroy(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Loading a non-existent blob store should fail. */
+ g_bs = NULL;
+ dev = init_dev();
+
+ g_bserrno = 0;
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+}
+
+/* Try to hit all of the corner cases associated with serializing
+ * a blob to disk
+ */
+static void
+blob_serialize_test(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ struct spdk_blob_store *bs;
+ spdk_blob_id blobid[2];
+ struct spdk_blob *blob[2];
+ uint64_t i;
+ char *value;
+ int rc;
+
+ dev = init_dev();
+
+ /* Initialize a new blobstore with very small clusters */
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = dev->blocklen * 8;
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create and open two blobs */
+ for (i = 0; i < 2; i++) {
+ blob[i] = ut_blob_create_and_open(bs, NULL);
+ blobid[i] = spdk_blob_get_id(blob[i]);
+
+ /* Set a fairly large xattr on both blobs to eat up
+ * metadata space
+ */
+ value = calloc(dev->blocklen - 64, sizeof(char));
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ memset(value, i, dev->blocklen / 2);
+ rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
+ CU_ASSERT(rc == 0);
+ free(value);
+ }
+
+ /* Resize the blobs, alternating 1 cluster at a time.
+ * This thwarts run length encoding and will cause spill
+ * over of the extents.
+ */
+ for (i = 0; i < 6; i++) {
+ spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
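+ /* Each blob ends up with 3 clusters allocated in an interleaved layout, which is
+ * what the post-reload check below expects.
+ */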
+
+ for (i = 0; i < 2; i++) {
+ spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ /* Close the blobs */
+ for (i = 0; i < 2; i++) {
+ spdk_blob_close(blob[i], blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ ut_bs_reload(&bs, &opts);
+
+ for (i = 0; i < 2; i++) {
+ blob[i] = NULL;
+
+ spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob[i] = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
+
+ spdk_blob_close(blob[i], blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_crc(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid;
+ uint32_t page_num;
+ int index;
+ struct spdk_blob_md_page *page;
+
+ blob = ut_blob_create_and_open(bs, NULL);
+ blobid = spdk_blob_get_id(blob);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ page_num = bs_blobid_to_page(blobid);
+ index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
+ page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
+ page->crc = 0;
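+ /* With the stored CRC corrupted, both opening and deleting the blob below are
+ * expected to fail with -EINVAL.
+ */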
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blob == NULL);
+ g_bserrno = 0;
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+}
+
+static void
+super_block_crc(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super_block;
+
+ dev = init_dev();
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ super_block->crc = 0;
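+ /* A zeroed CRC should cause the load below to fail with -EILSEQ. */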
+ dev = init_dev();
+
+ /* Load an existing blob store */
+ g_bserrno = 0;
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EILSEQ);
+}
+
+/* For the blob dirty shutdown test case we run the following sub-tests:
+ * 1 Initialize a new blob store and create one super blob with some xattrs, then
+ * dirty shutdown, reload the blob store and verify the xattrs.
+ * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
+ * reload the blob store and verify the cluster count.
+ * 3 Create a second blob and then dirty shutdown, reload the blob store
+ * and verify the second blob.
+ * 4 Delete the second blob and then dirty shutdown, reload the blob store
+ * and verify that the second blob is invalid.
+ * 5 Create the second blob again and also create a third blob, corrupt the md
+ * of the second blob so it becomes invalid, then dirty shutdown, reload the
+ * blob store and verify that the second blob is invalid while the third blob
+ * is still correct.
+ */
+static void
+blob_dirty_shutdown(void)
+{
+ int rc;
+ int index;
+ struct spdk_blob_store *bs = g_bs;
+ spdk_blob_id blobid1, blobid2, blobid3;
+ struct spdk_blob *blob = g_blob;
+ uint64_t length;
+ uint64_t free_clusters;
+ const void *value;
+ size_t value_len;
+ uint32_t page_num;
+ struct spdk_blob_md_page *page;
+ struct spdk_blob_opts blob_opts;
+
+ /* Create first blob */
+ blobid1 = spdk_blob_get_id(blob);
+
+ /* Set some xattrs */
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 2345;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Put an xattr that fits exactly into a single page.
+ * This results in additional pages being added to the MD:
+ * the first holds the flags and the smaller xattrs, the second the large xattr,
+ * and the third just the extents.
+ */
+ size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
+ strlen("large_xattr");
+ char *xattr = calloc(xattr_length, sizeof(char));
+ SPDK_CU_ASSERT_FATAL(xattr != NULL);
+ rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
+ free(xattr);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Set the blob as the super blob */
+ spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
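+ /* ut_bs_dirty_load() reloads the blobstore without a clean unload first, so the
+ * load below has to go through the dirty-shutdown recovery path.
+ */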
+ ut_bs_dirty_load(&bs, NULL);
+
+ /* Get the super blob */
+ spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(blobid1 == g_blobid);
+
+ spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ /* Get the xattrs */
+ value = NULL;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 20, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ ut_bs_dirty_load(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Create second blob */
+ blob = ut_blob_create_and_open(bs, NULL);
+ blobid2 = spdk_blob_get_id(blob);
+
+ /* Set some xattrs */
+ rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 5432;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ ut_bs_dirty_load(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Get the xattrs */
+ value = NULL;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ ut_blob_close_and_delete(bs, blob);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ ut_bs_dirty_load(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+ CU_ASSERT(g_blob == NULL);
+
+ spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, NULL);
+
+ /* Create second blob */
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid2 = g_blobid;
+
+ /* Create third blob */
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid3 = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Set some xattrs for second blob */
+ rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 5432;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Set some xattrs for third blob */
+ rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 5432;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Mark second blob as invalid */
+ page_num = bs_blobid_to_page(blobid2);
+
+ index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
+ page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
+ page->sequence_num = 1;
+ page->crc = blob_md_page_calc_crc(page);
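+ /* The first md page of a blob is expected to have sequence_num == 0;
+ * bumping it (and recomputing the CRC so the page itself still validates)
+ * should make this blob unrecoverable on the next dirty load. */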
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ ut_bs_dirty_load(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+ CU_ASSERT(g_blob == NULL);
+
+ spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+}
+
+static void
+blob_flags(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
+ struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
+ struct spdk_blob_opts blob_opts;
+ int rc;
+
+ /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
+ blob_invalid = ut_blob_create_and_open(bs, NULL);
+ blobid_invalid = spdk_blob_get_id(blob_invalid);
+
+ blob_data_ro = ut_blob_create_and_open(bs, NULL);
+ blobid_data_ro = spdk_blob_get_id(blob_data_ro);
+
+ ut_spdk_blob_opts_init(&blob_opts);
+ blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
+ blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
+ blobid_md_ro = spdk_blob_get_id(blob_md_ro);
+ CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
+
+ /* Change the size of blob_data_ro to check if flags are serialized
+ * when the blob has a non-zero number of extents */
+ spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Set the xattr to check if flags are serialized
+ * when the blob has a non-zero number of xattrs */
+ rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
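+ /* Set one unknown (not yet defined) bit in each flags field. The expectation,
+ * checked after the reload below, is that an unknown invalid flag makes the
+ * blob unopenable, an unknown data_ro flag forces both data and md read-only,
+ * and an unknown md_ro flag forces only the metadata read-only. */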
+ blob_invalid->invalid_flags = (1ULL << 63);
+ blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
+ blob_data_ro->data_ro_flags = (1ULL << 62);
+ blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
+ blob_md_ro->md_ro_flags = (1ULL << 61);
+ blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
+
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bserrno = -1;
+ spdk_blob_close(blob_invalid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob_invalid = NULL;
+ g_bserrno = -1;
+ spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob_data_ro = NULL;
+ g_bserrno = -1;
+ spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob_md_ro = NULL;
+
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ ut_bs_reload(&bs, NULL);
+
+ g_blob = NULL;
+ g_bserrno = 0;
+ spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+ CU_ASSERT(g_blob == NULL);
+
+ g_blob = NULL;
+ g_bserrno = -1;
+ spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob_data_ro = g_blob;
+ /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
+ CU_ASSERT(blob_data_ro->data_ro == true);
+ CU_ASSERT(blob_data_ro->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
+
+ g_blob = NULL;
+ g_bserrno = -1;
+ spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob_md_ro = g_blob;
+ CU_ASSERT(blob_md_ro->data_ro == false);
+ CU_ASSERT(blob_md_ro->md_ro == true);
+
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_blob_close_and_delete(bs, blob_data_ro);
+ ut_blob_close_and_delete(bs, blob_md_ro);
+}
+
+static void
+bs_version(void)
+{
+ struct spdk_bs_super_block *super;
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts blob_opts;
+ spdk_blob_id blobid;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /*
+ * Change the bs version on disk. This will allow us to
+ * test that the version does not get modified automatically
+ * when loading and unloading the blobstore.
+ */
+ super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
+ CU_ASSERT(super->version == SPDK_BS_VERSION);
+ CU_ASSERT(super->clean == 1);
+ super->version = 2;
+ /*
+ * Version 2 metadata does not have a used blobid mask, so clear
+ * those fields in the super block and zero the corresponding
+ * region on "disk". We will use this to ensure blob IDs are
+ * correctly reconstructed.
+ */
+ memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
+ super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
+ super->used_blobid_mask_start = 0;
+ super->used_blobid_mask_len = 0;
+ super->crc = blob_md_page_calc_crc(super);
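+ /* Recompute the super block CRC after the edits above; otherwise the load
+ * below would presumably reject the super block as corrupted. */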
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ CU_ASSERT(super->clean == 1);
+ bs = g_bs;
+
+ /*
+ * Create a blob - just to make sure that unloading it
+ * results in writing the super block (since metadata pages
+ * were allocated).
+ */
+ ut_spdk_blob_opts_init(&blob_opts);
+ spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ CU_ASSERT(super->version == 2);
+ CU_ASSERT(super->used_blobid_mask_start == 0);
+ CU_ASSERT(super->used_blobid_mask_len == 0);
+
+ dev = init_dev();
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ g_blob = NULL;
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ ut_blob_close_and_delete(bs, blob);
+
+ CU_ASSERT(super->version == 2);
+ CU_ASSERT(super->used_blobid_mask_start == 0);
+ CU_ASSERT(super->used_blobid_mask_len == 0);
+}
+
+static void
+blob_set_xattrs_test(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ const void *value;
+ size_t value_len;
+ char *xattr;
+ size_t xattr_length;
+ int rc;
+
+ /* Create blob with extra attributes */
+ ut_spdk_blob_opts_init(&opts);
+
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = _get_xattr_value;
+ opts.xattrs.count = 3;
+ opts.xattrs.ctx = &g_ctx;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+
+ /* Get the xattrs */
+ value = NULL;
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+ /* Try to get a non-existent attribute */
+
+ rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
+ CU_ASSERT(rc == -ENOENT);
+
+ /* Try an xattr exceeding the maximum descriptor length in a single page */
+ xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
+ strlen("large_xattr") + 1;
+ xattr = calloc(xattr_length, sizeof(char));
+ SPDK_CU_ASSERT_FATAL(xattr != NULL);
+ rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
+ free(xattr);
+ SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* NULL callback */
+ ut_spdk_blob_opts_init(&opts);
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = NULL;
+ opts.xattrs.count = 1;
+ opts.xattrs.ctx = &g_ctx;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ /* NULL values */
+ ut_spdk_blob_opts_init(&opts);
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = _get_xattr_value_null;
+ opts.xattrs.count = 1;
+ opts.xattrs.ctx = NULL;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == -EINVAL);
+}
+
+static void
+blob_thin_prov_alloc(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ /* Set blob as thin provisioned */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(blob->active.num_clusters == 0);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
+
+ /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Grow it to 1TB - still unallocated */
+ spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 262144);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 262144);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
+ /* Since clusters are not allocated,
+ * the number of metadata pages is expected to be minimal.
+ */
+ CU_ASSERT(blob->active.num_pages == 1);
+
+ /* Shrink the blob to 3 clusters - still unallocated */
+ spdk_blob_resize(blob, 3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 3);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 3);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ /* Check that clusters allocation and size is still the same */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 3);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_insert_cluster_msg_test(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+ uint64_t new_cluster = 0;
+ uint32_t cluster_num = 3;
+ uint32_t extent_page = 0;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ /* Set blob as thin provisioned */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 4;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(blob->active.num_clusters == 4);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
+ CU_ASSERT(blob->active.clusters[cluster_num] == 0);
+
+ /* Specify the cluster_num to allocate; new_cluster is returned so it can be
+ * inserted on the md_thread. This simulates the behaviour of a cluster being
+ * allocated after blob creation, e.g. in _spdk_bs_allocate_and_copy_cluster(). */
+ bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
+ CU_ASSERT(blob->active.clusters[cluster_num] == 0);
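+ /* The cluster map is not updated until the insertion is processed on the
+ * md thread, so the cluster is still unassigned here. */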
+
+ blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page,
+ blob_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(blob->active.clusters[cluster_num] != 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->active.clusters[cluster_num] != 0);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_thin_prov_rw(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel, *channel_thread1;
+ struct spdk_blob_opts opts;
+ uint64_t free_clusters;
+ uint64_t page_size;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ uint64_t write_bytes;
+ uint64_t read_bytes;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ page_size = spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(blob->active.num_clusters == 0);
+
+ /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xFF, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ write_bytes = g_dev_write_bytes;
+ read_bytes = g_dev_read_bytes;
+
+ /* Perform write on thread 1. That will allocate a cluster on thread 0 via send_msg */
+ set_thread(1);
+ channel_thread1 = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel_thread1 != NULL);
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
+ /* Perform write on thread 0. That will try to allocate a cluster,
+ * but fail because the other thread issued the cluster allocation first. */
+ set_thread(0);
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
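+ /* Both writes have tentatively claimed a cluster at this point. Once the
+ * md thread processes the insertions, the losing allocation should be
+ * released again, leaving only one cluster consumed (checked below). */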
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
+ /* For a thin-provisioned blob we need to write 20 pages of payload plus one
+ * page of metadata, and read 0 bytes */
+ if (g_use_extent_table) {
+ /* Add one more page for EXTENT_PAGE write */
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
+ } else {
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
+ }
+ CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
+
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ ut_blob_close_and_delete(bs, blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ set_thread(1);
+ spdk_bs_free_io_channel(channel_thread1);
+ set_thread(0);
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_thin_prov_rle(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+ uint64_t page_size;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ uint64_t write_bytes;
+ uint64_t read_bytes;
+ uint64_t io_unit;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ page_size = spdk_bs_get_page_size(bs);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ /* Target specifically second cluster in a blob as first allocation */
+ io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
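+ /* bs_cluster_to_page() gives the first page of cluster 1; multiplying by
+ * the io units per page converts that into an io_unit offset. */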
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xFF, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ write_bytes = g_dev_write_bytes;
+ read_bytes = g_dev_read_bytes;
+
+ /* Issue write to second cluster in a blob */
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
+ /* For a thin-provisioned blob we need to write 10 pages of payload plus one
+ * page of metadata, and read 0 bytes */
+ if (g_use_extent_table) {
+ /* Add one more page for EXTENT_PAGE write */
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
+ } else {
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
+ }
+ CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
+
+ spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ /* Read second cluster after blob reload to confirm data written */
+ spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_thin_prov_rw_iov(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ uint64_t free_clusters;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_read[3];
+ struct iovec iov_write[3];
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(blob->active.num_clusters == 0);
+
+ /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
+
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+struct iter_ctx {
+ int current_iter;
+ spdk_blob_id blobid[4];
+};
+
+static void
+test_iter(void *arg, struct spdk_blob *blob, int bserrno)
+{
+ struct iter_ctx *iter_ctx = arg;
+ spdk_blob_id blobid;
+
+ CU_ASSERT(bserrno == 0);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
+}
+
+static void
+bs_load_iter_test(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct iter_ctx iter_ctx = { 0 };
+ struct spdk_blob *blob;
+ int i, rc;
+ struct spdk_bs_opts opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ for (i = 0; i < 4; i++) {
+ blob = ut_blob_create_and_open(bs, NULL);
+ iter_ctx.blobid[i] = spdk_blob_get_id(blob);
+
+ /* Just save the blobid as an xattr for testing purposes. */
+ rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
+ CU_ASSERT(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, i, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ g_bserrno = -1;
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ opts.iter_cb_fn = test_iter;
+ opts.iter_cb_arg = &iter_ctx;
+
+ /* Test blob iteration during load after a clean shutdown. */
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Dirty shutdown */
+ bs_free(bs);
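+ /* bs_free() presumably tears down the in-memory blob store without writing
+ * the clean-shutdown metadata, so the next load takes the dirty path. */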
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ opts.iter_cb_fn = test_iter;
+ iter_ctx.current_iter = 0;
+ opts.iter_cb_arg = &iter_ctx;
+
+ /* Test blob iteration during load after a dirty shutdown. */
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_snapshot_rw(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob, *snapshot;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid, snapshotid;
+ uint64_t free_clusters;
+ uint64_t cluster_size;
+ uint64_t page_size;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ uint64_t write_bytes;
+ uint64_t read_bytes;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ page_size = spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ memset(payload_read, 0xFF, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* Create snapshot from blob */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
+
+ write_bytes = g_dev_write_bytes;
+ read_bytes = g_dev_read_bytes;
+
+ memset(payload_write, 0xAA, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* For a clone we need to allocate and copy one cluster, update one page of metadata
+ * and then write 10 pages of payload.
+ */
+ if (g_use_extent_table) {
+ /* Add one more page for EXTENT_PAGE write */
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size);
+ } else {
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
+ }
+ CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
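+ /* The cluster_size worth of reads is presumably the copy-on-write read of
+ * the original cluster from the snapshot before the new payload is written. */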
+
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ /* Data on snapshot should not change after write to clone */
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ ut_blob_close_and_delete(bs, blob);
+ ut_blob_close_and_delete(bs, snapshot);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_snapshot_rw_iov(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob, *snapshot;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid, snapshotid;
+ uint64_t free_clusters;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_read[3];
+ struct iovec iov_write[3];
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Create snapshot from blob */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
+
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, blob);
+ ut_blob_close_and_delete(bs, snapshot);
+}
+
+/**
+ * Inflate / decouple parent rw unit tests.
+ *
+ * --------------
+ * original blob: 0 1 2 3 4
+ * ,---------+---------+---------+---------+---------.
+ * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - |
+ * +---------+---------+---------+---------+---------+
+ * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - |
+ * +---------+---------+---------+---------+---------+
+ * blob | - |zzzzzzzzz| - | - | - |
+ * '---------+---------+---------+---------+---------'
+ * . . . . . .
+ * -------- . . . . . .
+ * inflate: . . . . . .
+ * ,---------+---------+---------+---------+---------.
+ * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
+ * '---------+---------+---------+---------+---------'
+ *
+ * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
+ * on snapshot2 and snapshot removed . . .
+ * . . . . . .
+ * ---------------- . . . . . .
+ * decouple parent: . . . . . .
+ * ,---------+---------+---------+---------+---------.
+ * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - |
+ * +---------+---------+---------+---------+---------+
+ * blob | - |zzzzzzzzz| - |yyyyyyyyy| - |
+ * '---------+---------+---------+---------+---------'
+ *
+ * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
+ * on snapshot2 removed and on snapshot still exists. Snapshot2
+ * should remain a clone of snapshot.
+ */
+static void
+_blob_inflate_rw(bool decouple_parent)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob *blob, *snapshot, *snapshot2;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid, snapshotid, snapshot2id;
+ uint64_t free_clusters;
+ uint64_t cluster_size;
+
+ uint64_t payload_size;
+ uint8_t *payload_read;
+ uint8_t *payload_write;
+ uint8_t *payload_clone;
+
+ uint64_t pages_per_cluster;
+ uint64_t pages_per_payload;
+
+ int i;
+ spdk_blob_id ids[2];
+ size_t count;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
+ pages_per_payload = pages_per_cluster * 5;
+
+ payload_size = cluster_size * 5;
+
+ payload_read = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_read != NULL);
+
+ payload_write = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_write != NULL);
+
+ payload_clone = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* 1) Initial read should return zeroed payload */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
+
+ /* Fill the whole blob with a pattern, except the last cluster (to be sure it
+ * isn't allocated) */
+ memset(payload_write, 0xE5, payload_size - cluster_size);
+ spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
+ pages_per_cluster, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* 2) Create snapshot from blob (first level) */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true);
+ CU_ASSERT(snapshot->md_ro == true);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
+
+ /* Write every second cluster with a pattern.
+ *
+ * The last cluster shouldn't be written, to be sure that neither the
+ * snapshot nor the clone allocates it.
+ *
+ * payload_clone stores the expected result of reading "blob" at this point
+ * and is used only to check data consistency on the clone before and after
+ * inflation. Initially we fill it with the backing snapshot's pattern
+ * used before.
+ */
+ memset(payload_clone, 0xE5, payload_size - cluster_size);
+ memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
+ memset(payload_write, 0xAA, payload_size);
+ for (i = 1; i < 5; i += 2) {
+ spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
+ pages_per_cluster, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Update expected result */
+ memcpy(payload_clone + (cluster_size * i), payload_write,
+ cluster_size);
+ }
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* Check data consistency on clone */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
+
+ /* 3) Create a second-level snapshot from the blob */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshot2id = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+ CU_ASSERT(snapshot2->data_ro == true);
+ CU_ASSERT(snapshot2->md_ro == true);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
+
+ CU_ASSERT(snapshot2->parent_id == snapshotid);
+
+ /* Write one cluster on the top level blob. This cluster (1) covers an
+ * already allocated cluster in snapshot2, so it shouldn't be inflated
+ * at all */
+ spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
+ pages_per_cluster, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Update expected result */
+ memcpy(payload_clone + cluster_size, payload_write, cluster_size);
+
+ /* Check data consistency on clone */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
+
+
+ /* Close all blobs */
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Check snapshot-clone relations */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == snapshot2id);
+
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ if (!decouple_parent) {
+ /* Do full blob inflation */
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* All clusters should be inflated (except the one already allocated
+ * in the top level blob) */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
+
+ /* Check if relation tree updated correctly */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+
+ /* snapshotid has one clone */
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == snapshot2id);
+
+ /* snapshot2id has no clones */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
+ CU_ASSERT(count == 0);
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
+ } else {
+ /* Decouple parent of blob */
+ spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Only one cluster from the parent should be inflated (the second one
+ * is covered by a cluster written on the top level blob and
+ * already allocated) */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
+
+ /* Check if relation tree updated correctly */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+
+ /* snapshotid has two clones now */
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
+
+ /* snapshot2id has no clones */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
+ CU_ASSERT(count == 0);
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+ }
+
+ /* Try to delete snapshot2 (should pass) */
+ spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to delete base snapshot */
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Reopen blob after snapshot deletion */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Check data consistency on inflated blob */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ free(payload_read);
+ free(payload_write);
+ free(payload_clone);
+
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_inflate_rw(void)
+{
+ _blob_inflate_rw(false);
+ _blob_inflate_rw(true);
+}
+
+/**
+ * Snapshot-clones relation test
+ *
+ * snapshot
+ * |
+ * +-----+-----+
+ * | |
+ * blob(ro) snapshot2
+ * | |
+ * clone2 clone
+ */
+static void
+blob_relations(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
+ spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
+ int rc;
+ size_t count;
+ spdk_blob_id ids[10] = {};
+
+ dev = init_dev();
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
+
+ spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* 1. Create blob with 10 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ CU_ASSERT(!spdk_blob_is_read_only(blob));
+ CU_ASSERT(!spdk_blob_is_snapshot(blob));
+ CU_ASSERT(!spdk_blob_is_clone(blob));
+ CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
+
+ /* blob should not have underlying snapshot nor clones */
+ CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+
+ /* 2. Create snapshot */
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+
+ CU_ASSERT(spdk_blob_is_read_only(snapshot));
+ CU_ASSERT(spdk_blob_is_snapshot(snapshot));
+ CU_ASSERT(!spdk_blob_is_clone(snapshot));
+ CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
+
+ /* Check if original blob is converted to the clone of snapshot */
+ CU_ASSERT(!spdk_blob_is_read_only(blob));
+ CU_ASSERT(!spdk_blob_is_snapshot(blob));
+ CU_ASSERT(spdk_blob_is_clone(blob));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
+ CU_ASSERT(blob->parent_id == snapshotid);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+
+ /* 3. Create clone from snapshot */
+
+ spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+
+ CU_ASSERT(!spdk_blob_is_read_only(clone));
+ CU_ASSERT(!spdk_blob_is_snapshot(clone));
+ CU_ASSERT(spdk_blob_is_clone(clone));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
+ CU_ASSERT(clone->parent_id == snapshotid);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* Check if clone is on the snapshot's list */
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
+
+
+ /* 4. Create snapshot of the clone */
+
+ spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+
+ CU_ASSERT(spdk_blob_is_read_only(snapshot2));
+ CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
+ CU_ASSERT(spdk_blob_is_clone(snapshot2));
+ CU_ASSERT(snapshot2->parent_id == snapshotid);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
+
+ /* Check if clone is converted to the clone of snapshot2 and snapshot2
+ * is a child of snapshot */
+ CU_ASSERT(!spdk_blob_is_read_only(clone));
+ CU_ASSERT(!spdk_blob_is_snapshot(clone));
+ CU_ASSERT(spdk_blob_is_clone(clone));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
+ CU_ASSERT(clone->parent_id == snapshotid2);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+
+ /* 5. Try to create clone from read only blob */
+
+ /* Mark blob as read only */
+ spdk_blob_set_read_only(blob);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Check if previously created blob is read only clone */
+ CU_ASSERT(spdk_blob_is_read_only(blob));
+ CU_ASSERT(!spdk_blob_is_snapshot(blob));
+ CU_ASSERT(spdk_blob_is_clone(blob));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
+
+ /* Create clone from read only blob */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone2 = g_blob;
+
+ CU_ASSERT(!spdk_blob_is_read_only(clone2));
+ CU_ASSERT(!spdk_blob_is_snapshot(clone2));
+ CU_ASSERT(spdk_blob_is_clone(clone2));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid2);
+
+ /* Close blobs */
+
+ spdk_blob_close(clone2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to delete snapshot with more than 1 clone */
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ ut_bs_reload(&bs, &bs_opts);
+
+ /* A NULL ids array should return the number of clones in count */
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
+ CU_ASSERT(rc == -ENOMEM);
+ CU_ASSERT(count == 2);
+
+ /* incorrect array size */
+ count = 1;
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == -ENOMEM);
+ CU_ASSERT(count == 2);
+
+
+ /* Verify structure of loaded blob store */
+
+ /* snapshot */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
+
+ /* blob */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid2);
+
+ /* clone */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* snapshot2 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* clone2 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* Try to delete a blob that the user should not be able to remove */
+
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Remove all blobs */
+
+ spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bs = NULL;
+}
+
+/**
+ * Snapshot-clones relation test 2
+ *
+ * snapshot1
+ * |
+ * snapshot2
+ * |
+ * +-----+-----+
+ * | |
+ * blob(ro) snapshot3
+ * | |
+ * | snapshot4
+ * | | |
+ * clone2 clone clone3
+ */
+static void
+blob_relations2(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
+ spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
+ cloneid3;
+ int rc;
+ size_t count;
+ spdk_blob_id ids[10] = {};
+
+ dev = init_dev();
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
+
+ spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* 1. Create blob with 10 clusters */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ /* 2. Create snapshot1 */
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid1 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot1 = g_blob;
+
+ CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
+
+ CU_ASSERT(blob->parent_id == snapshotid1);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
+
+ /* Check if blob is the clone of snapshot1 */
+ CU_ASSERT(blob->parent_id == snapshotid1);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+ /* 3. Create another snapshot */
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+
+ CU_ASSERT(spdk_blob_is_clone(snapshot2));
+ CU_ASSERT(snapshot2->parent_id == snapshotid1);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
+
+ /* Check if snapshot2 is the clone of snapshot1 and blob
+ * is a child of snapshot2 */
+ CU_ASSERT(blob->parent_id == snapshotid2);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+ /* 4. Create clone from snapshot */
+
+ spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+
+ CU_ASSERT(clone->parent_id == snapshotid2);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
+
+ /* Check if clone is on the snapshot's list */
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
+
+ /* 5. Create snapshot of the clone */
+
+ spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid3 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot3 = g_blob;
+
+ CU_ASSERT(snapshot3->parent_id == snapshotid2);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
+
+ /* Check if clone is converted to the clone of snapshot3 and snapshot3
+ * is a child of snapshot2 */
+ CU_ASSERT(clone->parent_id == snapshotid3);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* 6. Create another snapshot of the clone */
+
+ spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid4 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot4 = g_blob;
+
+ CU_ASSERT(snapshot4->parent_id == snapshotid3);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
+
+ /* Check if clone is converted to the clone of snapshot4 and snapshot4
+ * is a child of snapshot3 */
+ CU_ASSERT(clone->parent_id == snapshotid4);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* 7. Remove snapshot 4 */
+
+ ut_blob_close_and_delete(bs, snapshot4);
+
+ /* Check if relations are back to the state from before creating snapshot 4 */
+ CU_ASSERT(clone->parent_id == snapshotid3);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
+
+ spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid3 = g_blobid;
+
+ spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ /* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
+
+ spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot3 = g_blob;
+
+ spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_blob_close(snapshot3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* 10. Remove snapshot 1 */
+
+ ut_blob_close_and_delete(bs, snapshot1);
+
+ /* Check relations after removing snapshot 1: snapshot2 is now the root (no parent),
+ * while its clones are still blob and snapshot3, as before step 6 */
+ CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
+
+ /* 11. Try to create clone from read only blob */
+
+ /* Mark blob as read only */
+ spdk_blob_set_read_only(blob);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create clone from read only blob */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone2 = g_blob;
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid2);
+
+ /* Close blobs */
+
+ spdk_blob_close(clone2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, &bs_opts);
+
+ /* Verify structure of loaded blob store */
+
+ /* snapshot2 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
+
+ /* blob */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid2);
+
+ /* clone */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* snapshot3 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* clone2 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* Try to delete all blobs in the worst possible order */
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bs = NULL;
+}
+
+static void
+blobstore_clean_power_failure(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_blob *blob;
+ struct spdk_power_failure_thresholds thresholds = {};
+ bool clean = false;
+ struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
+ struct spdk_bs_super_block super_copy = {};
+
+ thresholds.general_threshold = 1;
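+ /* Each pass lets the simulated dev complete one more I/O before the injected power
+ * failure (general_threshold is bumped at the end of the loop); the loop ends on the
+ * first pass where the md sync completes before the failure triggers. */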
+ while (!clean) {
+ /* Create bs and blob */
+ suite_blob_setup();
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ bs = g_bs;
+ blob = g_blob;
+
+ /* Super block should not change for the rest of the UT;
+ * save it and compare against it later. */
+ memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
+ SPDK_CU_ASSERT_FATAL(super->clean == 0);
+ SPDK_CU_ASSERT_FATAL(bs->clean == 0);
+
+ /* Force the bs/super block into a clean state.
+ * Also mark the blob dirty to force a blob persist. */
+ blob->state = SPDK_BLOB_STATE_DIRTY;
+ bs->clean = 1;
+ super->clean = 1;
+ super->crc = blob_md_page_calc_crc(super);
+
+ g_bserrno = -1;
+ dev_set_power_failure_thresholds(thresholds);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ dev_reset_power_failure_event();
+
+ if (g_bserrno == 0) {
+ /* After successful md sync, both bs and super block
+ * should be marked as not clean. */
+ SPDK_CU_ASSERT_FATAL(bs->clean == 0);
+ SPDK_CU_ASSERT_FATAL(super->clean == 0);
+ clean = true;
+ }
+
+ /* Depending on the point of failure, super block was either updated or not. */
+ super_copy.clean = super->clean;
+ super_copy.crc = blob_md_page_calc_crc(&super_copy);
+ /* Compare that the values in super block remained unchanged. */
+ SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
+
+ /* Delete blob and unload bs */
+ suite_blob_cleanup();
+
+ thresholds.general_threshold++;
+ }
+}
+
+static void
+blob_delete_snapshot_power_failure(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_blob_store *bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ struct spdk_power_failure_thresholds thresholds = {};
+ spdk_blob_id blobid, snapshotid;
+ const void *value;
+ size_t value_len;
+ size_t count;
+ spdk_blob_id ids[3] = {};
+ int rc;
+ bool deleted = false;
+ int delete_snapshot_bserrno = -1;
+
+ thresholds.general_threshold = 1;
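+ /* Re-run the snapshot deletion with the simulated power failure injected progressively
+ * later on each pass. After every interrupted run the bs is dirty-loaded and its
+ * snapshot/clone relations are re-verified; the loop stops once the snapshot is gone
+ * and the delete call itself reported success. */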
+ while (!deleted) {
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ /* Create snapshot */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
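+ /* The freshly created 10-cluster blob is expected to occupy cluster indices 1 through 10,
+ * hence the checks on indices 1 and 11 below. */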
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1));
+ SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11));
+
+ dev_set_power_failure_thresholds(thresholds);
+
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ poll_threads();
+ delete_snapshot_bserrno = g_bserrno;
+
+ /* Do not shut down cleanly. Assumption is that after snapshot deletion
+ * reports success, changes to both blobs should already be persisted. */
+ dev_reset_power_failure_event();
+ ut_bs_dirty_load(&bs, NULL);
+
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1));
+ SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11));
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+
+ if (g_bserrno == 0) {
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+ rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
+ CU_ASSERT(rc != 0);
+ SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ } else {
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
+ /* The snapshot might have been left in an unrecoverable state, so it does not open.
+ * Even so, the delete may still perform further changes to the clone afterwards.
+ * This UT keeps testing until the snapshot is deleted and the delete call succeeds. */
+ if (delete_snapshot_bserrno == 0) {
+ deleted = true;
+ }
+ }
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ thresholds.general_threshold++;
+ }
+}
+
+static void
+blob_create_snapshot_power_failure(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ struct spdk_power_failure_thresholds thresholds = {};
+ spdk_blob_id blobid, snapshotid;
+ const void *value;
+ size_t value_len;
+ size_t count;
+ spdk_blob_id ids[3] = {};
+ int rc;
+ bool created = false;
+ int create_snapshot_bserrno = -1;
+
+ thresholds.general_threshold = 1;
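+ /* Same pattern as the deletion test above, but for snapshot creation: the loop only
+ * exits after a pass where the snapshot survives the dirty load intact and the create
+ * call itself reported success. */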
+ while (!created) {
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1));
+ SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11));
+
+ dev_set_power_failure_thresholds(thresholds);
+
+ /* Create snapshot */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ create_snapshot_bserrno = g_bserrno;
+ snapshotid = g_blobid;
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1));
+ SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11));
+
+ /* Do not shut down cleanly. Assumption is that after snapshot creation
+ * reports success, both blobs should be power-fail safe. */
+ dev_reset_power_failure_event();
+ ut_bs_dirty_load(&bs, NULL);
+
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1));
+ SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11));
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ if (snapshotid != SPDK_BLOBID_INVALID) {
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ }
+
+ if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
+ SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+ rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ if (create_snapshot_bserrno == 0) {
+ created = true;
+ }
+ } else {
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
+ SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
+ }
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ thresholds.general_threshold++;
+ }
+}
+
+static void
+test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Try to perform I/O with io unit = 512 */
+ spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* If thin provisioning is set, the cluster should be allocated now */
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+
+ /* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
+ * Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
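+ /* The bs under test uses cluster_sz = SPDK_BS_PAGE_SIZE * 4 with a 512-byte io_unit,
+ * i.e. 32 io_units (4 pages of 8) per cluster, so each bracketed row covers one cluster. */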
+ /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Verify write with offset on first page */
+ spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify write with offset on first page */
+ spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Verify write with offset on second page */
+ spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple pages */
+ spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple clusters */
+ spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
+ poll_threads();
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify write to second cluster */
+ spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
+ poll_threads();
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
+}
+
+static void
+test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_read[64 * 512];
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Read only first io unit */
+ /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F000 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Read four io_units starting from offset = 2
+ * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F0AA 0000 | 0000 0000 ... */
+
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read eight io_units across multiple pages
+ * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: AAAA AAAA | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read eight io_units across multiple clusters
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
+ * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: FFFF FFFF | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read four io_units from second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
+ * payload_read: 00FF 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
+ * payload_read: FFFF 0000 | 0000 FF00 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
+
+ /* Read whole two clusters
+ * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
+}
+
+static void
+test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* Unmap */
+ spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
+}
+
+static void
+test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* Write zeroes */
+ spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
+}
+
+static void
+test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+ struct iovec iov[4];
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Try to perform I/O with io unit = 512 */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 1 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* If thin provisioning is set, the cluster should be allocated now */
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+
+ /* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
+ * Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
+ /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Verify write with offset on first page */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 1 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify write with offset on first page */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 4 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Verify write with offset on second page */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 4 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple pages */
+ iov[0].iov_base = payload_aa;
+ iov[0].iov_len = 8 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL);
+ poll_threads();
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple clusters */
+
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 8 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL);
+ poll_threads();
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify write to second cluster */
+
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 2 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL);
+ poll_threads();
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
+}
+
+static void
+test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_read[64 * 512];
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ struct iovec iov[4];
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Read only first io unit */
+ /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F000 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Read four io_units starting from offset = 2
+ * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F0AA 0000 | 0000 0000 ... */
+
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 4 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read eight io_units across multiple pages
+ * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: AAAA AAAA | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 4 * 512;
+ iov[1].iov_base = payload_read + 4 * 512;
+ iov[1].iov_len = 4 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read eight io_units across multiple clusters
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
+ * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: FFFF FFFF | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 2 * 512;
+ iov[1].iov_base = payload_read + 2 * 512;
+ iov[1].iov_len = 2 * 512;
+ iov[2].iov_base = payload_read + 4 * 512;
+ iov[2].iov_len = 2 * 512;
+ iov[3].iov_base = payload_read + 6 * 512;
+ iov[3].iov_len = 2 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read four io_units from second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
+ * payload_read: 00FF 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ iov[1].iov_base = payload_read + 1 * 512;
+ iov[1].iov_len = 3 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
+ * payload_read: FFFF 0000 | 0000 FF00 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ iov[1].iov_base = payload_read + 1 * 512;
+ iov[1].iov_len = 2 * 512;
+ iov[2].iov_base = payload_read + 3 * 512;
+ iov[2].iov_len = 4 * 512;
+ iov[3].iov_base = payload_read + 7 * 512;
+ iov[3].iov_len = 25 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
+
+ /* Read whole two clusters
+ * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ iov[1].iov_base = payload_read + 1 * 512;
+ iov[1].iov_len = 8 * 512;
+ iov[2].iov_base = payload_read + 9 * 512;
+ iov[2].iov_len = 16 * 512;
+ iov[3].iov_base = payload_read + 25 * 512;
+ iov[3].iov_len = 39 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
+}
+
+static void
+blob_io_unit(void)
+{
+ struct spdk_bs_opts bsopts;
+ struct spdk_blob_opts opts;
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob, *snapshot, *clone;
+ spdk_blob_id blobid;
+ struct spdk_io_channel *channel;
+
+ /* Create dev with 512 bytes io unit size */
+
+ spdk_bs_opts_init(&bsopts);
+ bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */
+ snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
+
+ /* Try to initialize a new blob store with unsupported io_unit */
+ dev = init_dev();
+ dev->blocklen = 512;
+ dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
+ channel = spdk_bs_alloc_io_channel(bs);
+
+ /* Create thick provisioned blob */
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = false;
+ opts.num_clusters = 32;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ test_io_write(dev, blob, channel);
+ test_io_read(dev, blob, channel);
+ test_io_zeroes(dev, blob, channel);
+
+ test_iov_write(dev, blob, channel);
+ test_iov_read(dev, blob, channel);
+
+ test_io_unmap(dev, blob, channel);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+
+ /* Create thin provisioned blob */
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 32;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ test_io_write(dev, blob, channel);
+ test_io_read(dev, blob, channel);
+
+ test_io_zeroes(dev, blob, channel);
+
+ test_iov_write(dev, blob, channel);
+ test_iov_read(dev, blob, channel);
+
+ /* Create snapshot */
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ snapshot = g_blob;
+
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ clone = g_blob;
+
+ test_io_read(dev, blob, channel);
+ test_io_read(dev, snapshot, channel);
+ test_io_read(dev, clone, channel);
+
+ test_iov_read(dev, blob, channel);
+ test_iov_read(dev, snapshot, channel);
+ test_iov_read(dev, clone, channel);
+
+ /* Inflate clone */
+
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ poll_threads();
+
+ CU_ASSERT(g_bserrno == 0);
+
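+ /* After inflation the clone owns a private copy of every cluster, so the reads below
+ * must still return the same data, and the following unmap and writes exercise the
+ * clone independently of the snapshot. */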
+ test_io_read(dev, clone, channel);
+
+ test_io_unmap(dev, clone, channel);
+
+ test_iov_write(dev, clone, channel);
+ test_iov_read(dev, clone, channel);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_io_unit_compatiblity(void)
+{
+ struct spdk_bs_opts bsopts;
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super;
+
+ /* Create dev with 512 bytes io unit size */
+
+ spdk_bs_opts_init(&bsopts);
+ bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */
+ snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
+
+ /* Try to initialize a new blob store with unsupported io_unit */
+ dev = init_dev();
+ dev->blocklen = 512;
+ dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Modify the super block to behave like an older version.
+ * Check that the loaded io unit size equals SPDK_BS_PAGE_SIZE. */
+ super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
+ super->io_unit_size = 0;
+ super->crc = blob_md_page_calc_crc(super);
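+ /* Recompute the CRC after zeroing io_unit_size so that the subsequent load accepts
+ * the modified super block and exercises the compatibility fallback. */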
+
+ dev = init_dev();
+ dev->blocklen = 512;
+ dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
+
+ spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
+
+ /* Unload the blob store */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_simultaneous_operations(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ spdk_blob_id blobid, snapshotid;
+ struct spdk_io_channel *channel;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ /* Create snapshot and try to remove blob at the same time:
+ * - snapshot should be created successfully
+ * - delete operation should fail with -EBUSY */
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ /* Deletion failure */
+ CU_ASSERT(g_bserrno == -EBUSY);
+ poll_threads();
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ /* Snapshot creation success */
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+
+ /* Inflate blob and try to remove blob at the same time:
+ * - blob should be inflated successfully
+ * - delete operation should fail with -EBUSY */
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ /* Deletion failure */
+ CU_ASSERT(g_bserrno == -EBUSY);
+ poll_threads();
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ /* Inflation success */
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Clone snapshot and try to remove snapshot at the same time:
+ * - snapshot should be cloned successfully
+ * - delete operation should fail with -EBUSY */
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ /* Deletion failure */
+ CU_ASSERT(g_bserrno == -EBUSY);
+ poll_threads();
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ /* Clone created */
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Resize blob and try to remove blob at the same time:
+ * - blob should be resized successfully
+ * - delete operation should fail with -EBUSY */
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ spdk_blob_resize(blob, 50, blob_op_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(blob->locked_operation_in_progress == true);
+ /* Deletion failure */
+ CU_ASSERT(g_bserrno == -EBUSY);
+ poll_threads();
+ CU_ASSERT(blob->locked_operation_in_progress == false);
+ /* Blob resized successfully */
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Issue two consecutive blob syncs; neither should fail.
+ * Force each sync to actually occur by marking the blob dirty first.
+ * Merely issuing the sync is not enough to complete the operation,
+ * since disk I/O is required to complete it. */
+ g_bserrno = -1;
+
+ blob->state = SPDK_BLOB_STATE_DIRTY;
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_bserrno == -1);
+
+ blob->state = SPDK_BLOB_STATE_DIRTY;
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_bserrno == -1);
+
+ uint32_t completions = 0;
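+ /* Poll in single steps so the two in-flight syncs complete independently and each
+ * completion (g_bserrno == 0) is counted exactly once. */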
+ while (completions < 2) {
+ SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1));
+ if (g_bserrno == 0) {
+ g_bserrno = -1;
+ completions++;
+ }
+ /* g_bserrno should never be anything other than -1 here;
+ * that would mean one of the syncs failed. */
+ SPDK_CU_ASSERT_FATAL(g_bserrno == -1);
+ }
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+
+ ut_blob_close_and_delete(bs, snapshot);
+ ut_blob_close_and_delete(bs, blob);
+}
+
+static void
+blob_persist_test(void)
+{
+ struct spdk_blob_store *bs = g_bs;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid;
+ struct spdk_io_channel *channel;
+ char *xattr;
+ size_t xattr_length;
+ int rc;
+ uint32_t page_count_clear, page_count_xattr;
+ uint64_t poller_iterations;
+ bool run_poller;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ ut_spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ blob = ut_blob_create_and_open(bs, &opts);
+ blobid = spdk_blob_get_id(blob);
+
+ /* Save the number of md pages used after creation of a blob.
+ * This should be consistent after removing the xattr. */
+ page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
+ SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
+ SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
+
+ /* Add xattr with maximum length of descriptor to exceed single metadata page. */
+ xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
+ strlen("large_xattr");
+ xattr = calloc(xattr_length, sizeof(char));
+ SPDK_CU_ASSERT_FATAL(xattr != NULL);
+
+ rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
+
+ /* Save the number of md pages used after adding the large xattr */
+ page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
+ SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
+ SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
+
+ /* Add an xattr to the blob and sync it. While the sync is occurring, remove the xattr and sync again.
+ * Interrupt the first sync after an increasing number of poller iterations, until it succeeds.
+ * Expectation is that after the second sync completes, no xattr is saved in the metadata. */
+ poller_iterations = 1;
+ run_poller = true;
+ while (run_poller) {
+ rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_thread_times(0, poller_iterations);
+ if (g_bserrno == 0) {
+ /* Poller iteration count was high enough for the first sync to complete.
+ * Verify that the blob takes up enough md_pages to store the xattr. */
+ SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
+ SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
+ run_poller = false;
+ }
+ rc = spdk_blob_remove_xattr(blob, "large_xattr");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
+ SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
+
+ /* Reload bs and re-open blob to verify that xattr was not persisted. */
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+
+ ut_bs_reload(&bs, NULL);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
+ SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
+
+ poller_iterations++;
+ /* Stop at a high iteration count to prevent an infinite loop.
+ * This value should be enough for the first md sync to complete in any case. */
+ SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
+ }
+
+ free(xattr);
+
+ ut_blob_close_and_delete(bs, blob);
+
+ spdk_bs_free_io_channel(channel);
+ poll_threads();
+}
+
+static void
+suite_bs_setup(void)
+{
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+ memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_bs != NULL);
+}
+
+static void
+suite_bs_cleanup(void)
+{
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
+}
+
+static struct spdk_blob *
+ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
+{
+ struct spdk_blob *blob;
+ struct spdk_blob_opts create_blob_opts;
+ spdk_blob_id blobid;
+
+ if (blob_opts == NULL) {
+ ut_spdk_blob_opts_init(&create_blob_opts);
+ blob_opts = &create_blob_opts;
+ }
+
+ spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+ g_blobid = -1;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ g_blob = NULL;
+ g_bserrno = -1;
+
+ return blob;
+}
+
+static void
+ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
+{
+ spdk_blob_id blobid = spdk_blob_get_id(blob);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_blob = NULL;
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ poll_threads();
+ CU_ASSERT(g_bserrno == 0);
+ g_bserrno = -1;
+}
+
+static void
+suite_blob_setup(void)
+{
+ suite_bs_setup();
+ CU_ASSERT(g_bs != NULL);
+
+ g_blob = ut_blob_create_and_open(g_bs, NULL);
+ CU_ASSERT(g_blob != NULL);
+}
+
+static void
+suite_blob_cleanup(void)
+{
+ ut_blob_close_and_delete(g_bs, g_blob);
+ CU_ASSERT(g_blob == NULL);
+
+ suite_bs_cleanup();
+ CU_ASSERT(g_bs == NULL);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite, suite_bs, suite_blob;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("blob", NULL, NULL);
+ suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
+ suite_bs_setup, suite_bs_cleanup);
+ suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
+ suite_blob_setup, suite_blob_cleanup);
+
+ CU_ADD_TEST(suite, blob_init);
+ CU_ADD_TEST(suite_bs, blob_open);
+ CU_ADD_TEST(suite_bs, blob_create);
+ CU_ADD_TEST(suite_bs, blob_create_fail);
+ CU_ADD_TEST(suite_bs, blob_create_internal);
+ CU_ADD_TEST(suite, blob_thin_provision);
+ CU_ADD_TEST(suite_bs, blob_snapshot);
+ CU_ADD_TEST(suite_bs, blob_clone);
+ CU_ADD_TEST(suite_bs, blob_inflate);
+ CU_ADD_TEST(suite_bs, blob_delete);
+ CU_ADD_TEST(suite_bs, blob_resize_test);
+ CU_ADD_TEST(suite, blob_read_only);
+ CU_ADD_TEST(suite_bs, channel_ops);
+ CU_ADD_TEST(suite_bs, blob_super);
+ CU_ADD_TEST(suite_blob, blob_write);
+ CU_ADD_TEST(suite_blob, blob_read);
+ CU_ADD_TEST(suite_blob, blob_rw_verify);
+ CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
+ CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
+ CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
+ CU_ADD_TEST(suite_bs, blob_unmap);
+ CU_ADD_TEST(suite_bs, blob_iter);
+ CU_ADD_TEST(suite_blob, blob_xattr);
+ CU_ADD_TEST(suite, bs_load);
+ CU_ADD_TEST(suite_bs, bs_load_pending_removal);
+ CU_ADD_TEST(suite, bs_load_custom_cluster_size);
+ CU_ADD_TEST(suite_bs, bs_unload);
+ CU_ADD_TEST(suite, bs_cluster_sz);
+ CU_ADD_TEST(suite_bs, bs_usable_clusters);
+ CU_ADD_TEST(suite, bs_resize_md);
+ CU_ADD_TEST(suite, bs_destroy);
+ CU_ADD_TEST(suite, bs_type);
+ CU_ADD_TEST(suite, bs_super_block);
+ CU_ADD_TEST(suite, blob_serialize_test);
+ CU_ADD_TEST(suite_bs, blob_crc);
+ CU_ADD_TEST(suite, super_block_crc);
+ CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
+ CU_ADD_TEST(suite_bs, blob_flags);
+ CU_ADD_TEST(suite_bs, bs_version);
+ CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
+ CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
+ CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
+ CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
+ CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
+ CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
+ CU_ADD_TEST(suite, bs_load_iter_test);
+ CU_ADD_TEST(suite_bs, blob_snapshot_rw);
+ CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
+ CU_ADD_TEST(suite, blob_relations);
+ CU_ADD_TEST(suite, blob_relations2);
+ CU_ADD_TEST(suite, blobstore_clean_power_failure);
+ CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
+ CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
+ CU_ADD_TEST(suite_bs, blob_inflate_rw);
+ CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
+ CU_ADD_TEST(suite_bs, blob_operation_split_rw);
+ CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
+ CU_ADD_TEST(suite, blob_io_unit);
+ CU_ADD_TEST(suite, blob_io_unit_compatiblity);
+ CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
+ CU_ADD_TEST(suite_bs, blob_persist_test);
+
+ allocate_threads(2);
+ set_thread(0);
+
+ g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ g_use_extent_table = false;
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ g_use_extent_table = true;
+ CU_basic_run_tests();
+ num_failures += CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free(g_dev_buffer);
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blob/bs_dev_common.c b/src/spdk/test/unit/lib/blob/bs_dev_common.c
new file mode 100644
index 000000000..4e94fef8b
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/bs_dev_common.c
@@ -0,0 +1,395 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/thread.h"
+#include "bs_scheduler.c"
+
+
+#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
+#define DEV_BUFFER_BLOCKLEN (4096)
+#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
+uint8_t *g_dev_buffer;
+uint64_t g_dev_write_bytes;
+uint64_t g_dev_read_bytes;
+
+struct spdk_power_failure_counters {
+ uint64_t general_counter;
+ uint64_t read_counter;
+ uint64_t write_counter;
+ uint64_t unmap_counter;
+ uint64_t write_zero_counter;
+ uint64_t flush_counter;
+};
+
+static struct spdk_power_failure_counters g_power_failure_counters = {};
+
+struct spdk_power_failure_thresholds {
+ uint64_t general_threshold;
+ uint64_t read_threshold;
+ uint64_t write_threshold;
+ uint64_t unmap_threshold;
+ uint64_t write_zero_threshold;
+ uint64_t flush_threshold;
+};
+
+static struct spdk_power_failure_thresholds g_power_failure_thresholds = {};
+
+static uint64_t g_power_failure_rc;
+
+void dev_reset_power_failure_event(void);
+void dev_reset_power_failure_counters(void);
+void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds);
+
+void
+dev_reset_power_failure_event(void)
+{
+ memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
+ memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds));
+ g_power_failure_rc = 0;
+}
+
+void
+dev_reset_power_failure_counters(void)
+{
+ memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
+ g_power_failure_rc = 0;
+}
+
+/**
+ * Set the power failure event. A power failure will occur after the given number
+ * of IO operations. It may occur after a number of particular operations
+ * (read, write, unmap, write zero or flush) or after a given number of
+ * any IO operations (general_threshold). A value of 0 means that the threshold
+ * is disabled. Any other value is the number of the operation at which the
+ * power failure event will happen.
+ */
+void
+dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds)
+{
+ g_power_failure_thresholds = thresholds;
+}
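For illustration (this sketch is not part of the patch): the power-failure hooks above are presumably driven by the *_power_failure tests registered in blob_ut's main(). A caller zeroes the thresholds struct, arms the counter of interest, and resets the event afterwards:

    struct spdk_power_failure_thresholds thresholds = {};

    thresholds.write_threshold = 3;           /* the 3rd write, and everything after it, fails */
    dev_set_power_failure_thresholds(thresholds);

    /* ... run blobstore operations against the in-memory dev; once the write counter
     * reaches the threshold, dev_write() skips the memcpy and completes with -EIO ... */

    dev_reset_power_failure_event();          /* clears thresholds, counters and the stored rc */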
+
+/* Define here for UT only. */
+struct spdk_io_channel g_io_channel;
+
+static struct spdk_io_channel *
+dev_create_channel(struct spdk_bs_dev *dev)
+{
+ return &g_io_channel;
+}
+
+static void
+dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
+{
+}
+
+static void
+dev_destroy(struct spdk_bs_dev *dev)
+{
+ free(dev);
+}
+
+
+static void
+dev_complete_cb(void *arg)
+{
+ struct spdk_bs_dev_cb_args *cb_args = arg;
+
+ cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc);
+}
+
+static void
+dev_complete(void *arg)
+{
+ _bs_send_msg(dev_complete_cb, arg, NULL);
+}
+
+static void
+dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ if (g_power_failure_thresholds.read_threshold != 0) {
+ g_power_failure_counters.read_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.read_threshold == 0 ||
+ g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+
+ memcpy(payload, &g_dev_buffer[offset], length);
+ g_dev_read_bytes += length;
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ if (g_power_failure_thresholds.write_threshold != 0) {
+ g_power_failure_counters.write_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.write_threshold == 0 ||
+ g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+
+ memcpy(&g_dev_buffer[offset], payload, length);
+ g_dev_write_bytes += length;
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
+{
+ int i;
+
+ for (i = 0; i < iovcnt; i++) {
+ length -= iov[i].iov_len;
+ }
+
+ CU_ASSERT(length == 0);
+}
+
+static void
+dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+ int i;
+
+ if (g_power_failure_thresholds.read_threshold != 0) {
+ g_power_failure_counters.read_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.read_threshold == 0 ||
+ g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ __check_iov(iov, iovcnt, length);
+
+ for (i = 0; i < iovcnt; i++) {
+ memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
+ offset += iov[i].iov_len;
+ }
+
+ g_dev_read_bytes += length;
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+ int i;
+
+ if (g_power_failure_thresholds.write_threshold != 0) {
+ g_power_failure_counters.write_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.write_threshold == 0 ||
+ g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ __check_iov(iov, iovcnt, length);
+
+ for (i = 0; i < iovcnt; i++) {
+ memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
+ offset += iov[i].iov_len;
+ }
+
+ g_dev_write_bytes += length;
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ if (g_power_failure_thresholds.flush_threshold != 0) {
+ g_power_failure_counters.flush_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.flush_threshold != 0 &&
+ g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold) ||
+ (g_power_failure_thresholds.general_threshold != 0 &&
+ g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ if (g_power_failure_thresholds.unmap_threshold != 0) {
+ g_power_failure_counters.unmap_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.unmap_threshold == 0 ||
+ g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ memset(&g_dev_buffer[offset], 0, length);
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ if (g_power_failure_thresholds.write_zero_threshold != 0) {
+ g_power_failure_counters.write_zero_counter++;
+ }
+
+ if (g_power_failure_thresholds.general_threshold != 0) {
+ g_power_failure_counters.general_counter++;
+ }
+
+ if ((g_power_failure_thresholds.write_zero_threshold == 0 ||
+ g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold) &&
+ (g_power_failure_thresholds.general_threshold == 0 ||
+ g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ memset(&g_dev_buffer[offset], 0, length);
+ g_dev_write_bytes += length;
+ } else {
+ g_power_failure_rc = -EIO;
+ }
+
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static struct spdk_bs_dev *
+init_dev(void)
+{
+ struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));
+
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+
+ dev->create_channel = dev_create_channel;
+ dev->destroy_channel = dev_destroy_channel;
+ dev->destroy = dev_destroy;
+ dev->read = dev_read;
+ dev->write = dev_write;
+ dev->readv = dev_readv;
+ dev->writev = dev_writev;
+ dev->flush = dev_flush;
+ dev->unmap = dev_unmap;
+ dev->write_zeroes = dev_write_zeroes;
+ dev->blockcnt = DEV_BUFFER_BLOCKCNT;
+ dev->blocklen = DEV_BUFFER_BLOCKLEN;
+
+ return dev;
+}
diff --git a/src/spdk/test/unit/lib/blob/bs_scheduler.c b/src/spdk/test/unit/lib/blob/bs_scheduler.c
new file mode 100644
index 000000000..4b58fa007
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/bs_scheduler.c
@@ -0,0 +1,87 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+bool g_scheduler_delay = false;
+
+struct scheduled_ops {
+ spdk_msg_fn fn;
+ void *ctx;
+
+ TAILQ_ENTRY(scheduled_ops) ops_queue;
+};
+
+static TAILQ_HEAD(, scheduled_ops) g_scheduled_ops = TAILQ_HEAD_INITIALIZER(g_scheduled_ops);
+
+void _bs_flush_scheduler(uint32_t);
+
+static void
+_bs_send_msg(spdk_msg_fn fn, void *ctx, void *thread_ctx)
+{
+ if (g_scheduler_delay) {
+ struct scheduled_ops *ops = calloc(1, sizeof(*ops));
+
+ SPDK_CU_ASSERT_FATAL(ops != NULL);
+ ops->fn = fn;
+ ops->ctx = ctx;
+ TAILQ_INSERT_TAIL(&g_scheduled_ops, ops, ops_queue);
+
+ } else {
+ fn(ctx);
+ }
+}
+
+static void
+_bs_flush_scheduler_single(void)
+{
+ struct scheduled_ops *op;
+ TAILQ_HEAD(, scheduled_ops) ops;
+ TAILQ_INIT(&ops);
+
+ TAILQ_SWAP(&g_scheduled_ops, &ops, scheduled_ops, ops_queue);
+
+ while (!TAILQ_EMPTY(&ops)) {
+ op = TAILQ_FIRST(&ops);
+ TAILQ_REMOVE(&ops, op, ops_queue);
+
+ op->fn(op->ctx);
+ free(op);
+ }
+}
+
+void
+_bs_flush_scheduler(uint32_t n)
+{
+ while (n--) {
+ _bs_flush_scheduler_single();
+ }
+}
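For illustration (this sketch is not part of the patch, and assumes the blob_ut globals such as g_bs, g_bserrno and bs_op_complete): setting g_scheduler_delay makes _bs_send_msg() queue bs_dev completions instead of running them, and _bs_flush_scheduler() then releases them one round at a time, which lets a test observe a blobstore operation mid-flight:

    g_scheduler_delay = true;

    g_bserrno = -1;
    spdk_bs_unload(g_bs, bs_op_complete, NULL);
    poll_threads();
    /* Device completions are parked in g_scheduled_ops; bs_op_complete has not fired yet. */

    _bs_flush_scheduler(1);        /* run one round of queued completions */
    poll_threads();
    /* Operations that need several IO rounds may need further flush/poll cycles. */

    g_scheduler_delay = false;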
diff --git a/src/spdk/test/unit/lib/blobfs/Makefile b/src/spdk/test/unit/lib/blobfs/Makefile
new file mode 100644
index 000000000..5a2c5b3f3
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = tree.c blobfs_async_ut blobfs_sync_ut blobfs_bdev.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore
new file mode 100644
index 000000000..aea3b021d
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore
@@ -0,0 +1 @@
+blobfs_async_ut
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile
new file mode 100644
index 000000000..6de0fc248
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = blob
+TEST_FILE = blobfs_async_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c
new file mode 100644
index 000000000..134b8bfe9
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c
@@ -0,0 +1,704 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+
+#include "common/lib/ut_multithread.c"
+
+#include "spdk_cunit.h"
+#include "blobfs/blobfs.c"
+#include "blobfs/tree.c"
+#include "blob/blobstore.h"
+
+#include "spdk_internal/thread.h"
+
+#include "unit/lib/blob/bs_dev_common.c"
+
+struct spdk_filesystem *g_fs;
+struct spdk_file *g_file;
+int g_fserrno;
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_is_ptr, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+/* Return NULL to test hardcoded defaults. */
+struct spdk_conf_section *
+spdk_conf_find_section(struct spdk_conf *cp, const char *name)
+{
+ return NULL;
+}
+
+/* Return -1 to test hardcoded defaults. */
+int
+spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key)
+{
+ return -1;
+}
+
+static void
+fs_op_complete(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+fs_op_with_handle_complete(void *ctx, struct spdk_filesystem *fs, int fserrno)
+{
+ g_fs = fs;
+ g_fserrno = fserrno;
+}
+
+static void
+fs_poll_threads(void)
+{
+ poll_threads();
+ while (spdk_thread_poll(g_cache_pool_thread, 0, 0) > 0) {}
+}
+
+static void
+fs_init(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+create_cb(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+open_cb(void *ctx, struct spdk_file *f, int fserrno)
+{
+ g_fserrno = fserrno;
+ g_file = f;
+}
+
+static void
+delete_cb(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+fs_open(void)
+{
+ struct spdk_filesystem *fs;
+ spdk_fs_iter iter;
+ struct spdk_bs_dev *dev;
+ struct spdk_file *file;
+ char name[257] = {'\0'};
+
+ dev = init_dev();
+ memset(name, 'a', sizeof(name) - 1);
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_fserrno = 0;
+ /* Open should fail, because the file name is too long. */
+ spdk_fs_open_file_async(fs, name, SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == -ENAMETOOLONG);
+
+ g_fserrno = 0;
+ spdk_fs_open_file_async(fs, "file1", 0, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == -ENOENT);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+ CU_ASSERT(!strcmp("file1", g_file->name));
+ CU_ASSERT(g_file->ref_count == 1);
+
+ iter = spdk_fs_iter_first(fs);
+ CU_ASSERT(iter != NULL);
+ file = spdk_fs_iter_get_file(iter);
+ SPDK_CU_ASSERT_FATAL(file != NULL);
+ CU_ASSERT(!strcmp("file1", file->name));
+ iter = spdk_fs_iter_next(iter);
+ CU_ASSERT(iter == NULL);
+
+ g_fserrno = 0;
+ /* Delete should succeed; the file will be marked as deleted. */
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_file_close_async(g_file, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+fs_create(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ char name[257] = {'\0'};
+
+ dev = init_dev();
+ memset(name, 'a', sizeof(name) - 1);
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_fserrno = 0;
+ /* Create should fail, because the file name is too long. */
+ spdk_fs_create_file_async(fs, name, create_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == -ENAMETOOLONG);
+
+ g_fserrno = 1;
+ spdk_fs_create_file_async(fs, "file1", create_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+
+ g_fserrno = 1;
+ spdk_fs_create_file_async(fs, "file1", create_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == -EEXIST);
+
+ g_fserrno = 1;
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+fs_truncate(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ g_fserrno = 1;
+ spdk_file_truncate_async(g_file, 18 * 1024 * 1024 + 1, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 18 * 1024 * 1024 + 1);
+
+ g_fserrno = 1;
+ spdk_file_truncate_async(g_file, 1, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 1);
+
+ g_fserrno = 1;
+ spdk_file_truncate_async(g_file, 18 * 1024 * 1024 + 1, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 18 * 1024 * 1024 + 1);
+
+ g_fserrno = 1;
+ spdk_file_close_async(g_file, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->ref_count == 0);
+
+ g_fserrno = 1;
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+fs_rename(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_file *file, *file2, *file_iter;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_fserrno = 1;
+ spdk_fs_create_file_async(fs, "file1", create_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", 0, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+ CU_ASSERT(g_file->ref_count == 1);
+
+ file = g_file;
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_file_close_async(file, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(file->ref_count == 0);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file2", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+ CU_ASSERT(g_file->ref_count == 1);
+
+ file2 = g_file;
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_file_close_async(file2, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(file2->ref_count == 0);
+
+ /*
+ * Do a 3-way rename. This should delete the old "file2", then rename
+ * "file1" to "file2".
+ */
+ g_fserrno = 1;
+ spdk_fs_rename_file_async(fs, "file1", "file2", fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(file->ref_count == 0);
+ CU_ASSERT(!strcmp(file->name, "file2"));
+ CU_ASSERT(TAILQ_FIRST(&fs->files) == file);
+ CU_ASSERT(TAILQ_NEXT(file, tailq) == NULL);
+
+ g_fserrno = 0;
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == -ENOENT);
+ CU_ASSERT(!TAILQ_EMPTY(&fs->files));
+ TAILQ_FOREACH(file_iter, &fs->files, tailq) {
+ if (file_iter == NULL) {
+ SPDK_CU_ASSERT_FATAL(false);
+ }
+ }
+
+ g_fserrno = 1;
+ spdk_fs_delete_file_async(fs, "file2", delete_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+fs_rw_async(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ uint8_t w_buf[4096];
+ uint8_t r_buf[4096];
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ /* Write file */
+ CU_ASSERT(g_file->length == 0);
+ g_fserrno = 1;
+ memset(w_buf, 0x5a, sizeof(w_buf));
+ spdk_file_write_async(g_file, fs->sync_target.sync_io_channel, w_buf, 0, 4096,
+ fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 4096);
+
+ /* Read file */
+ g_fserrno = 1;
+ memset(r_buf, 0x0, sizeof(r_buf));
+ spdk_file_read_async(g_file, fs->sync_target.sync_io_channel, r_buf, 0, 4096,
+ fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(memcmp(r_buf, w_buf, sizeof(r_buf)) == 0);
+
+ g_fserrno = 1;
+ spdk_file_close_async(g_file, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+fs_writev_readv_async(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ struct iovec w_iov[2];
+ struct iovec r_iov[2];
+ uint8_t w_buf[4096];
+ uint8_t r_buf[4096];
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ /* Write file */
+ CU_ASSERT(g_file->length == 0);
+ g_fserrno = 1;
+ memset(w_buf, 0x5a, sizeof(w_buf));
+ w_iov[0].iov_base = w_buf;
+ w_iov[0].iov_len = 2048;
+ w_iov[1].iov_base = w_buf + 2048;
+ w_iov[1].iov_len = 2048;
+ spdk_file_writev_async(g_file, fs->sync_target.sync_io_channel,
+ w_iov, 2, 0, 4096, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 4096);
+
+ /* Read file */
+ g_fserrno = 1;
+ memset(r_buf, 0x0, sizeof(r_buf));
+ r_iov[0].iov_base = r_buf;
+ r_iov[0].iov_len = 2048;
+ r_iov[1].iov_base = r_buf + 2048;
+ r_iov[1].iov_len = 2048;
+ spdk_file_readv_async(g_file, fs->sync_target.sync_io_channel,
+ r_iov, 2, 0, 4096, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(memcmp(r_buf, w_buf, sizeof(r_buf)) == 0);
+
+ /* Overwrite the file with block-aligned data */
+ g_fserrno = 1;
+ memset(w_buf, 0x6a, sizeof(w_buf));
+ w_iov[0].iov_base = w_buf;
+ w_iov[0].iov_len = 2048;
+ w_iov[1].iov_base = w_buf + 2048;
+ w_iov[1].iov_len = 2048;
+ spdk_file_writev_async(g_file, fs->sync_target.sync_io_channel,
+ w_iov, 2, 0, 4096, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 4096);
+
+ /* Read file to verify the overwritten data */
+ g_fserrno = 1;
+ memset(r_buf, 0x0, sizeof(r_buf));
+ r_iov[0].iov_base = r_buf;
+ r_iov[0].iov_len = 2048;
+ r_iov[1].iov_base = r_buf + 2048;
+ r_iov[1].iov_len = 2048;
+ spdk_file_readv_async(g_file, fs->sync_target.sync_io_channel,
+ r_iov, 2, 0, 4096, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(memcmp(r_buf, w_buf, sizeof(r_buf)) == 0);
+
+ g_fserrno = 1;
+ spdk_file_close_async(g_file, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+tree_find_buffer_ut(void)
+{
+ struct cache_tree *root;
+ struct cache_tree *level1_0;
+ struct cache_tree *level0_0_0;
+ struct cache_tree *level0_0_12;
+ struct cache_buffer *leaf_0_0_4;
+ struct cache_buffer *leaf_0_12_8;
+ struct cache_buffer *leaf_9_23_15;
+ struct cache_buffer *buffer;
+
+ level1_0 = calloc(1, sizeof(struct cache_tree));
+ SPDK_CU_ASSERT_FATAL(level1_0 != NULL);
+ level0_0_0 = calloc(1, sizeof(struct cache_tree));
+ SPDK_CU_ASSERT_FATAL(level0_0_0 != NULL);
+ level0_0_12 = calloc(1, sizeof(struct cache_tree));
+ SPDK_CU_ASSERT_FATAL(level0_0_12 != NULL);
+ leaf_0_0_4 = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(leaf_0_0_4 != NULL);
+ leaf_0_12_8 = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(leaf_0_12_8 != NULL);
+ leaf_9_23_15 = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(leaf_9_23_15 != NULL);
+
+ level1_0->level = 1;
+ level0_0_0->level = 0;
+ level0_0_12->level = 0;
+
+ leaf_0_0_4->offset = CACHE_BUFFER_SIZE * 4;
+ level0_0_0->u.buffer[4] = leaf_0_0_4;
+ level0_0_0->present_mask |= (1ULL << 4);
+
+ leaf_0_12_8->offset = CACHE_TREE_LEVEL_SIZE(1) * 12 + CACHE_BUFFER_SIZE * 8;
+ level0_0_12->u.buffer[8] = leaf_0_12_8;
+ level0_0_12->present_mask |= (1ULL << 8);
+
+ level1_0->u.tree[0] = level0_0_0;
+ level1_0->present_mask |= (1ULL << 0);
+ level1_0->u.tree[12] = level0_0_12;
+ level1_0->present_mask |= (1ULL << 12);
+
+ buffer = tree_find_buffer(NULL, 0);
+ CU_ASSERT(buffer == NULL);
+
+ buffer = tree_find_buffer(level0_0_0, 0);
+ CU_ASSERT(buffer == NULL);
+
+ buffer = tree_find_buffer(level0_0_0, CACHE_TREE_LEVEL_SIZE(0) + 1);
+ CU_ASSERT(buffer == NULL);
+
+ buffer = tree_find_buffer(level0_0_0, leaf_0_0_4->offset);
+ CU_ASSERT(buffer == leaf_0_0_4);
+
+ buffer = tree_find_buffer(level1_0, leaf_0_0_4->offset);
+ CU_ASSERT(buffer == leaf_0_0_4);
+
+ buffer = tree_find_buffer(level1_0, leaf_0_12_8->offset);
+ CU_ASSERT(buffer == leaf_0_12_8);
+
+ buffer = tree_find_buffer(level1_0, leaf_0_12_8->offset + CACHE_BUFFER_SIZE - 1);
+ CU_ASSERT(buffer == leaf_0_12_8);
+
+ buffer = tree_find_buffer(level1_0, leaf_0_12_8->offset - 1);
+ CU_ASSERT(buffer == NULL);
+
+ leaf_9_23_15->offset = CACHE_TREE_LEVEL_SIZE(2) * 9 +
+ CACHE_TREE_LEVEL_SIZE(1) * 23 +
+ CACHE_BUFFER_SIZE * 15;
+ root = tree_insert_buffer(level1_0, leaf_9_23_15);
+ CU_ASSERT(root != level1_0);
+ buffer = tree_find_buffer(root, leaf_9_23_15->offset);
+ CU_ASSERT(buffer == leaf_9_23_15);
+ tree_free_buffers(root);
+ free(root);
+}
+
+static void
+channel_ops(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ struct spdk_io_channel *channel;
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ channel = spdk_fs_alloc_io_channel(fs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_fs_free_io_channel(channel);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ g_fs = NULL;
+}
+
+static void
+channel_ops_sync(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ struct spdk_fs_thread_ctx *channel;
+
+ dev = init_dev();
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ fs_poll_threads();
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+ SPDK_CU_ASSERT_FATAL(fs->bs->dev == dev);
+
+ channel = spdk_fs_alloc_thread_ctx(fs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ fs_poll_threads();
+ CU_ASSERT(g_fserrno == 0);
+ g_fs = NULL;
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("blobfs_async_ut", NULL, NULL);
+
+ CU_ADD_TEST(suite, fs_init);
+ CU_ADD_TEST(suite, fs_open);
+ CU_ADD_TEST(suite, fs_create);
+ CU_ADD_TEST(suite, fs_truncate);
+ CU_ADD_TEST(suite, fs_rename);
+ CU_ADD_TEST(suite, fs_rw_async);
+ CU_ADD_TEST(suite, fs_writev_readv_async);
+ CU_ADD_TEST(suite, tree_find_buffer_ut);
+ CU_ADD_TEST(suite, channel_ops);
+ CU_ADD_TEST(suite, channel_ops_sync);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ free(g_dev_buffer);
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/.gitignore b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/.gitignore
new file mode 100644
index 000000000..0d29199be
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/.gitignore
@@ -0,0 +1 @@
+blobfs_bdev_ut
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/Makefile b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/Makefile
new file mode 100644
index 000000000..b2d666b1b
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = blobfs_bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut.c b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut.c
new file mode 100644
index 000000000..425b29882
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut.c
@@ -0,0 +1,348 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk/string.h"
+#include "spdk/stdinc.h"
+
+#include "blobfs/bdev/blobfs_bdev.c"
+
+int g_fserrno;
+
+bool g_bdev_open_ext_fail = false;
+bool g_bdev_create_bs_dev_from_desc_fail = false;
+bool g_fs_load_fail = false;
+bool g_fs_unload_fail = false;
+bool g_bs_bdev_claim_fail = false;
+bool g_blobfs_fuse_start_fail = false;
+struct blobfs_bdev_operation_ctx *g_fs_ctx;
+
+const char *g_bdev_name = "ut_bdev";
+
+int
+spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
+ void *event_ctx, struct spdk_bdev_desc **_desc)
+{
+ if (g_bdev_open_ext_fail) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+bs_dev_destroy(struct spdk_bs_dev *dev)
+{
+}
+
+struct spdk_bs_dev *
+spdk_bdev_create_bs_dev_from_desc(struct spdk_bdev_desc *desc)
+{
+ static struct spdk_bs_dev bs_dev;
+
+ if (g_bdev_create_bs_dev_from_desc_fail) {
+ return NULL;
+ }
+
+ bs_dev.destroy = bs_dev_destroy;
+ return &bs_dev;
+}
+
+void
+spdk_fs_load(struct spdk_bs_dev *dev, fs_send_request_fn send_request_fn,
+ spdk_fs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ int rc = 0;
+
+ if (g_fs_load_fail) {
+ rc = -1;
+ }
+
+ cb_fn(cb_arg, NULL, rc);
+
+ return;
+}
+
+void
+spdk_fs_unload(struct spdk_filesystem *fs, spdk_fs_op_complete cb_fn, void *cb_arg)
+{
+ int rc = 0;
+
+ if (g_fs_unload_fail) {
+ rc = -1;
+ }
+
+ cb_fn(cb_arg, rc);
+ return;
+}
+
+void
+spdk_fs_init(struct spdk_bs_dev *dev, struct spdk_blobfs_opts *opt,
+ fs_send_request_fn send_request_fn,
+ spdk_fs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ int rc = 0;
+
+ if (g_fs_load_fail) {
+ rc = -1;
+ }
+
+ cb_fn(cb_arg, NULL, rc);
+ return;
+}
+
+int
+spdk_bs_bdev_claim(struct spdk_bs_dev *bs_dev, struct spdk_bdev_module *module)
+{
+ if (g_bs_bdev_claim_fail) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+blobfs_fuse_start(const char *bdev_name, const char *mountpoint, struct spdk_filesystem *fs,
+ blobfs_fuse_unmount_cb cb_fn, void *cb_arg, struct spdk_blobfs_fuse **_bfuse)
+{
+ if (g_blobfs_fuse_start_fail) {
+ return -1;
+ }
+
+ /* store the ctx for unmount operation */
+ g_fs_ctx = cb_arg;
+
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+int
+spdk_thread_send_msg(const struct spdk_thread *thread, spdk_msg_fn fn, void *ctx)
+{
+ fn(ctx);
+ return 0;
+}
+
+struct spdk_thread *
+spdk_get_thread(void)
+{
+ struct spdk_thread *thd = (struct spdk_thread *)0x1;
+
+ return thd;
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return g_bdev_name;
+}
+
+void
+spdk_fs_opts_init(struct spdk_blobfs_opts *opts)
+{
+}
+
+void
+blobfs_fuse_send_request(fs_request_fn fn, void *arg)
+{
+}
+
+void
+blobfs_fuse_stop(struct spdk_blobfs_fuse *bfuse)
+{
+}
+
+static void
+blobfs_bdev_op_complete(void *cb_arg, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+spdk_blobfs_bdev_detect_test(void)
+{
+ /* spdk_bdev_open_ext() fails */
+ g_bdev_open_ext_fail = true;
+ spdk_blobfs_bdev_detect(g_bdev_name, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_open_ext_fail = false;
+
+ /* spdk_bdev_create_bs_dev_from_desc() fails */
+ g_bdev_create_bs_dev_from_desc_fail = true;
+ spdk_blobfs_bdev_detect(g_bdev_name, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_create_bs_dev_from_desc_fail = false;
+
+ /* spdk_fs_load() fails */
+ g_fs_load_fail = true;
+ spdk_blobfs_bdev_detect(g_bdev_name, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_fs_load_fail = false;
+
+ /* spdk_fs_unload() fails */
+ g_fs_unload_fail = true;
+ spdk_blobfs_bdev_detect(g_bdev_name, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_fs_unload_fail = false;
+
+ /* no fail */
+ spdk_blobfs_bdev_detect(g_bdev_name, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+spdk_blobfs_bdev_create_test(void)
+{
+ uint32_t cluster_sz = 1024 * 1024;
+
+ /* spdk_bdev_open_ext() fails */
+ g_bdev_open_ext_fail = true;
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_open_ext_fail = false;
+
+ /* spdk_bdev_create_bs_dev_from_desc() fails */
+ g_bdev_create_bs_dev_from_desc_fail = true;
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_create_bs_dev_from_desc_fail = false;
+
+ /* spdk_bs_bdev_claim() fails */
+ g_bs_bdev_claim_fail = true;
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bs_bdev_claim_fail = false;
+
+ /* spdk_fs_init() fails */
+ g_fs_load_fail = true;
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_fs_load_fail = false;
+
+ /* spdk_fs_unload() fails */
+ g_fs_unload_fail = true;
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_fs_unload_fail = false;
+
+ /* no fail */
+ spdk_blobfs_bdev_create(g_bdev_name, cluster_sz, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+spdk_blobfs_bdev_mount_test(void)
+{
+#ifdef SPDK_CONFIG_FUSE
+ const char *mountpoint = "/mnt";
+
+ /* spdk_bdev_open_ext() fails */
+ g_bdev_open_ext_fail = true;
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_open_ext_fail = false;
+
+ /* spdk_bdev_create_bs_dev_from_desc() fails */
+ g_bdev_create_bs_dev_from_desc_fail = true;
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bdev_create_bs_dev_from_desc_fail = false;
+
+ /* spdk_bs_bdev_claim() fails */
+ g_bs_bdev_claim_fail = true;
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_bs_bdev_claim_fail = false;
+
+ /* spdk_fs_load() fails */
+ g_fs_load_fail = true;
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_fs_load_fail = false;
+
+ /* blobfs_fuse_start() fails */
+ g_blobfs_fuse_start_fail = true;
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno != 0);
+
+ g_blobfs_fuse_start_fail = false;
+
+ /* no fail */
+ spdk_blobfs_bdev_mount(g_bdev_name, mountpoint, blobfs_bdev_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_fs_ctx != NULL);
+
+ /* After the mount operation succeeds, make sure the unmount operation succeeds as well. */
+ blobfs_bdev_unmount(g_fs_ctx);
+ CU_ASSERT(g_fserrno == 0);
+#endif
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("blobfs_bdev_ut", NULL, NULL);
+
+ CU_ADD_TEST(suite, spdk_blobfs_bdev_detect_test);
+ CU_ADD_TEST(suite, spdk_blobfs_bdev_create_test);
+ CU_ADD_TEST(suite, spdk_blobfs_bdev_mount_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore
new file mode 100644
index 000000000..93ef643ff
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore
@@ -0,0 +1 @@
+blobfs_sync_ut
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile
new file mode 100644
index 000000000..31961be12
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = blob
+TEST_FILE = blobfs_sync_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c
new file mode 100644
index 000000000..f9d00226c
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c
@@ -0,0 +1,703 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/blobfs.h"
+#include "spdk/env.h"
+#include "spdk/log.h"
+#include "spdk/thread.h"
+#include "spdk/barrier.h"
+#include "spdk_internal/thread.h"
+
+#include "spdk_cunit.h"
+#include "unit/lib/blob/bs_dev_common.c"
+#include "common/lib/test_env.c"
+#include "blobfs/blobfs.c"
+#include "blobfs/tree.c"
+
+struct spdk_filesystem *g_fs;
+struct spdk_file *g_file;
+int g_fserrno;
+struct spdk_thread *g_dispatch_thread = NULL;
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_is_ptr, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+/* Return NULL to test hardcoded defaults. */
+struct spdk_conf_section *
+spdk_conf_find_section(struct spdk_conf *cp, const char *name)
+{
+ return NULL;
+}
+
+/* Return -1 to test hardcoded defaults. */
+int
+spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key)
+{
+ return -1;
+}
+
+struct ut_request {
+ fs_request_fn fn;
+ void *arg;
+ volatile int done;
+};
+
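+/* Fire-and-forget dispatcher handed to spdk_fs_init()/spdk_fs_load(): it forwards blobfs
+ * requests to the dispatch thread without waiting for them to run. */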
+static void
+send_request(fs_request_fn fn, void *arg)
+{
+ spdk_thread_send_msg(g_dispatch_thread, (spdk_msg_fn)fn, arg);
+}
+
+static void
+ut_call_fn(void *arg)
+{
+ struct ut_request *req = arg;
+
+ req->fn(req->arg);
+ req->done = 1;
+}
+
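+/* Synchronous counterpart of send_request(): queue fn on the dispatch thread and busy-wait
+ * on the volatile done flag until it has executed. */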
+static void
+ut_send_request(fs_request_fn fn, void *arg)
+{
+ struct ut_request req;
+
+ req.fn = fn;
+ req.arg = arg;
+ req.done = 0;
+
+ spdk_thread_send_msg(g_dispatch_thread, ut_call_fn, &req);
+
+ /* Wait for this to finish */
+ while (req.done == 0) { }
+}
+
+static void
+fs_op_complete(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+fs_op_with_handle_complete(void *ctx, struct spdk_filesystem *fs, int fserrno)
+{
+ g_fs = fs;
+ g_fserrno = fserrno;
+}
+
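+/* Drain pending messages and pollers on the current test thread and on the blobfs cache pool
+ * thread (g_cache_pool_thread) so asynchronous completions land before the assertions that follow. */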
+static void
+fs_thread_poll(void)
+{
+ struct spdk_thread *thread;
+
+ thread = spdk_get_thread();
+ while (spdk_thread_poll(thread, 0, 0) > 0) {}
+ while (spdk_thread_poll(g_cache_pool_thread, 0, 0) > 0) {}
+}
+
+static void
+_fs_init(void *arg)
+{
+ struct spdk_bs_dev *dev;
+
+ g_fs = NULL;
+ g_fserrno = -1;
+ dev = init_dev();
+ spdk_fs_init(dev, NULL, send_request, fs_op_with_handle_complete, NULL);
+
+ fs_thread_poll();
+
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs->bdev == dev);
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+_fs_load(void *arg)
+{
+ struct spdk_bs_dev *dev;
+
+ g_fs = NULL;
+ g_fserrno = -1;
+ dev = init_dev();
+ spdk_fs_load(dev, send_request, fs_op_with_handle_complete, NULL);
+
+ fs_thread_poll();
+
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs->bdev == dev);
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+_fs_unload(void *arg)
+{
+ g_fserrno = -1;
+ spdk_fs_unload(g_fs, fs_op_complete, NULL);
+
+ fs_thread_poll();
+
+ CU_ASSERT(g_fserrno == 0);
+ g_fs = NULL;
+}
+
+static void
+_nop(void *arg)
+{
+}
+
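+/* Write a small buffer through the cache, truncate, close, then reopen and read the data back
+ * to verify that reads observe previously cached writes. */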
+static void
+cache_read_after_write(void)
+{
+ uint64_t length;
+ int rc;
+ char w_buf[100], r_buf[100];
+ struct spdk_fs_thread_ctx *channel;
+ struct spdk_file_stat stat = {0};
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ length = (4 * 1024 * 1024);
+ rc = spdk_file_truncate(g_file, channel, length);
+ CU_ASSERT(rc == 0);
+
+ memset(w_buf, 0x5a, sizeof(w_buf));
+ spdk_file_write(g_file, channel, w_buf, 0, sizeof(w_buf));
+
+ CU_ASSERT(spdk_file_get_length(g_file) == length);
+
+ rc = spdk_file_truncate(g_file, channel, sizeof(w_buf));
+ CU_ASSERT(rc == 0);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(sizeof(w_buf) == stat.size);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ memset(r_buf, 0, sizeof(r_buf));
+ spdk_file_read(g_file, channel, r_buf, 0, sizeof(r_buf));
+ CU_ASSERT(memcmp(w_buf, r_buf, sizeof(r_buf)) == 0);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == -ENOENT);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+file_length(void)
+{
+ int rc;
+ char *buf;
+ uint64_t buf_length;
+ volatile uint64_t *length_flushed;
+ struct spdk_fs_thread_ctx *channel;
+ struct spdk_file_stat stat = {0};
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ g_file = NULL;
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ /* Write one CACHE_BUFFER. Filling at least one cache buffer triggers
+ * a flush to disk.
+ */
+ buf_length = CACHE_BUFFER_SIZE;
+ buf = calloc(1, buf_length);
+ spdk_file_write(g_file, channel, buf, 0, buf_length);
+ free(buf);
+
+ /* Spin until all of the data has been flushed to the SSD. There's been no
+ * sync operation yet, so the xattr on the file is still 0.
+ *
+ * length_flushed: This variable is modified by a different thread in this unit
+ * test. So we need to dereference it as a volatile to ensure the value is always
+ * re-read.
+ */
+ length_flushed = &g_file->length_flushed;
+ while (*length_flushed != buf_length) {}
+
+ /* Close the file. This causes an implicit sync which should write the
+ * length_flushed value as the "length" xattr on the file.
+ */
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(buf_length == stat.size);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ /* Unload and reload the filesystem. The file length will be
+ * read during load from the length xattr. We want to make sure
+ * it matches what was written when the file was originally
+ * written and closed.
+ */
+ ut_send_request(_fs_unload, NULL);
+
+ ut_send_request(_fs_load, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(buf_length == stat.size);
+
+ g_file = NULL;
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+append_write_to_extend_blob(void)
+{
+ uint64_t blob_size, buf_length;
+ char *buf, append_buf[64];
+ int rc;
+ struct spdk_fs_thread_ctx *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ /* Create a file and write blob_size - 1 bytes of data to it. */
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ blob_size = __file_get_blob_size(g_file);
+
+ buf_length = blob_size - 1;
+ buf = calloc(1, buf_length);
+ rc = spdk_file_write(g_file, channel, buf, 0, buf_length);
+ CU_ASSERT(rc == 0);
+ free(buf);
+
+ spdk_file_close(g_file, channel);
+ fs_thread_poll();
+ spdk_fs_free_thread_ctx(channel);
+ ut_send_request(_fs_unload, NULL);
+
+ /* Load the existing file and write 2 extra bytes to cross the blob boundary. */
+ ut_send_request(_fs_load, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+ g_file = NULL;
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ CU_ASSERT(g_file->length == buf_length);
+ CU_ASSERT(g_file->last == NULL);
+ CU_ASSERT(g_file->append_pos == buf_length);
+
+ rc = spdk_file_write(g_file, channel, append_buf, buf_length, 2);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(2 * blob_size == __file_get_blob_size(g_file));
+ spdk_file_close(g_file, channel);
+ fs_thread_poll();
+ CU_ASSERT(g_file->length == buf_length + 2);
+
+ spdk_fs_free_thread_ctx(channel);
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+partial_buffer(void)
+{
+ int rc;
+ char *buf;
+ uint64_t buf_length;
+ struct spdk_fs_thread_ctx *channel;
+ struct spdk_file_stat stat = {0};
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ g_file = NULL;
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ /* Write one CACHE_BUFFER plus one byte. Filling at least one cache buffer triggers
+ * a flush to disk. We want to make sure the extra byte is not implicitly flushed.
+ * It should only get flushed once we sync or close the file.
+ */
+ buf_length = CACHE_BUFFER_SIZE + 1;
+ buf = calloc(1, buf_length);
+ spdk_file_write(g_file, channel, buf, 0, buf_length);
+ free(buf);
+
+ /* Send some nop messages to the dispatch thread. This will ensure any of the
+ * pending write operations are completed. A well-functioning blobfs should only
+ * issue one write for the filled CACHE_BUFFER - a buggy one might try to write
+ * the extra byte. So do a bunch of _nops to make sure all of them (even the buggy
+ * ones) get a chance to run. Note that we can't just send a message to the
+ * dispatch thread to call spdk_thread_poll() because the messages are themselves
+ * run in the context of spdk_thread_poll().
+ */
+ ut_send_request(_nop, NULL);
+ ut_send_request(_nop, NULL);
+ ut_send_request(_nop, NULL);
+ ut_send_request(_nop, NULL);
+ ut_send_request(_nop, NULL);
+ ut_send_request(_nop, NULL);
+
+ CU_ASSERT(g_file->length_flushed == CACHE_BUFFER_SIZE);
+
+ /* Close the file. This causes an implicit sync which should write the
+ * length_flushed value as the "length" xattr on the file.
+ */
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(buf_length == stat.size);
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+cache_write_null_buffer(void)
+{
+ uint64_t length;
+ int rc;
+ struct spdk_fs_thread_ctx *channel;
+ struct spdk_thread *thread;
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ length = 0;
+ rc = spdk_file_truncate(g_file, channel, length);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_file_write(g_file, channel, NULL, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ thread = spdk_get_thread();
+ while (spdk_thread_poll(thread, 0, 0) > 0) {}
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+fs_create_sync(void)
+{
+ int rc;
+ struct spdk_fs_thread_ctx *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+ CU_ASSERT(channel != NULL);
+
+ rc = spdk_fs_create_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ /* Create should fail, because the file already exists. */
+ rc = spdk_fs_create_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ fs_thread_poll();
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+fs_rename_sync(void)
+{
+ int rc;
+ struct spdk_fs_thread_ctx *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+ CU_ASSERT(channel != NULL);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ CU_ASSERT(strcmp(spdk_file_get_name(g_file), "testfile") == 0);
+
+ rc = spdk_fs_rename_file(g_fs, channel, "testfile", "newtestfile");
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(strcmp(spdk_file_get_name(g_file), "newtestfile") == 0);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+cache_append_no_cache(void)
+{
+ int rc;
+ char buf[100];
+ struct spdk_fs_thread_ctx *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ spdk_file_write(g_file, channel, buf, 0 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 1 * sizeof(buf));
+ spdk_file_write(g_file, channel, buf, 1 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 2 * sizeof(buf));
+ spdk_file_sync(g_file, channel);
+
+ fs_thread_poll();
+
+ spdk_file_write(g_file, channel, buf, 2 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 3 * sizeof(buf));
+ spdk_file_write(g_file, channel, buf, 3 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 4 * sizeof(buf));
+ spdk_file_write(g_file, channel, buf, 4 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 5 * sizeof(buf));
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+fs_delete_file_without_close(void)
+{
+ int rc;
+ struct spdk_fs_thread_ctx *channel;
+ struct spdk_file *file;
+
+ ut_send_request(_fs_init, NULL);
+ channel = spdk_fs_alloc_thread_ctx(g_fs);
+ CU_ASSERT(channel != NULL);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_file->ref_count != 0);
+ CU_ASSERT(g_file->is_deleted == true);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &file);
+ CU_ASSERT(rc != 0);
+
+ spdk_file_close(g_file, channel);
+
+ fs_thread_poll();
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &file);
+ CU_ASSERT(rc != 0);
+
+ spdk_fs_free_thread_ctx(channel);
+
+ ut_send_request(_fs_unload, NULL);
+
+}
+
+static bool g_thread_exit = false;
+
+static void
+terminate_spdk_thread(void *arg)
+{
+ g_thread_exit = true;
+}
+
+static void *
+spdk_thread(void *arg)
+{
+ struct spdk_thread *thread = arg;
+
+ spdk_set_thread(thread);
+
+ while (!g_thread_exit) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+
+ return NULL;
+}
+
+int main(int argc, char **argv)
+{
+ struct spdk_thread *thread;
+ CU_pSuite suite = NULL;
+ pthread_t spdk_tid;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("blobfs_sync_ut", NULL, NULL);
+
+ CU_ADD_TEST(suite, cache_read_after_write);
+ CU_ADD_TEST(suite, file_length);
+ CU_ADD_TEST(suite, append_write_to_extend_blob);
+ CU_ADD_TEST(suite, partial_buffer);
+ CU_ADD_TEST(suite, cache_write_null_buffer);
+ CU_ADD_TEST(suite, fs_create_sync);
+ CU_ADD_TEST(suite, fs_rename_sync);
+ CU_ADD_TEST(suite, cache_append_no_cache);
+ CU_ADD_TEST(suite, fs_delete_file_without_close);
+
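+ /* Two SPDK threads are used: "test_thread" runs the CUnit cases inline on this pthread,
+  * while "dispatch_thread" is polled from a separate pthread so that the synchronous blobfs
+  * calls made by the tests can block on it. */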
+ spdk_thread_lib_init(NULL, 0);
+
+ thread = spdk_thread_create("test_thread", NULL);
+ spdk_set_thread(thread);
+
+ g_dispatch_thread = spdk_thread_create("dispatch_thread", NULL);
+ pthread_create(&spdk_tid, NULL, spdk_thread, g_dispatch_thread);
+
+ g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free(g_dev_buffer);
+
+ ut_send_request(terminate_spdk_thread, NULL);
+ pthread_join(spdk_tid, NULL);
+
+ while (spdk_thread_poll(g_dispatch_thread, 0, 0) > 0) {}
+ while (spdk_thread_poll(thread, 0, 0) > 0) {}
+
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ spdk_set_thread(g_dispatch_thread);
+ spdk_thread_exit(g_dispatch_thread);
+ while (!spdk_thread_is_exited(g_dispatch_thread)) {
+ spdk_thread_poll(g_dispatch_thread, 0, 0);
+ }
+ spdk_thread_destroy(g_dispatch_thread);
+
+ spdk_thread_lib_fini();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blobfs/tree.c/.gitignore b/src/spdk/test/unit/lib/blobfs/tree.c/.gitignore
new file mode 100644
index 000000000..57e77bf71
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/tree.c/.gitignore
@@ -0,0 +1 @@
+tree_ut
diff --git a/src/spdk/test/unit/lib/blobfs/tree.c/Makefile b/src/spdk/test/unit/lib/blobfs/tree.c/Makefile
new file mode 100644
index 000000000..b3d57e873
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/tree.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = tree_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c b/src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c
new file mode 100644
index 000000000..70f1d692a
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "blobfs/tree.c"
+
+void
+cache_buffer_free(struct cache_buffer *cache_buffer)
+{
+ free(cache_buffer);
+}
+
+static void
+blobfs_tree_op_test(void)
+{
+ struct cache_tree *tree;
+ struct cache_buffer *buffer[5];
+ struct cache_buffer *tmp_buffer;
+ int i;
+
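+ /* Allocate five cache buffers; their offsets below span tree levels 0, 1 and 2. */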
+ for (i = 0; i < 5; i ++) {
+ buffer[i] = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(buffer[i]);
+ }
+
+ tree = calloc(1, sizeof(*tree));
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+
+ /* insert buffer[0] */
+ buffer[0]->offset = 0;
+ tree = tree_insert_buffer(tree, buffer[0]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 0);
+ tmp_buffer = tree_find_buffer(tree, buffer[0]->offset);
+ CU_ASSERT(tmp_buffer == buffer[0]);
+
+ /* insert buffer[1] */
+ buffer[1]->offset = CACHE_BUFFER_SIZE;
+ /* Set bytes_filled and bytes_flushed to the same non-zero value, e.g. 32. */
+ buffer[1]->bytes_filled = buffer[1]->bytes_flushed = 32;
+ tree = tree_insert_buffer(tree, buffer[1]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 0);
+ tmp_buffer = tree_find_filled_buffer(tree, buffer[1]->offset);
+ CU_ASSERT(tmp_buffer == buffer[1]);
+
+ /* insert buffer[2] */
+ buffer[2]->offset = (CACHE_TREE_WIDTH - 1) * CACHE_BUFFER_SIZE;
+ tree = tree_insert_buffer(tree, buffer[2]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 0);
+ tmp_buffer = tree_find_buffer(tree, buffer[2]->offset);
+ CU_ASSERT(tmp_buffer == buffer[2]);
+ tmp_buffer = tree_find_filled_buffer(tree, buffer[2]->offset);
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* insert buffer[3] at an offset that cannot fit in a level-0 tree */
+ buffer[3]->offset = CACHE_TREE_LEVEL_SIZE(1);
+ tree = tree_insert_buffer(tree, buffer[3]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 1);
+ tmp_buffer = tree_find_buffer(tree, buffer[3]->offset);
+ CU_ASSERT(tmp_buffer == buffer[3]);
+
+ /* insert buffer[4] at an offset that cannot fit in a level-1 tree */
+ buffer[4]->offset = CACHE_TREE_LEVEL_SIZE(2);
+ tree = tree_insert_buffer(tree, buffer[4]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 2);
+ tmp_buffer = tree_find_buffer(tree, buffer[4]->offset);
+ CU_ASSERT(tmp_buffer == buffer[4]);
+
+ /* delete buffer[0] */
+ tree_remove_buffer(tree, buffer[0]);
+ /* verify that buffer[0] is no longer present in the tree */
+ tmp_buffer = tree_find_buffer(tree, 0);
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* delete buffer[3] */
+ tree_remove_buffer(tree, buffer[3]);
+ /* verify that buffer[3] is no longer present in the tree */
+ tmp_buffer = tree_find_buffer(tree, CACHE_TREE_LEVEL_SIZE(1));
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* free all buffers in the tree */
+ tree_free_buffers(tree);
+
+ /* verify that buffer[1] is no longer present in the tree */
+ tmp_buffer = tree_find_buffer(tree, CACHE_BUFFER_SIZE);
+ CU_ASSERT(tmp_buffer == NULL);
+ /* verify that buffer[2] is no longer present in the tree */
+ tmp_buffer = tree_find_buffer(tree, (CACHE_TREE_WIDTH - 1) * CACHE_BUFFER_SIZE);
+ CU_ASSERT(tmp_buffer == NULL);
+ /* verify that buffer[4] is no longer present in the tree */
+ tmp_buffer = tree_find_buffer(tree, CACHE_TREE_LEVEL_SIZE(2));
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* tree_free_buffers() does not free the root node itself, so free it here. */
+ free(tree);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("tree", NULL, NULL);
+ CU_ADD_TEST(suite, blobfs_tree_op_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/event/Makefile b/src/spdk/test/unit/lib/event/Makefile
new file mode 100644
index 000000000..ea411460c
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = subsystem.c app.c reactor.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/event/app.c/.gitignore b/src/spdk/test/unit/lib/event/app.c/.gitignore
new file mode 100644
index 000000000..123e16734
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/app.c/.gitignore
@@ -0,0 +1 @@
+app_ut
diff --git a/src/spdk/test/unit/lib/event/app.c/Makefile b/src/spdk/test/unit/lib/event/app.c/Makefile
new file mode 100644
index 000000000..9ec2b97db
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/app.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf trace jsonrpc json
+TEST_FILE = app_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/event/app.c/app_ut.c b/src/spdk/test/unit/lib/event/app.c/app_ut.c
new file mode 100644
index 000000000..6077d6600
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/app.c/app_ut.c
@@ -0,0 +1,193 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "event/app.c"
+
+#define test_argc 6
+
+DEFINE_STUB_V(spdk_event_call, (struct spdk_event *event));
+DEFINE_STUB(spdk_event_allocate, struct spdk_event *, (uint32_t core, spdk_event_fn fn, void *arg1,
+ void *arg2), NULL);
+DEFINE_STUB_V(spdk_subsystem_init, (spdk_subsystem_init_fn cb_fn, void *cb_arg));
+DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
+ uint32_t state_mask));
+DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias));
+DEFINE_STUB_V(spdk_rpc_set_state, (uint32_t state));
+DEFINE_STUB(spdk_rpc_get_state, uint32_t, (void), SPDK_RPC_RUNTIME);
+DEFINE_STUB_V(spdk_app_json_config_load, (const char *json_config_file, const char *rpc_addr,
+ spdk_subsystem_init_fn cb_fn, void *cb_arg, bool stop_on_error));
+
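+/* The stubs above satisfy app.c's external dependencies so that spdk_app_parse_args() can be
+ * exercised in isolation; each test argv array below is declared with test_argc entries. */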
+static void
+unittest_usage(void)
+{
+}
+
+static int
+unittest_parse_args(int ch, char *arg)
+{
+ return 0;
+}
+
+static void
+clean_opts(struct spdk_app_opts *opts)
+{
+ free(opts->pci_whitelist);
+ opts->pci_whitelist = NULL;
+ free(opts->pci_blacklist);
+ opts->pci_blacklist = NULL;
+ memset(opts, 0, sizeof(struct spdk_app_opts));
+}
+
+static void
+test_spdk_app_parse_args(void)
+{
+ spdk_app_parse_args_rvals_t rc;
+ struct spdk_app_opts opts = {};
+ struct option my_options[2] = {};
+ char *valid_argv[test_argc] = {"app_ut",
+ "--wait-for-rpc",
+ "-d",
+ "-p0",
+ "-B",
+ "0000:81:00.0"
+ };
+ char *invalid_argv_BW[test_argc] = {"app_ut",
+ "-B",
+ "0000:81:00.0",
+ "-W",
+ "0000:82:00.0",
+ "-cspdk.conf"
+ };
+ /* -z is used below as the application-specific short option */
+ char *argv_added_short_opt[test_argc] = {"app_ut",
+ "-z",
+ "-d",
+ "--wait-for-rpc",
+ "-p0",
+ "-cspdk.conf"
+ };
+ char *argv_added_long_opt[test_argc] = {"app_ut",
+ "-cspdk.conf",
+ "-d",
+ "-r/var/tmp/spdk.sock",
+ "--test-long-opt",
+ "--wait-for-rpc"
+ };
+ char *invalid_argv_missing_option[test_argc] = {"app_ut",
+ "-d",
+ "-p",
+ "--wait-for-rpc",
+ "--silence-noticelog"
+ "-R"
+ };
+
+ /* Test valid arguments. Expected result: PASS */
+ rc = spdk_app_parse_args(test_argc, valid_argv, &opts, "", NULL, unittest_parse_args, NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_SUCCESS);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Test invalid short option Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, argv_added_short_opt, &opts, "", NULL, unittest_parse_args,
+ NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Test valid global and local options. Expected result: PASS */
+ rc = spdk_app_parse_args(test_argc, argv_added_short_opt, &opts, "z", NULL, unittest_parse_args,
+ unittest_usage);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_SUCCESS);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Test invalid long option Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, argv_added_long_opt, &opts, "", NULL, unittest_parse_args,
+ NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Test valid global and local options. Expected result: PASS */
+ my_options[0].name = "test-long-opt";
+ rc = spdk_app_parse_args(test_argc, argv_added_long_opt, &opts, "", my_options, unittest_parse_args,
+ unittest_usage);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_SUCCESS);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Test overlapping global and local options. Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, valid_argv, &opts, SPDK_APP_GETOPT_STRING, NULL,
+ unittest_parse_args, NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Specify -B and -W options at the same time. Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, invalid_argv_BW, &opts, "", NULL, unittest_parse_args, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+ clean_opts(&opts);
+
+ /* Omit necessary argument to option */
+ rc = spdk_app_parse_args(test_argc, invalid_argv_missing_option, &opts, "", NULL,
+ unittest_parse_args, NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+ clean_opts(&opts);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("app_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_spdk_app_parse_args);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/event/reactor.c/.gitignore b/src/spdk/test/unit/lib/event/reactor.c/.gitignore
new file mode 100644
index 000000000..c86b7dfcd
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/reactor.c/.gitignore
@@ -0,0 +1 @@
+reactor_ut
diff --git a/src/spdk/test/unit/lib/event/reactor.c/Makefile b/src/spdk/test/unit/lib/event/reactor.c/Makefile
new file mode 100644
index 000000000..f7b3b5887
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/reactor.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf trace jsonrpc json
+TEST_FILE = reactor_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/event/reactor.c/reactor_ut.c b/src/spdk/test/unit/lib/event/reactor.c/reactor_ut.c
new file mode 100644
index 000000000..db50ea2f6
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/reactor.c/reactor_ut.c
@@ -0,0 +1,455 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "event/reactor.c"
+
+static void
+test_create_reactor(void)
+{
+ struct spdk_reactor reactor = {};
+
+ g_reactors = &reactor;
+
+ reactor_construct(&reactor, 0);
+
+ CU_ASSERT(spdk_reactor_get(0) == &reactor);
+
+ spdk_ring_free(reactor.events);
+ g_reactors = NULL;
+}
+
+static void
+test_init_reactors(void)
+{
+ uint32_t core;
+
+ allocate_cores(3);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ CU_ASSERT(g_reactor_state == SPDK_REACTOR_STATE_INITIALIZED);
+ for (core = 0; core < 3; core++) {
+ CU_ASSERT(spdk_reactor_get(core) != NULL);
+ }
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
+static void
+ut_event_fn(void *arg1, void *arg2)
+{
+ uint8_t *test1 = arg1;
+ uint8_t *test2 = arg2;
+
+ *test1 = 1;
+ *test2 = 0xFF;
+}
+
+static void
+test_event_call(void)
+{
+ uint8_t test1 = 0, test2 = 0;
+ struct spdk_event *evt;
+ struct spdk_reactor *reactor;
+
+ allocate_cores(1);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ evt = spdk_event_allocate(0, ut_event_fn, &test1, &test2);
+ CU_ASSERT(evt != NULL);
+
+ spdk_event_call(evt);
+
+ reactor = spdk_reactor_get(0);
+ CU_ASSERT(reactor != NULL);
+
+ CU_ASSERT(event_queue_run_batch(reactor) == 1);
+ CU_ASSERT(test1 == 1);
+ CU_ASSERT(test2 == 0xFF);
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
+static void
+test_schedule_thread(void)
+{
+ struct spdk_cpuset cpuset = {};
+ struct spdk_thread *thread;
+ struct spdk_reactor *reactor;
+ struct spdk_lw_thread *lw_thread;
+
+ allocate_cores(5);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ spdk_cpuset_set_cpu(&cpuset, 3, true);
+ g_next_core = 4;
+
+ /* _reactor_schedule_thread() is called at the end of spdk_thread_create()
+ * because it was registered with the SPDK thread library through
+ * spdk_thread_lib_init().
+ */
+ thread = spdk_thread_create(NULL, &cpuset);
+ CU_ASSERT(thread != NULL);
+
+ reactor = spdk_reactor_get(3);
+ CU_ASSERT(reactor != NULL);
+
+ MOCK_SET(spdk_env_get_current_core, 3);
+
+ CU_ASSERT(event_queue_run_batch(reactor) == 1);
+
+ MOCK_CLEAR(spdk_env_get_current_core);
+
+ lw_thread = TAILQ_FIRST(&reactor->threads);
+ CU_ASSERT(lw_thread != NULL);
+ CU_ASSERT(spdk_thread_get_from_ctx(lw_thread) == thread);
+
+ TAILQ_REMOVE(&reactor->threads, lw_thread, link);
+ reactor->thread_count--;
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+ spdk_set_thread(NULL);
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
+static void
+test_reschedule_thread(void)
+{
+ struct spdk_cpuset cpuset = {};
+ struct spdk_thread *thread;
+ struct spdk_reactor *reactor;
+ struct spdk_lw_thread *lw_thread;
+
+ allocate_cores(3);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ spdk_cpuset_set_cpu(&g_reactor_core_mask, 0, true);
+ spdk_cpuset_set_cpu(&g_reactor_core_mask, 1, true);
+ spdk_cpuset_set_cpu(&g_reactor_core_mask, 2, true);
+ g_next_core = 0;
+
+ /* Create the thread and schedule it onto core 1. */
+ spdk_cpuset_set_cpu(&cpuset, 1, true);
+
+ thread = spdk_thread_create(NULL, &cpuset);
+ CU_ASSERT(thread != NULL);
+ lw_thread = spdk_thread_get_ctx(thread);
+
+ reactor = spdk_reactor_get(1);
+ CU_ASSERT(reactor != NULL);
+ MOCK_SET(spdk_env_get_current_core, 1);
+
+ CU_ASSERT(event_queue_run_batch(reactor) == 1);
+ CU_ASSERT(TAILQ_FIRST(&reactor->threads) == lw_thread);
+
+ spdk_set_thread(thread);
+
+ /* Call spdk_thread_set_cpumask() twice with different cpumask values.
+ * The cpumask of the second call is the one used for the reschedule operation.
+ */
+
+ spdk_cpuset_zero(&cpuset);
+ spdk_cpuset_set_cpu(&cpuset, 0, true);
+ CU_ASSERT(spdk_thread_set_cpumask(&cpuset) == 0);
+
+ spdk_cpuset_zero(&cpuset);
+ spdk_cpuset_set_cpu(&cpuset, 2, true);
+ CU_ASSERT(spdk_thread_set_cpumask(&cpuset) == 0);
+
+ CU_ASSERT(lw_thread->resched == true);
+
+ reactor_run(reactor);
+
+ CU_ASSERT(lw_thread->resched == false);
+ CU_ASSERT(TAILQ_EMPTY(&reactor->threads));
+
+ reactor = spdk_reactor_get(0);
+ CU_ASSERT(reactor != NULL);
+ MOCK_SET(spdk_env_get_current_core, 0);
+
+ CU_ASSERT(event_queue_run_batch(reactor) == 0);
+
+ reactor = spdk_reactor_get(2);
+ CU_ASSERT(reactor != NULL);
+ MOCK_SET(spdk_env_get_current_core, 2);
+
+ CU_ASSERT(event_queue_run_batch(reactor) == 1);
+
+ CU_ASSERT(TAILQ_FIRST(&reactor->threads) == lw_thread);
+
+ MOCK_CLEAR(spdk_env_get_current_core);
+
+ TAILQ_REMOVE(&reactor->threads, lw_thread, link);
+ reactor->thread_count--;
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+ spdk_set_thread(NULL);
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
+static void
+for_each_reactor_done(void *arg1, void *arg2)
+{
+ uint32_t *count = arg1;
+ bool *done = arg2;
+
+ (*count)++;
+ *done = true;
+}
+
+static void
+for_each_reactor_cb(void *arg1, void *arg2)
+{
+ uint32_t *count = arg1;
+
+ (*count)++;
+}
+
+static void
+test_for_each_reactor(void)
+{
+ uint32_t count = 0, i;
+ bool done = false;
+ struct spdk_reactor *reactor;
+
+ allocate_cores(5);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ MOCK_SET(spdk_env_get_current_core, 0);
+
+ spdk_for_each_reactor(for_each_reactor_cb, &count, &done, for_each_reactor_done);
+
+ MOCK_CLEAR(spdk_env_get_current_core);
+
+ /* We have not processed any event yet, so count and done should be 0 and false,
+ * respectively.
+ */
+ CU_ASSERT(count == 0);
+
+ /* Poll each reactor to verify the event is passed to each */
+ for (i = 0; i < 5; i++) {
+ reactor = spdk_reactor_get(i);
+ CU_ASSERT(reactor != NULL);
+
+ event_queue_run_batch(reactor);
+ CU_ASSERT(count == (i + 1));
+ CU_ASSERT(done == false);
+ }
+
+ /* After every reactor has processed the event, the completion runs once more on the originating reactor (core 0). */
+ reactor = spdk_reactor_get(0);
+ CU_ASSERT(reactor != NULL);
+
+ event_queue_run_batch(reactor);
+ CU_ASSERT(count == 6);
+ CU_ASSERT(done == true);
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
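+/* Pollers used by test_reactor_stats(): both simulate ctx microseconds of work via spdk_delay_us();
+ * the idle variant returns 0 (no work done) and the busy variant returns 1 (work done). */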
+static int
+poller_run_idle(void *ctx)
+{
+ uint64_t delay_us = (uint64_t)ctx;
+
+ spdk_delay_us(delay_us);
+
+ return 0;
+}
+
+static int
+poller_run_busy(void *ctx)
+{
+ uint64_t delay_us = (uint64_t)ctx;
+
+ spdk_delay_us(delay_us);
+
+ return 1;
+}
+
+static void
+test_reactor_stats(void)
+{
+ struct spdk_cpuset cpuset = {};
+ struct spdk_thread *thread1, *thread2;
+ struct spdk_reactor *reactor;
+ struct spdk_poller *busy1, *idle1, *busy2, *idle2;
+ int rc __attribute__((unused));
+
+ /* Test case is the following:
+ * Create a reactor on CPU core0.
+ * Create thread1 and thread2 simultaneously on reactor0 at TSC = 100.
+ * Reactor runs
+ * - thread1 for 100 with busy
+ * - thread2 for 200 with idle
+ * - thread1 for 300 with idle
+ * - thread2 for 400 with busy.
+ * Then,
+ * - both elapsed TSC of thread1 and thread2 should be 1000 (= 100 + 900).
+ * - busy TSC of reactor should be 500 (= 100 + 400).
+ * - idle TSC of reactor should be 500 (= 200 + 300).
+ */
+
+ allocate_cores(1);
+
+ CU_ASSERT(spdk_reactors_init() == 0);
+
+ spdk_cpuset_set_cpu(&cpuset, 0, true);
+
+ MOCK_SET(spdk_env_get_current_core, 0);
+ MOCK_SET(spdk_get_ticks, 100);
+
+ thread1 = spdk_thread_create(NULL, &cpuset);
+ SPDK_CU_ASSERT_FATAL(thread1 != NULL);
+
+ thread2 = spdk_thread_create(NULL, &cpuset);
+ SPDK_CU_ASSERT_FATAL(thread2 != NULL);
+
+ reactor = spdk_reactor_get(0);
+ SPDK_CU_ASSERT_FATAL(reactor != NULL);
+
+ reactor->tsc_last = 100;
+
+ spdk_set_thread(thread1);
+ busy1 = spdk_poller_register(poller_run_busy, (void *)100, 0);
+ CU_ASSERT(busy1 != NULL);
+
+ spdk_set_thread(thread2);
+ idle2 = spdk_poller_register(poller_run_idle, (void *)300, 0);
+ CU_ASSERT(idle2 != NULL);
+
+ _reactor_run(reactor);
+
+ CU_ASSERT(thread1->tsc_last == 200);
+ CU_ASSERT(thread1->stats.busy_tsc == 100);
+ CU_ASSERT(thread1->stats.idle_tsc == 0);
+ CU_ASSERT(thread2->tsc_last == 500);
+ CU_ASSERT(thread2->stats.busy_tsc == 0);
+ CU_ASSERT(thread2->stats.idle_tsc == 300);
+
+ CU_ASSERT(reactor->busy_tsc == 100);
+ CU_ASSERT(reactor->idle_tsc == 300);
+
+ spdk_set_thread(thread1);
+ spdk_poller_unregister(&busy1);
+ idle1 = spdk_poller_register(poller_run_idle, (void *)200, 0);
+ CU_ASSERT(idle1 != NULL);
+
+ spdk_set_thread(thread2);
+ spdk_poller_unregister(&idle2);
+ busy2 = spdk_poller_register(poller_run_busy, (void *)400, 0);
+ CU_ASSERT(busy2 != NULL);
+
+ _reactor_run(reactor);
+
+ CU_ASSERT(thread1->tsc_last == 700);
+ CU_ASSERT(thread1->stats.busy_tsc == 100);
+ CU_ASSERT(thread1->stats.idle_tsc == 200);
+ CU_ASSERT(thread2->tsc_last == 1100);
+ CU_ASSERT(thread2->stats.busy_tsc == 400);
+ CU_ASSERT(thread2->stats.idle_tsc == 300);
+
+ CU_ASSERT(reactor->busy_tsc == 500);
+ CU_ASSERT(reactor->idle_tsc == 500);
+
+ spdk_set_thread(thread1);
+ spdk_poller_unregister(&idle1);
+ spdk_thread_exit(thread1);
+
+ spdk_set_thread(thread2);
+ spdk_poller_unregister(&busy2);
+ spdk_thread_exit(thread2);
+
+ _reactor_run(reactor);
+
+ CU_ASSERT(TAILQ_EMPTY(&reactor->threads));
+
+ spdk_reactors_fini();
+
+ free_cores();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("app_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_create_reactor);
+ CU_ADD_TEST(suite, test_init_reactors);
+ CU_ADD_TEST(suite, test_event_call);
+ CU_ADD_TEST(suite, test_schedule_thread);
+ CU_ADD_TEST(suite, test_reschedule_thread);
+ CU_ADD_TEST(suite, test_for_each_reactor);
+ CU_ADD_TEST(suite, test_reactor_stats);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/event/subsystem.c/.gitignore b/src/spdk/test/unit/lib/event/subsystem.c/.gitignore
new file mode 100644
index 000000000..76ca0d330
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/subsystem.c/.gitignore
@@ -0,0 +1 @@
+subsystem_ut
diff --git a/src/spdk/test/unit/lib/event/subsystem.c/Makefile b/src/spdk/test/unit/lib/event/subsystem.c/Makefile
new file mode 100644
index 000000000..b62f1ee1a
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/subsystem.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = subsystem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c b/src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c
new file mode 100644
index 000000000..deeb2f3aa
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c
@@ -0,0 +1,255 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "unit/lib/json_mock.c"
+#include "event/subsystem.c"
+#include "common/lib/test_env.c"
+
+static struct spdk_subsystem g_ut_subsystems[8];
+static struct spdk_subsystem_depend g_ut_subsystem_deps[8];
+static int global_rc;
+
+static void
+ut_event_fn(int rc, void *arg1)
+{
+ global_rc = rc;
+}
+
+static void
+set_up_subsystem(struct spdk_subsystem *subsystem, const char *name)
+{
+ subsystem->init = NULL;
+ subsystem->fini = NULL;
+ subsystem->config = NULL;
+ subsystem->name = name;
+}
+
+static void
+set_up_depends(struct spdk_subsystem_depend *depend, const char *subsystem_name,
+ const char *depends_on_name)
+{
+ depend->name = subsystem_name;
+ depend->depends_on = depends_on_name;
+}
+
+static void
+subsystem_clear(void)
+{
+ struct spdk_subsystem *subsystem, *subsystem_tmp;
+ struct spdk_subsystem_depend *subsystem_dep, *subsystem_dep_tmp;
+
+ TAILQ_FOREACH_SAFE(subsystem, &g_subsystems, tailq, subsystem_tmp) {
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+ }
+
+ TAILQ_FOREACH_SAFE(subsystem_dep, &g_subsystems_deps, tailq, subsystem_dep_tmp) {
+ TAILQ_REMOVE(&g_subsystems_deps, subsystem_dep, tailq);
+ }
+}
+
+static void
+subsystem_sort_test_depends_on_single(void)
+{
+ struct spdk_subsystem *subsystem;
+ int i;
+ char subsystem_name[16];
+
+ global_rc = -1;
+ spdk_subsystem_init(ut_event_fn, NULL);
+ CU_ASSERT(global_rc == 0);
+
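+ /* subsystem1..4 are registered statically at the bottom of this file with a linear 1 -> 2 -> 3 -> 4 dependency chain, so the sorted list should read subsystem4 down to subsystem1. */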
+ i = 4;
+ TAILQ_FOREACH(subsystem, &g_subsystems, tailq) {
+ snprintf(subsystem_name, sizeof(subsystem_name), "subsystem%d", i);
+ SPDK_CU_ASSERT_FATAL(i > 0);
+ i--;
+ CU_ASSERT(strcmp(subsystem_name, subsystem->name) == 0);
+ }
+}
+
+static void
+subsystem_sort_test_depends_on_multiple(void)
+{
+ int i;
+ struct spdk_subsystem *subsystem;
+
+ subsystem_clear();
+ set_up_subsystem(&g_ut_subsystems[0], "iscsi");
+ set_up_subsystem(&g_ut_subsystems[1], "nvmf");
+ set_up_subsystem(&g_ut_subsystems[2], "sock");
+ set_up_subsystem(&g_ut_subsystems[3], "bdev");
+ set_up_subsystem(&g_ut_subsystems[4], "rpc");
+ set_up_subsystem(&g_ut_subsystems[5], "scsi");
+ set_up_subsystem(&g_ut_subsystems[6], "interface");
+ set_up_subsystem(&g_ut_subsystems[7], "accel");
+
+ for (i = 0; i < 8; i++) {
+ spdk_add_subsystem(&g_ut_subsystems[i]);
+ }
+
+ set_up_depends(&g_ut_subsystem_deps[0], "bdev", "accel");
+ set_up_depends(&g_ut_subsystem_deps[1], "scsi", "bdev");
+ set_up_depends(&g_ut_subsystem_deps[2], "rpc", "interface");
+ set_up_depends(&g_ut_subsystem_deps[3], "sock", "interface");
+ set_up_depends(&g_ut_subsystem_deps[4], "nvmf", "interface");
+ set_up_depends(&g_ut_subsystem_deps[5], "iscsi", "scsi");
+ set_up_depends(&g_ut_subsystem_deps[6], "iscsi", "sock");
+ set_up_depends(&g_ut_subsystem_deps[7], "iscsi", "rpc");
+
+ for (i = 0; i < 8; i++) {
+ spdk_add_subsystem_depend(&g_ut_subsystem_deps[i]);
+ }
+
+ global_rc = -1;
+ spdk_subsystem_init(ut_event_fn, NULL);
+ CU_ASSERT(global_rc == 0);
+
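+ /* Pop subsystems off the sorted list and check the topological order implied by the dependencies above: interface and accel first, iscsi last. */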
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "interface") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "accel") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "nvmf") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "sock") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "bdev") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "rpc") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "scsi") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "iscsi") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+}
+
+struct spdk_subsystem subsystem1 = {
+ .name = "subsystem1",
+};
+
+struct spdk_subsystem subsystem2 = {
+ .name = "subsystem2",
+};
+struct spdk_subsystem subsystem3 = {
+ .name = "subsystem3",
+};
+
+struct spdk_subsystem subsystem4 = {
+ .name = "subsystem4",
+};
+
+SPDK_SUBSYSTEM_REGISTER(subsystem1);
+SPDK_SUBSYSTEM_REGISTER(subsystem2);
+SPDK_SUBSYSTEM_REGISTER(subsystem3);
+SPDK_SUBSYSTEM_REGISTER(subsystem4);
+
+SPDK_SUBSYSTEM_DEPEND(subsystem1, subsystem2)
+SPDK_SUBSYSTEM_DEPEND(subsystem2, subsystem3)
+SPDK_SUBSYSTEM_DEPEND(subsystem3, subsystem4)
+
+
+static void
+subsystem_sort_test_missing_dependency(void)
+{
+ /*
+ * A depends on B, but B is missing
+ */
+
+ subsystem_clear();
+ set_up_subsystem(&g_ut_subsystems[0], "A");
+ spdk_add_subsystem(&g_ut_subsystems[0]);
+
+ set_up_depends(&g_ut_subsystem_deps[0], "A", "B");
+ spdk_add_subsystem_depend(&g_ut_subsystem_deps[0]);
+
+ global_rc = -1;
+ spdk_subsystem_init(ut_event_fn, NULL);
+ CU_ASSERT(global_rc != 0);
+
+ /*
+ * Dependency from C to A is defined, but C is missing
+ */
+
+ subsystem_clear();
+ set_up_subsystem(&g_ut_subsystems[0], "A");
+ spdk_add_subsystem(&g_ut_subsystems[0]);
+
+ set_up_depends(&g_ut_subsystem_deps[0], "C", "A");
+ spdk_add_subsystem_depend(&g_ut_subsystem_deps[0]);
+
+ global_rc = -1;
+ spdk_subsystem_init(ut_event_fn, NULL);
+ CU_ASSERT(global_rc != 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("subsystem_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, subsystem_sort_test_depends_on_single);
+ CU_ADD_TEST(suite, subsystem_sort_test_depends_on_multiple);
+ CU_ADD_TEST(suite, subsystem_sort_test_missing_dependency);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/Makefile b/src/spdk/test/unit/lib/ftl/Makefile
new file mode 100644
index 000000000..57745c450
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = ftl_ppa ftl_band.c ftl_reloc.c ftl_wptr ftl_md ftl_io.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/ftl/common/utils.c b/src/spdk/test/unit/lib/ftl/common/utils.c
new file mode 100644
index 000000000..dda828df8
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/common/utils.c
@@ -0,0 +1,173 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_internal/thread.h"
+
+#include "spdk/ftl.h"
+#include "ftl/ftl_core.h"
+
+struct base_bdev_geometry {
+ size_t write_unit_size;
+ size_t zone_size;
+ size_t optimal_open_zones;
+ size_t blockcnt;
+};
+
+extern struct base_bdev_geometry g_geo;
+
+struct spdk_ftl_dev *test_init_ftl_dev(const struct base_bdev_geometry *geo);
+struct ftl_band *test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id, size_t zone_size);
+void test_free_ftl_dev(struct spdk_ftl_dev *dev);
+void test_free_ftl_band(struct ftl_band *band);
+uint64_t test_offset_from_addr(struct ftl_addr addr, struct ftl_band *band);
+
+DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
+
+uint64_t
+spdk_bdev_get_zone_size(const struct spdk_bdev *bdev)
+{
+ return g_geo.zone_size;
+}
+
+uint32_t
+spdk_bdev_get_optimal_open_zones(const struct spdk_bdev *bdev)
+{
+ return g_geo.optimal_open_zones;
+}
+
+struct spdk_ftl_dev *
+test_init_ftl_dev(const struct base_bdev_geometry *geo)
+{
+ struct spdk_ftl_dev *dev;
+
+ dev = calloc(1, sizeof(*dev));
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+
+ dev->xfer_size = geo->write_unit_size;
+ dev->core_thread = spdk_thread_create("unit_test_thread", NULL);
+ spdk_set_thread(dev->core_thread);
+ dev->ioch = calloc(1, sizeof(*dev->ioch)
+ + sizeof(struct ftl_io_channel *));
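+ /* A band groups one zone from every parallel unit, hence blockcnt / (zone_size * optimal_open_zones) bands in total. */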
+ dev->num_bands = geo->blockcnt / (geo->zone_size * geo->optimal_open_zones);
+ dev->bands = calloc(dev->num_bands, sizeof(*dev->bands));
+ SPDK_CU_ASSERT_FATAL(dev->bands != NULL);
+
+ dev->lba_pool = spdk_mempool_create("ftl_ut", 2, 0x18000,
+ SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+ SPDK_ENV_SOCKET_ID_ANY);
+ SPDK_CU_ASSERT_FATAL(dev->lba_pool != NULL);
+
+ LIST_INIT(&dev->free_bands);
+ LIST_INIT(&dev->shut_bands);
+
+ return dev;
+}
+
+struct ftl_band *
+test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id, size_t zone_size)
+{
+ struct ftl_band *band;
+ struct ftl_zone *zone;
+
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+ SPDK_CU_ASSERT_FATAL(id < dev->num_bands);
+
+ band = &dev->bands[id];
+ band->dev = dev;
+ band->id = id;
+
+ band->state = FTL_BAND_STATE_CLOSED;
+ LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
+ CIRCLEQ_INIT(&band->zones);
+
+ band->lba_map.vld = spdk_bit_array_create(ftl_get_num_blocks_in_band(dev));
+ SPDK_CU_ASSERT_FATAL(band->lba_map.vld != NULL);
+
+ band->zone_buf = calloc(ftl_get_num_punits(dev), sizeof(*band->zone_buf));
+ SPDK_CU_ASSERT_FATAL(band->zone_buf != NULL);
+
+ band->reloc_bitmap = spdk_bit_array_create(ftl_get_num_bands(dev));
+ SPDK_CU_ASSERT_FATAL(band->reloc_bitmap != NULL);
+
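+ /* zone_id is the zone's starting block: zones are laid out back to back, band by band, each zone_size blocks long. */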
+ for (size_t i = 0; i < ftl_get_num_punits(dev); ++i) {
+ zone = &band->zone_buf[i];
+ zone->info.state = SPDK_BDEV_ZONE_STATE_FULL;
+ zone->info.zone_id = zone_size * (id * ftl_get_num_punits(dev) + i);
+ CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq);
+ band->num_zones++;
+ }
+
+ pthread_spin_init(&band->lba_map.lock, PTHREAD_PROCESS_PRIVATE);
+ return band;
+}
+
+void
+test_free_ftl_dev(struct spdk_ftl_dev *dev)
+{
+ struct spdk_thread *thread;
+
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+ free(dev->ioch);
+
+ thread = dev->core_thread;
+
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+ spdk_mempool_free(dev->lba_pool);
+ free(dev->bands);
+ free(dev);
+}
+
+void
+test_free_ftl_band(struct ftl_band *band)
+{
+ SPDK_CU_ASSERT_FATAL(band != NULL);
+ spdk_bit_array_free(&band->lba_map.vld);
+ spdk_bit_array_free(&band->reloc_bitmap);
+ free(band->zone_buf);
+ spdk_dma_free(band->lba_map.dma_buf);
+}
+
+uint64_t
+test_offset_from_addr(struct ftl_addr addr, struct ftl_band *band)
+{
+ struct spdk_ftl_dev *dev = band->dev;
+
+ CU_ASSERT_EQUAL(ftl_addr_get_band(dev, addr), band->id);
+
+ return addr.offset - band->id * ftl_get_num_blocks_in_band(dev);
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_band.c/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_band.c/.gitignore
new file mode 100644
index 000000000..aa8820632
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_band.c/.gitignore
@@ -0,0 +1 @@
+ftl_band_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_band.c/Makefile b/src/spdk/test/unit/lib/ftl/ftl_band.c/Makefile
new file mode 100644
index 000000000..4d4195105
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_band.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_band_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_band.c/ftl_band_ut.c b/src/spdk/test/unit/lib/ftl/ftl_band.c/ftl_band_ut.c
new file mode 100644
index 000000000..d4f299e5b
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_band.c/ftl_band_ut.c
@@ -0,0 +1,307 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+
+#include "ftl/ftl_core.c"
+#include "ftl/ftl_band.c"
+#include "../common/utils.c"
+
+#define TEST_BAND_IDX 68
+#define TEST_LBA 0x68676564
+
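+/* Geometry used by these tests: a 16-block write unit, 100-block zones, 9 optimal open zones (one zone per parallel unit in a band) and 1500 * 100 * 8 blocks in total. */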
+struct base_bdev_geometry g_geo = {
+ .write_unit_size = 16,
+ .optimal_open_zones = 9,
+ .zone_size = 100,
+ .blockcnt = 1500 * 100 * 8,
+};
+
+static struct spdk_ftl_dev *g_dev;
+static struct ftl_band *g_band;
+
+static void
+setup_band(void)
+{
+ int rc;
+
+ g_dev = test_init_ftl_dev(&g_geo);
+ g_band = test_init_ftl_band(g_dev, TEST_BAND_IDX, g_geo.zone_size);
+ rc = ftl_band_alloc_lba_map(g_band);
+ CU_ASSERT_EQUAL_FATAL(rc, 0);
+}
+
+static void
+cleanup_band(void)
+{
+ test_free_ftl_band(g_band);
+ test_free_ftl_dev(g_dev);
+}
+
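+/* Address of the first block of the given parallel unit, relative to the start of a band; callers add the band offset themselves. */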
+static struct ftl_addr
+addr_from_punit(uint64_t punit)
+{
+ struct ftl_addr addr = {};
+
+ addr.offset = punit * g_geo.zone_size;
+ return addr;
+}
+
+static void
+test_band_block_offset_from_addr_base(void)
+{
+ struct ftl_addr addr;
+ uint64_t offset, i, flat_lun = 0;
+
+ setup_band();
+ for (i = 0; i < ftl_get_num_punits(g_dev); ++i) {
+ addr = addr_from_punit(i);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+
+ offset = ftl_band_block_offset_from_addr(g_band, addr);
+ CU_ASSERT_EQUAL(offset, flat_lun * ftl_get_num_blocks_in_zone(g_dev));
+ flat_lun++;
+ }
+ cleanup_band();
+}
+
+static void
+test_band_block_offset_from_addr_offset(void)
+{
+ struct ftl_addr addr;
+ uint64_t offset, expect, i, j;
+
+ setup_band();
+ for (i = 0; i < ftl_get_num_punits(g_dev); ++i) {
+ for (j = 0; j < g_geo.zone_size; ++j) {
+ addr = addr_from_punit(i);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + j;
+
+ offset = ftl_band_block_offset_from_addr(g_band, addr);
+
+ expect = test_offset_from_addr(addr, g_band);
+ CU_ASSERT_EQUAL(offset, expect);
+ }
+ }
+ cleanup_band();
+}
+
+static void
+test_band_addr_from_block_offset(void)
+{
+ struct ftl_addr addr, expect;
+ uint64_t offset, i, j;
+
+ setup_band();
+ for (i = 0; i < ftl_get_num_punits(g_dev); ++i) {
+ for (j = 0; j < g_geo.zone_size; ++j) {
+ expect = addr_from_punit(i);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + j;
+
+ offset = ftl_band_block_offset_from_addr(g_band, expect);
+ addr = ftl_band_addr_from_block_offset(g_band, offset);
+
+ CU_ASSERT_EQUAL(addr.offset, expect.offset);
+ }
+ }
+ cleanup_band();
+}
+
+static void
+test_band_set_addr(void)
+{
+ struct ftl_lba_map *lba_map;
+ struct ftl_addr addr;
+ uint64_t offset = 0;
+
+ setup_band();
+ lba_map = &g_band->lba_map;
+ addr = addr_from_punit(0);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+
+ CU_ASSERT_EQUAL(lba_map->num_vld, 0);
+
+ offset = test_offset_from_addr(addr, g_band);
+
+ ftl_band_set_addr(g_band, TEST_LBA, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 1);
+ CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
+
+ addr.offset += g_geo.zone_size;
+ offset = test_offset_from_addr(addr, g_band);
+ ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 2);
+ CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA + 1);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
+ addr.offset -= g_geo.zone_size;
+ offset = test_offset_from_addr(addr, g_band);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
+ cleanup_band();
+}
+
+static void
+test_invalidate_addr(void)
+{
+ struct ftl_lba_map *lba_map;
+ struct ftl_addr addr;
+ uint64_t offset[2];
+
+ setup_band();
+ lba_map = &g_band->lba_map;
+ addr = addr_from_punit(0);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ offset[0] = test_offset_from_addr(addr, g_band);
+
+ ftl_band_set_addr(g_band, TEST_LBA, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 1);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
+ ftl_invalidate_addr(g_band->dev, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 0);
+ CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[0]));
+
+ offset[0] = test_offset_from_addr(addr, g_band);
+ ftl_band_set_addr(g_band, TEST_LBA, addr);
+ addr.offset += g_geo.zone_size;
+ offset[1] = test_offset_from_addr(addr, g_band);
+ ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 2);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[1]));
+ ftl_invalidate_addr(g_band->dev, addr);
+ CU_ASSERT_EQUAL(lba_map->num_vld, 1);
+ CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
+ CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[1]));
+ cleanup_band();
+}
+
+static void
+test_next_xfer_addr(void)
+{
+ struct ftl_addr addr, result, expect;
+
+ setup_band();
+ /* Verify a simple one-block increment */
+ addr = addr_from_punit(0);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ expect = addr;
+ expect.offset += 1;
+
+ result = ftl_band_next_xfer_addr(g_band, addr, 1);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Verify jumping between zones */
+ expect = addr_from_punit(1);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Verify jumping works with unaligned offsets */
+ expect = addr_from_punit(1);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + 3;
+ result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size + 3);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Verify jumping from last zone to the first one */
+ expect = addr_from_punit(0);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + g_dev->xfer_size;
+ addr = addr_from_punit(ftl_get_num_punits(g_dev) - 1);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Verify jumping from last zone to the first one with unaligned offset */
+ expect = addr_from_punit(0);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ expect.offset += g_dev->xfer_size + 2;
+ addr = addr_from_punit(ftl_get_num_punits(g_dev) - 1);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size + 2);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Verify large offset spanning across the whole band multiple times */
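+ /* Starting 2 * xfer_size + 1 blocks into punit 0, advancing 3 * xfer_size * num_punits + 3 blocks loops over all parallel units three times and 3 blocks more, landing at offset 5 * xfer_size + 4. */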
+ expect = addr_from_punit(0);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ expect.offset += g_dev->xfer_size * 5 + 4;
+ addr = addr_from_punit(0);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ addr.offset += g_dev->xfer_size * 2 + 1;
+ result = ftl_band_next_xfer_addr(g_band, addr, 3 * g_dev->xfer_size *
+ ftl_get_num_punits(g_dev) + 3);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+
+ /* Remove one zone and verify it's skipped properly */
+ g_band->zone_buf[1].info.state = SPDK_BDEV_ZONE_STATE_OFFLINE;
+ CIRCLEQ_REMOVE(&g_band->zones, &g_band->zone_buf[1], circleq);
+ g_band->num_zones--;
+ expect = addr_from_punit(2);
+ expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ expect.offset += g_dev->xfer_size * 5 + 4;
+ addr = addr_from_punit(0);
+ addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
+ addr.offset += g_dev->xfer_size * 2 + 1;
+ result = ftl_band_next_xfer_addr(g_band, addr, 3 * g_dev->xfer_size *
+ (ftl_get_num_punits(g_dev) - 1) + g_dev->xfer_size + 3);
+ CU_ASSERT_EQUAL(result.offset, expect.offset);
+ cleanup_band();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ftl_band_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_band_block_offset_from_addr_base);
+ CU_ADD_TEST(suite, test_band_block_offset_from_addr_offset);
+ CU_ADD_TEST(suite, test_band_addr_from_block_offset);
+ CU_ADD_TEST(suite, test_band_set_addr);
+ CU_ADD_TEST(suite, test_invalidate_addr);
+ CU_ADD_TEST(suite, test_next_xfer_addr);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_io.c/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_io.c/.gitignore
new file mode 100644
index 000000000..c5e09253e
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_io.c/.gitignore
@@ -0,0 +1 @@
+ftl_io_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_io.c/Makefile b/src/spdk/test/unit/lib/ftl/ftl_io.c/Makefile
new file mode 100644
index 000000000..e06a186b1
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_io.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_io_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c b/src/spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c
new file mode 100644
index 000000000..81288de60
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c
@@ -0,0 +1,1068 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/ut_multithread.c"
+
+#include "ftl/ftl_io.c"
+#include "ftl/ftl_init.c"
+#include "ftl/ftl_core.c"
+#include "ftl/ftl_band.c"
+
+DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
+DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
+DEFINE_STUB(spdk_bdev_get_optimal_open_zones, uint32_t, (const struct spdk_bdev *b), 1);
+DEFINE_STUB(spdk_bdev_zone_appendv, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t zone_id, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *b), 1024);
+DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
+ struct spdk_io_channel *ch, uint64_t zone_id, enum spdk_bdev_zone_action action,
+ spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
+DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ void *buf, uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_write_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ void *buf, uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_write_blocks_with_md, int, (struct spdk_bdev_desc *desc,
+ struct spdk_io_channel *ch, void *buf, void *md, uint64_t offset_blocks,
+ uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_writev_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);
+DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 4096);
+#if defined(FTL_META_DEBUG)
+DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
+#endif
+#if defined(DEBUG)
+DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
+ struct ftl_addr addr, size_t addr_cnt));
+DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
+DEFINE_STUB(ftl_trace_alloc_id, uint64_t, (struct spdk_ftl_dev *dev), 0);
+DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
+ enum ftl_trace_completion type));
+DEFINE_STUB_V(ftl_trace_wbuf_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
+#endif
+
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
+{
+ return spdk_get_io_channel(bdev_desc);
+}
+
+static int
+channel_create_cb(void *io_device, void *ctx)
+{
+ return 0;
+}
+
+static void
+channel_destroy_cb(void *io_device, void *ctx)
+{}
+
+static struct spdk_ftl_dev *
+setup_device(uint32_t num_threads, uint32_t xfer_size)
+{
+ struct spdk_ftl_dev *dev;
+ struct _ftl_io_channel *_ioch;
+ struct ftl_io_channel *ioch;
+ int rc;
+
+ allocate_threads(num_threads);
+ set_thread(0);
+
+ dev = calloc(1, sizeof(*dev));
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+
+ dev->core_thread = spdk_get_thread();
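+ /* Fake an spdk_io_channel with its _ftl_io_channel context placed right behind it, which is the layout ftl_io_channel_get_ctx() relies on. */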
+ dev->ioch = calloc(1, sizeof(*_ioch) + sizeof(struct spdk_io_channel));
+ SPDK_CU_ASSERT_FATAL(dev->ioch != NULL);
+
+ _ioch = (struct _ftl_io_channel *)(dev->ioch + 1);
+ ioch = _ioch->ioch = calloc(1, sizeof(*ioch));
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+
+ ioch->elem_size = sizeof(struct ftl_md_io);
+ ioch->io_pool = spdk_mempool_create("io-pool", 4096, ioch->elem_size, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(ioch->io_pool != NULL);
+
+ dev->conf = g_default_conf;
+ dev->xfer_size = xfer_size;
+ dev->base_bdev_desc = (struct spdk_bdev_desc *)0xdeadbeef;
+ spdk_io_device_register(dev->base_bdev_desc, channel_create_cb, channel_destroy_cb, 0, NULL);
+
+ rc = ftl_dev_init_io_channel(dev);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ return dev;
+}
+
+static void
+free_device(struct spdk_ftl_dev *dev)
+{
+ struct ftl_io_channel *ioch;
+
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ spdk_mempool_free(ioch->io_pool);
+ free(ioch);
+
+ spdk_io_device_unregister(dev, NULL);
+ spdk_io_device_unregister(dev->base_bdev_desc, NULL);
+ free_threads();
+
+ free(dev->ioch_array);
+ free(dev->iov_buf);
+ free(dev->ioch);
+ free(dev);
+}
+
+static void
+setup_io(struct ftl_io *io, struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
+{
+ io->dev = dev;
+ io->cb_fn = cb;
+ io->cb_ctx = ctx;
+}
+
+static struct ftl_io *
+alloc_io(struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
+{
+ struct ftl_io *io;
+
+ io = ftl_io_alloc(dev->ioch);
+ SPDK_CU_ASSERT_FATAL(io != NULL);
+ setup_io(io, dev, cb, ctx);
+
+ return io;
+}
+
+static void
+io_complete_cb(struct ftl_io *io, void *ctx, int status)
+{
+ *(int *)ctx = status;
+}
+
+static void
+test_completion(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_io_channel *ioch;
+ struct ftl_io *io;
+ int req, status = 0;
+ size_t pool_size;
+
+ dev = setup_device(1, 16);
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ pool_size = spdk_mempool_count(ioch->io_pool);
+
+ io = alloc_io(dev, io_complete_cb, &status);
+ io->status = -EIO;
+
+#define NUM_REQUESTS 16
+ for (req = 0; req < NUM_REQUESTS; ++req) {
+ ftl_io_inc_req(io);
+ CU_ASSERT_FALSE(ftl_io_done(io));
+ }
+
+ CU_ASSERT_EQUAL(io->req_cnt, NUM_REQUESTS);
+
+ for (req = 0; req < (NUM_REQUESTS - 1); ++req) {
+ ftl_io_dec_req(io);
+ CU_ASSERT_FALSE(ftl_io_done(io));
+ }
+
+ CU_ASSERT_EQUAL(io->req_cnt, 1);
+
+ ftl_io_dec_req(io);
+ CU_ASSERT_TRUE(ftl_io_done(io));
+
+ ftl_io_complete(io);
+ CU_ASSERT_EQUAL(status, -EIO);
+
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ free_device(dev);
+}
+
+static void
+test_alloc_free(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_io_channel *ioch;
+ struct ftl_io *parent, *child;
+ int parent_status = -1;
+ size_t pool_size;
+
+ dev = setup_device(1, 16);
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ pool_size = spdk_mempool_count(ioch->io_pool);
+
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ SPDK_CU_ASSERT_FATAL(parent != NULL);
+ child = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child != NULL);
+
+ ftl_io_free(child);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);
+
+ child = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child != NULL);
+ ftl_io_complete(child);
+ CU_ASSERT_EQUAL(parent_status, -1);
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(parent_status, 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ parent_status = -1;
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ SPDK_CU_ASSERT_FATAL(parent != NULL);
+ child = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child != NULL);
+
+ ftl_io_free(child);
+ CU_ASSERT_EQUAL(parent_status, -1);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(parent_status, 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ free_device(dev);
+}
+
+static void
+test_child_requests(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_io_channel *ioch;
+#define MAX_CHILDREN 16
+ struct ftl_io *parent, *child[MAX_CHILDREN];
+ int status[MAX_CHILDREN + 1], i;
+ size_t pool_size;
+
+ dev = setup_device(1, 16);
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ pool_size = spdk_mempool_count(ioch->io_pool);
+
+ /* Verify correct behaviour when children finish first */
+ parent = alloc_io(dev, io_complete_cb, &status[0]);
+ parent->status = 0;
+
+ ftl_io_inc_req(parent);
+ status[0] = -1;
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ status[i + 1] = -1;
+
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
+ child[i]->status = 0;
+
+ ftl_io_inc_req(child[i]);
+ }
+
+ CU_ASSERT_FALSE(ftl_io_done(parent));
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ CU_ASSERT_FALSE(ftl_io_done(child[i]));
+ ftl_io_dec_req(child[i]);
+ CU_ASSERT_TRUE(ftl_io_done(child[i]));
+ CU_ASSERT_FALSE(ftl_io_done(parent));
+
+ ftl_io_complete(child[i]);
+ CU_ASSERT_FALSE(ftl_io_done(parent));
+ CU_ASSERT_EQUAL(status[i + 1], 0);
+ }
+
+ CU_ASSERT_EQUAL(status[0], -1);
+
+ ftl_io_dec_req(parent);
+ CU_ASSERT_EQUAL(parent->req_cnt, 0);
+ CU_ASSERT_TRUE(ftl_io_done(parent));
+
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(status[0], 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+
+ /* Verify correct behaviour when parent finishes first */
+ parent = alloc_io(dev, io_complete_cb, &status[0]);
+ parent->status = 0;
+
+ ftl_io_inc_req(parent);
+ status[0] = -1;
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ status[i + 1] = -1;
+
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
+ child[i]->status = 0;
+
+ ftl_io_inc_req(child[i]);
+ }
+
+ CU_ASSERT_FALSE(ftl_io_done(parent));
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);
+
+ ftl_io_dec_req(parent);
+ CU_ASSERT_TRUE(ftl_io_done(parent));
+ CU_ASSERT_EQUAL(parent->req_cnt, 0);
+
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(status[0], -1);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ CU_ASSERT_FALSE(ftl_io_done(child[i]));
+ ftl_io_dec_req(child[i]);
+ CU_ASSERT_TRUE(ftl_io_done(child[i]));
+
+ ftl_io_complete(child[i]);
+ CU_ASSERT_EQUAL(status[i + 1], 0);
+ }
+
+ CU_ASSERT_EQUAL(status[0], 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ free_device(dev);
+}
+
+static void
+test_child_status(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_io_channel *ioch;
+ struct ftl_io *parent, *child[2];
+ int parent_status, child_status[2];
+ size_t pool_size, i;
+
+ dev = setup_device(1, 16);
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ pool_size = spdk_mempool_count(ioch->io_pool);
+
+ /* Verify the first error reported by a child is propagated to the parent and not overwritten by later errors */
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ parent->status = 0;
+
+ for (i = 0; i < 2; ++i) {
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &child_status[i]);
+ }
+
+ child[0]->status = -3;
+ child[1]->status = -4;
+
+ ftl_io_complete(child[1]);
+ ftl_io_complete(child[0]);
+ ftl_io_complete(parent);
+
+ CU_ASSERT_EQUAL(child_status[0], -3);
+ CU_ASSERT_EQUAL(child_status[1], -4);
+ CU_ASSERT_EQUAL(parent_status, -4);
+
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ /* Verify parent's status is kept if children finish successfully */
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ parent->status = -1;
+
+ for (i = 0; i < 2; ++i) {
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &child_status[i]);
+ }
+
+ child[0]->status = 0;
+ child[1]->status = 0;
+
+ ftl_io_complete(parent);
+ ftl_io_complete(child[1]);
+ ftl_io_complete(child[0]);
+
+ CU_ASSERT_EQUAL(child_status[0], 0);
+ CU_ASSERT_EQUAL(child_status[1], 0);
+ CU_ASSERT_EQUAL(parent_status, -1);
+
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ /* Verify parent's status is kept if children fail too */
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ parent->status = -1;
+
+ for (i = 0; i < 2; ++i) {
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &child_status[i]);
+ }
+
+ child[0]->status = -3;
+ child[1]->status = -4;
+
+ ftl_io_complete(parent);
+ ftl_io_complete(child[1]);
+ ftl_io_complete(child[0]);
+
+ CU_ASSERT_EQUAL(child_status[0], -3);
+ CU_ASSERT_EQUAL(child_status[1], -4);
+ CU_ASSERT_EQUAL(parent_status, -1);
+
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ free_device(dev);
+}
+
+static void
+test_multi_generation(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_io_channel *ioch;
+#define MAX_GRAND_CHILDREN 32
+ struct ftl_io *parent, *child[MAX_CHILDREN], *gchild[MAX_CHILDREN * MAX_GRAND_CHILDREN];
+ int parent_status, child_status[MAX_CHILDREN], gchild_status[MAX_CHILDREN * MAX_GRAND_CHILDREN];
+ size_t pool_size;
+ int i, j;
+
+ dev = setup_device(1, 16);
+ ioch = ftl_io_channel_get_ctx(dev->ioch);
+ pool_size = spdk_mempool_count(ioch->io_pool);
+
+ /* Verify correct behaviour when children finish first */
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ parent->status = 0;
+
+ ftl_io_inc_req(parent);
+ parent_status = -1;
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ child_status[i] = -1;
+
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &child_status[i]);
+ child[i]->status = 0;
+
+ for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
+ struct ftl_io *io = ftl_io_alloc_child(child[i]);
+ SPDK_CU_ASSERT_FATAL(io != NULL);
+
+ gchild[i * MAX_GRAND_CHILDREN + j] = io;
+ gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
+ setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
+ io->status = 0;
+
+ ftl_io_inc_req(io);
+ }
+
+ ftl_io_inc_req(child[i]);
+ }
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ CU_ASSERT_FALSE(ftl_io_done(child[i]));
+ ftl_io_dec_req(child[i]);
+ CU_ASSERT_TRUE(ftl_io_done(child[i]));
+
+ ftl_io_complete(child[i]);
+ CU_ASSERT_FALSE(ftl_io_done(parent));
+ CU_ASSERT_EQUAL(child_status[i], -1);
+
+ for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
+ struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];
+
+ CU_ASSERT_FALSE(ftl_io_done(io));
+ ftl_io_dec_req(io);
+ CU_ASSERT_TRUE(ftl_io_done(io));
+ ftl_io_complete(io);
+ CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
+ }
+
+ CU_ASSERT_EQUAL(child_status[i], 0);
+ }
+
+ ftl_io_dec_req(parent);
+ CU_ASSERT_TRUE(ftl_io_done(parent));
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(parent_status, 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ /* Verify correct behaviour when the parent finishes first */
+ parent = alloc_io(dev, io_complete_cb, &parent_status);
+ parent->status = 0;
+ parent_status = -1;
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ child_status[i] = -1;
+
+ child[i] = ftl_io_alloc_child(parent);
+ SPDK_CU_ASSERT_FATAL(child[i] != NULL);
+ setup_io(child[i], dev, io_complete_cb, &child_status[i]);
+ child[i]->status = 0;
+
+ for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
+ struct ftl_io *io = ftl_io_alloc_child(child[i]);
+ SPDK_CU_ASSERT_FATAL(io != NULL);
+
+ gchild[i * MAX_GRAND_CHILDREN + j] = io;
+ gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
+ setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
+ io->status = 0;
+
+ ftl_io_inc_req(io);
+ }
+
+ CU_ASSERT_TRUE(ftl_io_done(child[i]));
+ ftl_io_complete(child[i]);
+ CU_ASSERT_EQUAL(child_status[i], -1);
+ }
+
+ CU_ASSERT_TRUE(ftl_io_done(parent));
+ ftl_io_complete(parent);
+ CU_ASSERT_EQUAL(parent_status, -1);
+
+ for (i = 0; i < MAX_CHILDREN; ++i) {
+ for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
+ struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];
+
+ CU_ASSERT_FALSE(ftl_io_done(io));
+ ftl_io_dec_req(io);
+ CU_ASSERT_TRUE(ftl_io_done(io));
+ ftl_io_complete(io);
+ CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
+ }
+
+ CU_ASSERT_EQUAL(child_status[i], 0);
+ }
+
+ CU_ASSERT_EQUAL(parent_status, 0);
+ CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
+
+ free_device(dev);
+}
+
+static void
+test_io_channel_create(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct spdk_io_channel *ioch, **ioch_array;
+ struct ftl_io_channel *ftl_ioch;
+ uint32_t ioch_idx;
+
+ dev = setup_device(g_default_conf.max_io_channels + 1, 16);
+
+ ioch = spdk_get_io_channel(dev);
+ CU_ASSERT(ioch != NULL);
+ CU_ASSERT_EQUAL(dev->num_io_channels, 1);
+ spdk_put_io_channel(ioch);
+ poll_threads();
+ CU_ASSERT_EQUAL(dev->num_io_channels, 0);
+
+ ioch_array = calloc(dev->conf.max_io_channels, sizeof(*ioch_array));
+ SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
+
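+ /* Create one channel per thread until the configured maximum is reached. */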
+ for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+ poll_threads();
+
+ ftl_ioch = ftl_io_channel_get_ctx(ioch);
+ CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
+ }
+
+ CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
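+ /* One more channel than max_io_channels must be rejected. */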
+ set_thread(dev->conf.max_io_channels);
+ ioch = spdk_get_io_channel(dev);
+ CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
+ CU_ASSERT_EQUAL(ioch, NULL);
+
+ for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx += 2) {
+ set_thread(ioch_idx);
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ ioch_array[ioch_idx] = NULL;
+ poll_threads();
+ }
+
+ poll_threads();
+ CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels / 2);
+
+ for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
+ set_thread(ioch_idx);
+
+ if (ioch_array[ioch_idx] == NULL) {
+ ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+ poll_threads();
+
+ ftl_ioch = ftl_io_channel_get_ctx(ioch);
+ CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
+ }
+ }
+
+ for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
+ set_thread(ioch_idx);
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ }
+
+ poll_threads();
+ CU_ASSERT_EQUAL(dev->num_io_channels, 0);
+
+ free(ioch_array);
+ free_device(dev);
+}
+
+static void
+test_acquire_entry(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct spdk_io_channel *ioch, **ioch_array;
+ struct ftl_io_channel *ftl_ioch;
+ struct ftl_wbuf_entry *entry, **entries;
+ uint32_t num_entries, num_io_channels = 2;
+ uint32_t ioch_idx, entry_idx, tmp_idx;
+
+ dev = setup_device(num_io_channels, 16);
+
+ num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
+ entries = calloc(num_entries * num_io_channels, sizeof(*entries));
+ SPDK_CU_ASSERT_FATAL(entries != NULL);
+ ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
+ SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
+
+ /* Acquire whole buffer of internal entries */
+ entry_idx = 0;
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
+ poll_threads();
+
+ for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
+ entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
+ CU_ASSERT(entries[entry_idx - 1] != NULL);
+ }
+
+ entry = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
+ CU_ASSERT(entry == NULL);
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+
+ for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
+ ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
+ entries[ioch_idx * num_entries + tmp_idx] = NULL;
+ }
+
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ }
+ poll_threads();
+
+ /* Do the same for user entries */
+ entry_idx = 0;
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
+ poll_threads();
+
+ for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
+ entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entries[entry_idx - 1] != NULL);
+ }
+
+ entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entry == NULL);
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+
+ for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
+ ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
+ entries[ioch_idx * num_entries + tmp_idx] = NULL;
+ }
+
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ }
+ poll_threads();
+
+ /* Verify limits */
+ entry_idx = 0;
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
+ poll_threads();
+
+ ftl_ioch->qdepth_limit = num_entries / 2;
+ for (tmp_idx = 0; tmp_idx < num_entries / 2; ++tmp_idx) {
+ entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entries[entry_idx - 1] != NULL);
+ }
+
+ entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entry == NULL);
+
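+ /* Entries beyond the user qdepth limit can still be acquired for internal IO. */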
+ for (; tmp_idx < num_entries; ++tmp_idx) {
+ entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
+ CU_ASSERT(entries[entry_idx - 1] != NULL);
+ }
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+
+ for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
+ ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
+ entries[ioch_idx * num_entries + tmp_idx] = NULL;
+ }
+
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ }
+ poll_threads();
+
+ /* Verify acquire/release */
+ set_thread(0);
+ ioch = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch != NULL);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch);
+ poll_threads();
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ entries[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entries[entry_idx] != NULL);
+ }
+
+ entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entry == NULL);
+
+ for (entry_idx = 0; entry_idx < num_entries / 2; ++entry_idx) {
+ ftl_release_wbuf_entry(entries[entry_idx]);
+ entries[entry_idx] = NULL;
+ }
+
+ for (; entry_idx < num_entries; ++entry_idx) {
+ entries[entry_idx - num_entries / 2] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ CU_ASSERT(entries[entry_idx - num_entries / 2] != NULL);
+ }
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ ftl_release_wbuf_entry(entries[entry_idx]);
+ entries[entry_idx] = NULL;
+ }
+
+ spdk_put_io_channel(ioch);
+ poll_threads();
+
+ free(ioch_array);
+ free(entries);
+ free_device(dev);
+}
+
+static void
+test_submit_batch(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct spdk_io_channel **_ioch_array;
+ struct ftl_io_channel **ioch_array;
+ struct ftl_wbuf_entry *entry;
+ struct ftl_batch *batch, *batch2;
+ uint32_t num_io_channels = 16;
+ uint32_t ioch_idx, tmp_idx, entry_idx;
+ uint64_t ioch_bitmap;
+ size_t num_entries;
+
+ dev = setup_device(num_io_channels, num_io_channels);
+
+ _ioch_array = calloc(num_io_channels, sizeof(*_ioch_array));
+ SPDK_CU_ASSERT_FATAL(_ioch_array != NULL);
+ ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
+ SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ _ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(_ioch_array[ioch_idx] != NULL);
+ ioch_array[ioch_idx] = ftl_io_channel_get_ctx(_ioch_array[ioch_idx]);
+ poll_threads();
+ }
+
+ /* Make sure the IO channels are not starved and entries are popped in round-robin fashion */
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+
+ for (entry_idx = 0; entry_idx < dev->xfer_size; ++entry_idx) {
+ entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
+ SPDK_CU_ASSERT_FATAL(entry != NULL);
+
+ num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
+ (void **)&entry, 1, NULL);
+ CU_ASSERT(num_entries == 1);
+ }
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ for (tmp_idx = 0; tmp_idx < ioch_idx; ++tmp_idx) {
+ set_thread(tmp_idx);
+
+ while (spdk_ring_count(ioch_array[tmp_idx]->submit_queue) < dev->xfer_size) {
+ entry = ftl_acquire_wbuf_entry(ioch_array[tmp_idx], 0);
+ SPDK_CU_ASSERT_FATAL(entry != NULL);
+
+ num_entries = spdk_ring_enqueue(ioch_array[tmp_idx]->submit_queue,
+ (void **)&entry, 1, NULL);
+ CU_ASSERT(num_entries == 1);
+ }
+ }
+
+ set_thread(ioch_idx);
+
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+
+ TAILQ_FOREACH(entry, &batch->entries, tailq) {
+ CU_ASSERT(entry->ioch == ioch_array[ioch_idx]);
+ }
+
+ ftl_release_batch(dev, batch);
+
+ CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
+ ioch_array[ioch_idx]->num_entries);
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels - 1; ++ioch_idx) {
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+ ftl_release_batch(dev, batch);
+ }
+
+ /* Make sure the batch can be built from entries from any IO channel */
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
+ SPDK_CU_ASSERT_FATAL(entry != NULL);
+
+ num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
+ (void **)&entry, 1, NULL);
+ CU_ASSERT(num_entries == 1);
+ }
+
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+
+ ioch_bitmap = 0;
+ TAILQ_FOREACH(entry, &batch->entries, tailq) {
+ ioch_bitmap |= 1 << entry->ioch->index;
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ CU_ASSERT((ioch_bitmap & (1 << ioch_array[ioch_idx]->index)) != 0);
+ }
+ ftl_release_batch(dev, batch);
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
+ ioch_array[ioch_idx]->num_entries);
+ }
+
+ /* Make sure pending batches are prioritized */
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+
+ while (spdk_ring_count(ioch_array[ioch_idx]->submit_queue) < dev->xfer_size) {
+ entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
+ SPDK_CU_ASSERT_FATAL(entry != NULL);
+ num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
+ (void **)&entry, 1, NULL);
+ CU_ASSERT(num_entries == 1);
+ }
+ }
+
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+
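+ /* Re-queue the freshly popped batch as pending; the next call is expected to */
+ /* return it before assembling a new batch from the submit queues. */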
+ TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
+ batch2 = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch2 != NULL);
+
+ CU_ASSERT(TAILQ_EMPTY(&dev->pending_batches));
+ CU_ASSERT(batch == batch2);
+
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+
+ ftl_release_batch(dev, batch);
+ ftl_release_batch(dev, batch2);
+
+ for (ioch_idx = 2; ioch_idx < num_io_channels; ++ioch_idx) {
+ batch = ftl_get_next_batch(dev);
+ SPDK_CU_ASSERT_FATAL(batch != NULL);
+ ftl_release_batch(dev, batch);
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ spdk_put_io_channel(_ioch_array[ioch_idx]);
+ }
+ poll_threads();
+
+ free(_ioch_array);
+ free(ioch_array);
+ free_device(dev);
+}
+
+static void
+test_entry_address(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct spdk_io_channel **ioch_array;
+ struct ftl_io_channel *ftl_ioch;
+ struct ftl_wbuf_entry **entry_array;
+ struct ftl_addr addr;
+ uint32_t num_entries, num_io_channels = 7;
+ uint32_t ioch_idx, entry_idx;
+
+ dev = setup_device(num_io_channels, num_io_channels);
+ ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
+ SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
+
+ num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
+ entry_array = calloc(num_entries, sizeof(*entry_array));
+ SPDK_CU_ASSERT_FATAL(entry_array != NULL);
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ioch_array[ioch_idx] = spdk_get_io_channel(dev);
+ SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
+ poll_threads();
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+ set_thread(ioch_idx);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);
+
+ addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
+ CU_ASSERT(addr.cached == 1);
+ CU_ASSERT((addr.cache_offset >> dev->ioch_shift) == entry_idx);
+ CU_ASSERT((addr.cache_offset & ((1 << dev->ioch_shift) - 1)) == ioch_idx);
+ CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
+ }
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ ftl_release_wbuf_entry(entry_array[entry_idx]);
+ }
+ }
+
+ for (ioch_idx = 0; ioch_idx < num_io_channels; ioch_idx += 2) {
+ set_thread(ioch_idx);
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ ioch_array[ioch_idx] = NULL;
+ }
+ poll_threads();
+
+ for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
+ set_thread(ioch_idx);
+ ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
+ SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);
+
+ addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
+ CU_ASSERT(addr.cached == 1);
+ CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
+ }
+
+ for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
+ ftl_release_wbuf_entry(entry_array[entry_idx]);
+ }
+ }
+
+ for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
+ set_thread(ioch_idx);
+ spdk_put_io_channel(ioch_array[ioch_idx]);
+ }
+ poll_threads();
+
+ free(entry_array);
+ free(ioch_array);
+ free_device(dev);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ftl_io_suite", NULL, NULL);
+
+
+ CU_ADD_TEST(suite, test_completion);
+ CU_ADD_TEST(suite, test_alloc_free);
+ CU_ADD_TEST(suite, test_child_requests);
+ CU_ADD_TEST(suite, test_child_status);
+ CU_ADD_TEST(suite, test_multi_generation);
+ CU_ADD_TEST(suite, test_io_channel_create);
+ CU_ADD_TEST(suite, test_acquire_entry);
+ CU_ADD_TEST(suite, test_submit_batch);
+ CU_ADD_TEST(suite, test_entry_address);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_md/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_md/.gitignore
new file mode 100644
index 000000000..8f0f690f0
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_md/.gitignore
@@ -0,0 +1 @@
+ftl_md_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_md/Makefile b/src/spdk/test/unit/lib/ftl/ftl_md/Makefile
new file mode 100644
index 000000000..1ad632aff
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_md/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_md_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_md/ftl_md_ut.c b/src/spdk/test/unit/lib/ftl/ftl_md/ftl_md_ut.c
new file mode 100644
index 000000000..20f3a28c9
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_md/ftl_md_ut.c
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+
+#include "ftl/ftl_band.c"
+#include "../common/utils.c"
+
+struct base_bdev_geometry g_geo = {
+ .write_unit_size = 16,
+ .optimal_open_zones = 12,
+ .zone_size = 100,
+ .blockcnt = 1500 * 100 * 12,
+};
+
+static void
+setup_band(struct ftl_band **band, const struct base_bdev_geometry *geo)
+{
+ int rc;
+ struct spdk_ftl_dev *dev;
+
+ dev = test_init_ftl_dev(&g_geo);
+ *band = test_init_ftl_band(dev, 0, geo->zone_size);
+ rc = ftl_band_alloc_lba_map(*band);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ (*band)->state = FTL_BAND_STATE_PREP;
+ ftl_band_clear_lba_map(*band);
+}
+
+static void
+cleanup_band(struct ftl_band *band)
+{
+ struct spdk_ftl_dev *dev = band->dev;
+
+ test_free_ftl_band(band);
+ test_free_ftl_dev(dev);
+}
+
+static void
+test_md_unpack(void)
+{
+ struct ftl_band *band;
+ struct ftl_lba_map *lba_map;
+
+ setup_band(&band, &g_geo);
+
+ lba_map = &band->lba_map;
+ SPDK_CU_ASSERT_FATAL(lba_map->dma_buf);
+
+ ftl_pack_head_md(band);
+ CU_ASSERT_EQUAL(ftl_unpack_head_md(band), FTL_MD_SUCCESS);
+
+ ftl_pack_tail_md(band);
+ CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_SUCCESS);
+
+ cleanup_band(band);
+}
+
+static void
+test_md_unpack_fail(void)
+{
+ struct ftl_band *band;
+ struct ftl_lba_map *lba_map;
+ struct ftl_md_hdr *hdr;
+
+ setup_band(&band, &g_geo);
+
+ lba_map = &band->lba_map;
+ SPDK_CU_ASSERT_FATAL(lba_map->dma_buf);
+
+ /* check crc */
+ ftl_pack_tail_md(band);
+ /* flip last bit of lba_map */
+ *((char *)lba_map->dma_buf + ftl_tail_md_num_blocks(band->dev) * FTL_BLOCK_SIZE - 1) ^= 0x1;
+ CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_CRC);
+
+ /* check invalid version */
+ hdr = lba_map->dma_buf;
+ ftl_pack_tail_md(band);
+ hdr->ver++;
+ CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_VER);
+
+ /* check wrong UUID */
+ ftl_pack_head_md(band);
+ hdr->uuid.u.raw[0] ^= 0x1;
+ CU_ASSERT_EQUAL(ftl_unpack_head_md(band), FTL_MD_NO_MD);
+
+ /* check invalid size */
+ ftl_pack_tail_md(band);
+ g_geo.zone_size--;
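+ /* With the zone size reduced, the packed tail metadata presumably no longer */
+ /* matches the current band geometry, which should trip the size check below. */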
+ CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_SIZE);
+
+ cleanup_band(band);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ftl_meta_suite", NULL, NULL);
+
+
+ CU_ADD_TEST(suite, test_md_unpack);
+ CU_ADD_TEST(suite, test_md_unpack_fail);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_ppa/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_ppa/.gitignore
new file mode 100644
index 000000000..7f07c7f98
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_ppa/.gitignore
@@ -0,0 +1 @@
+ftl_ppa_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_ppa/Makefile b/src/spdk/test/unit/lib/ftl/ftl_ppa/Makefile
new file mode 100644
index 000000000..f8df5209e
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_ppa/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_ppa_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_ppa/ftl_ppa_ut.c b/src/spdk/test/unit/lib/ftl/ftl_ppa/ftl_ppa_ut.c
new file mode 100644
index 000000000..dae57abcd
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_ppa/ftl_ppa_ut.c
@@ -0,0 +1,226 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+
+#include "ftl/ftl_core.h"
+
+#define L2P_TABLE_SIZE 1024
+
+static struct spdk_ftl_dev *g_dev;
+
+DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
+
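+ /* Presumably returns an oversized zone for the 64-bit suite (addr_len > 32) so */
+ /* that offsets no longer fit in 32 bits, and a small zone otherwise. */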
+uint64_t
+spdk_bdev_get_zone_size(const struct spdk_bdev *bdev)
+{
+ if (g_dev->addr_len > 32) {
+ return 1ULL << 32;
+ }
+
+ return 1024;
+}
+
+uint32_t
+spdk_bdev_get_optimal_open_zones(const struct spdk_bdev *bdev)
+{
+ return 100;
+}
+
+static struct spdk_ftl_dev *
+test_alloc_dev(size_t size)
+{
+ struct spdk_ftl_dev *dev;
+
+ dev = calloc(1, sizeof(*dev));
+
+ dev->num_lbas = L2P_TABLE_SIZE;
+ dev->l2p = calloc(L2P_TABLE_SIZE, size);
+
+ return dev;
+}
+
+static int
+setup_l2p_32bit(void)
+{
+ g_dev = test_alloc_dev(sizeof(uint32_t));
+ g_dev->addr_len = 24;
+ return 0;
+}
+
+static int
+setup_l2p_64bit(void)
+{
+ g_dev = test_alloc_dev(sizeof(uint64_t));
+ g_dev->addr_len = 63;
+ return 0;
+}
+
+static void
+clean_l2p(void)
+{
+ size_t l2p_elem_size;
+
+ if (ftl_addr_packed(g_dev)) {
+ l2p_elem_size = sizeof(uint32_t);
+ } else {
+ l2p_elem_size = sizeof(uint64_t);
+ }
+ memset(g_dev->l2p, 0, g_dev->num_lbas * l2p_elem_size);
+}
+
+static int
+cleanup(void)
+{
+ free(g_dev->l2p);
+ free(g_dev);
+ g_dev = NULL;
+ return 0;
+}
+
+static void
+test_addr_pack32(void)
+{
+ struct ftl_addr orig = {}, addr;
+
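+ /* This test runs only in the 32-bit suite (addr_len = 24), so both the offset */
+ /* and the cached flag are expected to fit in the packed 32-bit representation. */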
+ /* Check valid address transformation */
+ orig.offset = 4;
+ addr = ftl_addr_to_packed(g_dev, orig);
+ CU_ASSERT_TRUE(addr.offset <= UINT32_MAX);
+ CU_ASSERT_FALSE(addr.pack.cached);
+ addr = ftl_addr_from_packed(g_dev, addr);
+ CU_ASSERT_FALSE(ftl_addr_invalid(addr));
+ CU_ASSERT_EQUAL(addr.offset, orig.offset);
+
+ /* Check invalid address transformation */
+ orig = ftl_to_addr(FTL_ADDR_INVALID);
+ addr = ftl_addr_to_packed(g_dev, orig);
+ CU_ASSERT_TRUE(addr.offset <= UINT32_MAX);
+ addr = ftl_addr_from_packed(g_dev, addr);
+ CU_ASSERT_TRUE(ftl_addr_invalid(addr));
+
+ /* Check cached entry offset transformation */
+ orig.cached = 1;
+ orig.cache_offset = 1024;
+ addr = ftl_addr_to_packed(g_dev, orig);
+ CU_ASSERT_TRUE(addr.offset <= UINT32_MAX);
+ CU_ASSERT_TRUE(addr.pack.cached);
+ addr = ftl_addr_from_packed(g_dev, addr);
+ CU_ASSERT_FALSE(ftl_addr_invalid(addr));
+ CU_ASSERT_TRUE(ftl_addr_cached(addr));
+ CU_ASSERT_EQUAL(addr.offset, orig.offset);
+ clean_l2p();
+}
+
+static void
+test_addr_invalid(void)
+{
+ struct ftl_addr addr;
+ size_t i;
+
+ /* Set every other LBA as invalid */
+ for (i = 0; i < L2P_TABLE_SIZE; i += 2) {
+ ftl_l2p_set(g_dev, i, ftl_to_addr(FTL_ADDR_INVALID));
+ }
+
+ /* Check that every even LBA is invalid while the others are not */
+ for (i = 0; i < L2P_TABLE_SIZE; ++i) {
+ addr = ftl_l2p_get(g_dev, i);
+
+ if (i % 2 == 0) {
+ CU_ASSERT_TRUE(ftl_addr_invalid(addr));
+ } else {
+ CU_ASSERT_FALSE(ftl_addr_invalid(addr));
+ }
+ }
+ clean_l2p();
+}
+
+static void
+test_addr_cached(void)
+{
+ struct ftl_addr addr;
+ size_t i;
+
+ /* Set every other LBA as cached */
+ for (i = 0; i < L2P_TABLE_SIZE; i += 2) {
+ addr.cached = 1;
+ addr.cache_offset = i;
+ ftl_l2p_set(g_dev, i, addr);
+ }
+
+ /* Check every even LBA is cached while others are not */
+ for (i = 0; i < L2P_TABLE_SIZE; ++i) {
+ addr = ftl_l2p_get(g_dev, i);
+
+ if (i % 2 == 0) {
+ CU_ASSERT_TRUE(ftl_addr_cached(addr));
+ CU_ASSERT_EQUAL(addr.cache_offset, i);
+ } else {
+ CU_ASSERT_FALSE(ftl_addr_cached(addr));
+ }
+ }
+ clean_l2p();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite32 = NULL, suite64 = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite32 = CU_add_suite("ftl_addr32_suite", setup_l2p_32bit, cleanup);
+
+
+ suite64 = CU_add_suite("ftl_addr64_suite", setup_l2p_64bit, cleanup);
+
+
+ CU_ADD_TEST(suite32, test_addr_pack32);
+ CU_ADD_TEST(suite32, test_addr_invalid);
+ CU_ADD_TEST(suite32, test_addr_cached);
+ CU_ADD_TEST(suite64, test_addr_invalid);
+ CU_ADD_TEST(suite64, test_addr_cached);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_reloc.c/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/.gitignore
new file mode 100644
index 000000000..439602062
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/.gitignore
@@ -0,0 +1 @@
+ftl_reloc_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_reloc.c/Makefile b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/Makefile
new file mode 100644
index 000000000..ed4188107
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_reloc_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_reloc.c/ftl_reloc_ut.c b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/ftl_reloc_ut.c
new file mode 100644
index 000000000..26a423882
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_reloc.c/ftl_reloc_ut.c
@@ -0,0 +1,508 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+
+#include "ftl/ftl_reloc.c"
+#include "../common/utils.c"
+
+#define MAX_ACTIVE_RELOCS 5
+#define MAX_RELOC_QDEPTH 31
+
+struct base_bdev_geometry g_geo = {
+ .write_unit_size = 16,
+ .optimal_open_zones = 12,
+ .zone_size = 100,
+ .blockcnt = 1500 * 100 * 12,
+};
+
+DEFINE_STUB(ftl_dev_tail_md_disk_size, size_t, (const struct spdk_ftl_dev *dev), 1);
+DEFINE_STUB(ftl_addr_is_written, bool, (struct ftl_band *band, struct ftl_addr addr), true);
+DEFINE_STUB_V(ftl_band_set_state, (struct ftl_band *band, enum ftl_band_state state));
+DEFINE_STUB_V(ftl_free_io, (struct ftl_io *io));
+#if defined(DEBUG)
+DEFINE_STUB_V(ftl_trace_lba_io_init, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
+#endif
+
+int
+ftl_band_alloc_lba_map(struct ftl_band *band)
+{
+ struct spdk_ftl_dev *dev = band->dev;
+
+ ftl_band_acquire_lba_map(band);
+ band->lba_map.map = spdk_mempool_get(dev->lba_pool);
+
+ return 0;
+}
+
+void
+ftl_band_release_lba_map(struct ftl_band *band)
+{
+ struct spdk_ftl_dev *dev = band->dev;
+
+ band->lba_map.ref_cnt--;
+ spdk_mempool_put(dev->lba_pool, band->lba_map.map);
+ band->lba_map.map = NULL;
+}
+
+void
+ftl_band_acquire_lba_map(struct ftl_band *band)
+{
+ band->lba_map.ref_cnt++;
+}
+
+size_t
+ftl_lba_map_num_blocks(const struct spdk_ftl_dev *dev)
+{
+ return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) * sizeof(uint64_t), FTL_BLOCK_SIZE);
+}
+
+int
+ftl_band_read_lba_map(struct ftl_band *band, size_t offset,
+ size_t num_blocks, ftl_io_fn fn, void *ctx)
+{
+ fn(ctx, ctx, 0);
+ return 0;
+}
+
+uint64_t
+ftl_band_block_offset_from_addr(struct ftl_band *band, struct ftl_addr addr)
+{
+ return test_offset_from_addr(addr, band);
+}
+
+struct ftl_addr
+ftl_band_addr_from_block_offset(struct ftl_band *band, uint64_t block_off)
+{
+ struct ftl_addr addr = {};
+
+ addr.offset = block_off + band->id * ftl_get_num_blocks_in_band(band->dev);
+ return addr;
+}
+
+void
+ftl_io_read(struct ftl_io *io)
+{
+ io->cb_fn(io, io->cb_ctx, 0);
+ free(io);
+}
+
+void
+ftl_io_write(struct ftl_io *io)
+{
+ io->cb_fn(io, io->cb_ctx, 0);
+ free(io->lba.vector);
+ free(io);
+}
+
+struct ftl_io *
+ftl_io_init_internal(const struct ftl_io_init_opts *opts)
+{
+ struct ftl_io *io = opts->io;
+
+ if (!io) {
+ io = calloc(1, opts->size);
+ }
+
+ SPDK_CU_ASSERT_FATAL(io != NULL);
+
+ io->dev = opts->dev;
+ io->band = opts->band;
+ io->flags = opts->flags;
+ io->cb_fn = opts->cb_fn;
+ io->cb_ctx = io;
+ io->num_blocks = opts->num_blocks;
+ memcpy(&io->iov, &opts->iovs, sizeof(io->iov));
+ io->iov_cnt = opts->iovcnt;
+
+ if (opts->flags & FTL_IO_VECTOR_LBA) {
+ io->lba.vector = calloc(io->num_blocks, sizeof(uint64_t));
+ SPDK_CU_ASSERT_FATAL(io->lba.vector != NULL);
+ }
+
+ return io;
+}
+
+struct ftl_io *
+ftl_io_alloc(struct spdk_io_channel *ch)
+{
+ size_t io_size = sizeof(struct ftl_md_io);
+
+ return malloc(io_size);
+}
+
+void
+ftl_io_reinit(struct ftl_io *io, ftl_io_fn fn, void *ctx, int flags, int type)
+{
+ io->cb_fn = fn;
+ io->cb_ctx = ctx;
+ io->type = type;
+}
+
+static void
+single_reloc_move(struct ftl_band_reloc *breloc)
+{
+ /* Process read */
+ ftl_process_reloc(breloc);
+ /* Process lba map read */
+ ftl_process_reloc(breloc);
+ /* Process write */
+ ftl_process_reloc(breloc);
+}
+
+static void
+add_to_active_queue(struct ftl_reloc *reloc, struct ftl_band_reloc *breloc)
+{
+ TAILQ_REMOVE(&reloc->pending_queue, breloc, entry);
+ breloc->state = FTL_BAND_RELOC_STATE_ACTIVE;
+ TAILQ_INSERT_HEAD(&reloc->active_queue, breloc, entry);
+}
+
+static void
+setup_reloc(struct spdk_ftl_dev **_dev, struct ftl_reloc **_reloc,
+ const struct base_bdev_geometry *geo)
+{
+ size_t i;
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+
+ dev = test_init_ftl_dev(geo);
+
+ dev->conf.max_active_relocs = MAX_ACTIVE_RELOCS;
+ dev->conf.max_reloc_qdepth = MAX_RELOC_QDEPTH;
+
+ SPDK_CU_ASSERT_FATAL(ftl_get_num_bands(dev) > 0);
+
+ for (i = 0; i < ftl_get_num_bands(dev); ++i) {
+ test_init_ftl_band(dev, i, geo->zone_size);
+ }
+
+ reloc = ftl_reloc_init(dev);
+ dev->reloc = reloc;
+ CU_ASSERT_PTR_NOT_NULL_FATAL(reloc);
+ ftl_reloc_resume(reloc);
+
+ *_dev = dev;
+ *_reloc = reloc;
+}
+
+static void
+cleanup_reloc(struct spdk_ftl_dev *dev, struct ftl_reloc *reloc)
+{
+ size_t i;
+
+ for (i = 0; i < ftl_get_num_bands(reloc->dev); ++i) {
+ SPDK_CU_ASSERT_FATAL(reloc->brelocs[i].state == FTL_BAND_RELOC_STATE_INACTIVE);
+ }
+
+ ftl_reloc_free(reloc);
+
+ for (i = 0; i < ftl_get_num_bands(dev); ++i) {
+ test_free_ftl_band(&dev->bands[i]);
+ }
+ test_free_ftl_dev(dev);
+}
+
+static void
+set_band_valid_map(struct ftl_band *band, size_t offset, size_t num_blocks)
+{
+ struct ftl_lba_map *lba_map = &band->lba_map;
+ size_t i;
+
+ SPDK_CU_ASSERT_FATAL(lba_map != NULL);
+ for (i = offset; i < offset + num_blocks; ++i) {
+ spdk_bit_array_set(lba_map->vld, i);
+ lba_map->num_vld++;
+ }
+}
+
+static void
+test_reloc_iter_full(void)
+{
+ size_t num_blocks, num_iters, reminder, i;
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+ struct ftl_addr addr;
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ g_geo.zone_size = 100;
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+
+ set_band_valid_map(band, 0, ftl_get_num_blocks_in_band(dev));
+
+ ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
+
+ num_iters = ftl_get_num_punits(dev) *
+ (ftl_get_num_blocks_in_zone(dev) / reloc->xfer_size);
+
+ for (i = 0; i < num_iters; i++) {
+ num_blocks = ftl_reloc_next_blocks(breloc, &addr);
+ CU_ASSERT_EQUAL(num_blocks, reloc->xfer_size);
+ }
+
+ num_iters = ftl_get_num_punits(dev);
+
+ /* ftl_reloc_next_blocks searches for up to xfer_size contiguous */
+ /* valid logical blocks within a zone, so we can end up with a */
+ /* remainder if the number of logical blocks in a zone is not */
+ /* divisible by xfer_size */
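+ /* For illustration, assuming xfer_size equals the write unit size (16) and a */
+ /* zone holds g_geo.zone_size = 100 blocks: each zone yields 6 full chunks of */
+ /* 16 blocks plus a remainder of 100 % 16 = 4 blocks. */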
+ reminder = ftl_get_num_blocks_in_zone(dev) % reloc->xfer_size;
+ for (i = 0; i < num_iters; i++) {
+ num_blocks = ftl_reloc_next_blocks(breloc, &addr);
+ CU_ASSERT_EQUAL(reminder, num_blocks);
+ }
+
+ /* num_blocks should remain intact since all the blocks are valid */
+ CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
+ breloc->state = FTL_BAND_RELOC_STATE_INACTIVE;
+
+ cleanup_reloc(dev, reloc);
+}
+
+static void
+test_reloc_empty_band(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+
+ ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, 0);
+
+ cleanup_reloc(dev, reloc);
+}
+
+static void
+test_reloc_full_band(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+ size_t num_moves, num_iters, num_block, i;
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+ num_moves = MAX_RELOC_QDEPTH * reloc->xfer_size;
+ num_iters = ftl_get_num_blocks_in_band(dev) / num_moves;
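+ /* Illustrative numbers, assuming xfer_size equals the write unit size (16): */
+ /* each full iteration below relocates MAX_RELOC_QDEPTH * 16 = 496 band blocks. */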
+
+ set_band_valid_map(band, 0, ftl_get_num_blocks_in_band(dev));
+
+ ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
+
+ ftl_reloc_prep(breloc);
+ add_to_active_queue(reloc, breloc);
+
+ for (i = 1; i <= num_iters; ++i) {
+ single_reloc_move(breloc);
+ num_block = ftl_get_num_blocks_in_band(dev) - (i * num_moves);
+ CU_ASSERT_EQUAL(breloc->num_blocks, num_block);
+
+ }
+
+ /* Process remainder blocks */
+ single_reloc_move(breloc);
+ /* Drain move queue */
+ ftl_reloc_process_moves(breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, 0);
+ CU_ASSERT_TRUE(ftl_reloc_done(breloc));
+ ftl_reloc_release(breloc);
+
+ cleanup_reloc(dev, reloc);
+}
+
+static void
+test_reloc_scatter_band(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+ size_t num_iters, i;
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+ num_iters = spdk_divide_round_up(ftl_get_num_blocks_in_band(dev), MAX_RELOC_QDEPTH * 2);
+
+ for (i = 0; i < ftl_get_num_blocks_in_band(dev); ++i) {
+ if (i % 2) {
+ set_band_valid_map(band, i, 1);
+ }
+ }
+
+ ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
+ ftl_reloc_prep(breloc);
+ add_to_active_queue(reloc, breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
+
+ for (i = 0; i < num_iters ; ++i) {
+ single_reloc_move(breloc);
+ }
+
+ ftl_process_reloc(breloc);
+ CU_ASSERT_EQUAL(breloc->num_blocks, 0);
+ CU_ASSERT_TRUE(ftl_reloc_done(breloc));
+
+ cleanup_reloc(dev, reloc);
+}
+
+static void
+test_reloc_zone(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+ size_t num_io, num_iters, num_block, i;
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+ /* High priority bands have an allocated lba map */
+ band->high_prio = 1;
+ ftl_band_alloc_lba_map(band);
+ num_io = MAX_RELOC_QDEPTH * reloc->xfer_size;
+ num_iters = ftl_get_num_blocks_in_zone(dev) / num_io;
+
+ set_band_valid_map(band, 0, ftl_get_num_blocks_in_band(dev));
+
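+ /* Relocate only the fourth zone of the band (offset of three zones, length of */
+ /* one zone) at elevated priority and without defrag. */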
+ ftl_reloc_add(reloc, band, ftl_get_num_blocks_in_zone(dev) * 3,
+ ftl_get_num_blocks_in_zone(dev), 1, false);
+ add_to_active_queue(reloc, breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_zone(dev));
+
+ for (i = 1; i <= num_iters ; ++i) {
+ single_reloc_move(breloc);
+ num_block = ftl_get_num_blocks_in_zone(dev) - (i * num_io);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, num_block);
+ }
+
+ /* In case num_blocks_in_zone % num_io != 0, one extra iteration is needed */
+ single_reloc_move(breloc);
+ /* Drain move queue */
+ ftl_reloc_process_moves(breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, 0);
+ CU_ASSERT_TRUE(ftl_reloc_done(breloc));
+ ftl_reloc_release(breloc);
+
+ cleanup_reloc(dev, reloc);
+}
+
+static void
+test_reloc_single_block(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_reloc *reloc;
+ struct ftl_band_reloc *breloc;
+ struct ftl_band *band;
+#define TEST_RELOC_OFFSET 6
+
+ setup_reloc(&dev, &reloc, &g_geo);
+
+ breloc = &reloc->brelocs[0];
+ band = breloc->band;
+
+ set_band_valid_map(band, TEST_RELOC_OFFSET, 1);
+
+ ftl_reloc_add(reloc, band, TEST_RELOC_OFFSET, 1, 0, false);
+ SPDK_CU_ASSERT_FATAL(breloc == TAILQ_FIRST(&reloc->pending_queue));
+ ftl_reloc_prep(breloc);
+ add_to_active_queue(reloc, breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, 1);
+
+ single_reloc_move(breloc);
+ /* Drain move queue */
+ ftl_reloc_process_moves(breloc);
+
+ CU_ASSERT_EQUAL(breloc->num_blocks, 0);
+ CU_ASSERT_TRUE(ftl_reloc_done(breloc));
+ ftl_reloc_release(breloc);
+
+ cleanup_reloc(dev, reloc);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ftl_band_suite", NULL, NULL);
+
+
+ CU_ADD_TEST(suite, test_reloc_iter_full);
+ CU_ADD_TEST(suite, test_reloc_empty_band);
+ CU_ADD_TEST(suite, test_reloc_full_band);
+ CU_ADD_TEST(suite, test_reloc_scatter_band);
+ CU_ADD_TEST(suite, test_reloc_zone);
+ CU_ADD_TEST(suite, test_reloc_single_block);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ftl/ftl_wptr/.gitignore b/src/spdk/test/unit/lib/ftl/ftl_wptr/.gitignore
new file mode 100644
index 000000000..8f1f46756
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_wptr/.gitignore
@@ -0,0 +1 @@
+ftl_wptr_ut
diff --git a/src/spdk/test/unit/lib/ftl/ftl_wptr/Makefile b/src/spdk/test/unit/lib/ftl/ftl_wptr/Makefile
new file mode 100644
index 000000000..42bf7c602
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_wptr/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ftl_wptr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ftl/ftl_wptr/ftl_wptr_ut.c b/src/spdk/test/unit/lib/ftl/ftl_wptr/ftl_wptr_ut.c
new file mode 100644
index 000000000..ccee312a2
--- /dev/null
+++ b/src/spdk/test/unit/lib/ftl/ftl_wptr/ftl_wptr_ut.c
@@ -0,0 +1,223 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+
+#include "ftl/ftl_core.c"
+#include "ftl/ftl_band.c"
+#include "ftl/ftl_init.c"
+#include "../common/utils.c"
+
+struct base_bdev_geometry g_geo = {
+ .write_unit_size = 16,
+ .optimal_open_zones = 12,
+ .zone_size = 128,
+ .blockcnt = 20 * 128 * 12,
+};
+
+#if defined(DEBUG)
+DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
+DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
+
+DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
+ enum ftl_trace_completion completion));
+DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
+DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
+ struct ftl_addr addr, size_t addr_cnt));
+#endif
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
+DEFINE_STUB_V(ftl_io_dec_req, (struct ftl_io *io));
+DEFINE_STUB_V(ftl_io_inc_req, (struct ftl_io *io));
+DEFINE_STUB_V(ftl_io_fail, (struct ftl_io *io, int status));
+DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
+ size_t num_blocks, int prio, bool defrag));
+DEFINE_STUB_V(ftl_io_process_error, (struct ftl_io *io, const struct spdk_nvme_cpl *status));
+DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
+ struct spdk_io_channel *ch,
+ uint64_t zone_id, enum spdk_bdev_zone_action action,
+ spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
+DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
+
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
+{
+ return spdk_get_io_channel(bdev_desc);
+}
+
+struct ftl_io *
+ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
+{
+ struct ftl_io *io;
+
+ io = calloc(1, sizeof(struct ftl_io));
+ SPDK_CU_ASSERT_FATAL(io != NULL);
+
+ io->dev = band->dev;
+ io->band = band;
+ io->cb_fn = cb;
+ io->num_blocks = 1;
+
+ return io;
+}
+
+void
+ftl_io_advance(struct ftl_io *io, size_t num_blocks)
+{
+ io->pos += num_blocks;
+}
+
+void
+ftl_io_complete(struct ftl_io *io)
+{
+ io->cb_fn(io, NULL, 0);
+ free(io);
+}
+
+static void
+setup_wptr_test(struct spdk_ftl_dev **dev, const struct base_bdev_geometry *geo)
+{
+ struct spdk_ftl_dev *t_dev;
+ struct _ftl_io_channel *_ioch;
+ size_t i;
+
+ t_dev = test_init_ftl_dev(geo);
+ for (i = 0; i < ftl_get_num_bands(t_dev); ++i) {
+ test_init_ftl_band(t_dev, i, geo->zone_size);
+ t_dev->bands[i].state = FTL_BAND_STATE_CLOSED;
+ ftl_band_set_state(&t_dev->bands[i], FTL_BAND_STATE_FREE);
+ }
+
+ _ioch = (struct _ftl_io_channel *)(t_dev->ioch + 1);
+ _ioch->ioch = calloc(1, sizeof(*_ioch->ioch));
+ SPDK_CU_ASSERT_FATAL(_ioch->ioch != NULL);
+
+ *dev = t_dev;
+}
+
+static void
+cleanup_wptr_test(struct spdk_ftl_dev *dev)
+{
+ struct _ftl_io_channel *_ioch;
+ size_t i;
+
+ for (i = 0; i < ftl_get_num_bands(dev); ++i) {
+ dev->bands[i].lba_map.segments = NULL;
+ test_free_ftl_band(&dev->bands[i]);
+ }
+
+ _ioch = (struct _ftl_io_channel *)(dev->ioch + 1);
+ free(_ioch->ioch);
+
+ test_free_ftl_dev(dev);
+}
+
+static void
+test_wptr(void)
+{
+ struct spdk_ftl_dev *dev;
+ struct ftl_wptr *wptr;
+ struct ftl_band *band;
+ struct ftl_io io = { 0 };
+ size_t xfer_size;
+ size_t zone, block, offset, i;
+ int rc;
+
+ setup_wptr_test(&dev, &g_geo);
+
+ xfer_size = dev->xfer_size;
+ ftl_add_wptr(dev);
+ for (i = 0; i < ftl_get_num_bands(dev); ++i) {
+ wptr = LIST_FIRST(&dev->wptr_list);
+ band = wptr->band;
+ ftl_band_set_state(band, FTL_BAND_STATE_OPENING);
+ ftl_band_set_state(band, FTL_BAND_STATE_OPEN);
+ io.band = band;
+ io.dev = dev;
+
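+ /* With g_geo above, and assuming xfer_size equals the write unit size (16) and */
+ /* 128 blocks per zone, filling a band takes (128 / 16) * num_zones write */
+ /* pointer advances of 16 blocks each before the band transitions to FULL. */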
+ for (block = 0, offset = 0; block < ftl_get_num_blocks_in_zone(dev) / xfer_size; ++block) {
+ for (zone = 0; zone < band->num_zones; ++zone) {
+ CU_ASSERT_EQUAL(wptr->offset, offset);
+ ftl_wptr_advance(wptr, xfer_size);
+ offset += xfer_size;
+ }
+ }
+
+ CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_FULL);
+
+ ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
+
+ /* Call the metadata completion cb to force band state change */
+ /* and removal of the actual wptr */
+ ftl_md_write_cb(&io, NULL, 0);
+ CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_CLOSED);
+ CU_ASSERT_TRUE(LIST_EMPTY(&dev->wptr_list));
+
+ rc = ftl_add_wptr(dev);
+
+ /* There are no free bands during the last iteration, so */
+ /* there'll be no new wptr allocation */
+ if (i == (ftl_get_num_bands(dev) - 1)) {
+ CU_ASSERT_EQUAL(rc, -1);
+ } else {
+ CU_ASSERT_EQUAL(rc, 0);
+ }
+ }
+
+ cleanup_wptr_test(dev);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ftl_wptr_suite", NULL, NULL);
+
+
+ CU_ADD_TEST(suite, test_wptr);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/idxd/Makefile b/src/spdk/test/unit/lib/idxd/Makefile
new file mode 100644
index 000000000..e37cb22d9
--- /dev/null
+++ b/src/spdk/test/unit/lib/idxd/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = idxd.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/idxd/idxd.c/.gitignore b/src/spdk/test/unit/lib/idxd/idxd.c/.gitignore
new file mode 100644
index 000000000..b9fee58fe
--- /dev/null
+++ b/src/spdk/test/unit/lib/idxd/idxd.c/.gitignore
@@ -0,0 +1 @@
+idxd_ut
diff --git a/src/spdk/test/unit/lib/idxd/idxd.c/Makefile b/src/spdk/test/unit/lib/idxd/idxd.c/Makefile
new file mode 100644
index 000000000..73fdbe3e4
--- /dev/null
+++ b/src/spdk/test/unit/lib/idxd/idxd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = idxd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/idxd/idxd.c/idxd_ut.c b/src/spdk/test/unit/lib/idxd/idxd.c/idxd_ut.c
new file mode 100644
index 000000000..0eed4273a
--- /dev/null
+++ b/src/spdk/test/unit/lib/idxd/idxd.c/idxd_ut.c
@@ -0,0 +1,300 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+#include "spdk_internal/idxd.h"
+#include "common/lib/test_env.c"
+#include "idxd/idxd.c"
+
+#define FAKE_REG_SIZE 0x800
+#define NUM_GROUPS 4
+#define NUM_WQ_PER_GROUP 1
+#define NUM_ENGINES_PER_GROUP 1
+#define TOTAL_WQS (NUM_GROUPS * NUM_WQ_PER_GROUP)
+#define TOTAL_ENGINES (NUM_GROUPS * NUM_ENGINES_PER_GROUP)
+
+int
+spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ return -1;
+}
+
+int
+spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
+ void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
+{
+ *mapped_addr = NULL;
+ *phys_addr = 0;
+ *size = 0;
+ return 0;
+}
+
+int
+spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
+{
+ return 0;
+}
+
+int
+spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value,
+ uint32_t offset)
+{
+ *value = 0xFFFFFFFFu;
+ return 0;
+}
+
+int
+spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value,
+ uint32_t offset)
+{
+ return 0;
+}
+
+#define movdir64b mock_movdir64b
+static inline void
+mock_movdir64b(void *dst, const void *src)
+{
+ return;
+}
+
+#define WQ_CFG_OFFSET 0x500
+#define TOTAL_WQE_SIZE 0x40
+static int
+test_idxd_wq_config(void)
+{
+ struct spdk_idxd_device idxd = {};
+ union idxd_wqcfg wqcfg = {};
+ uint32_t expected[8] = {0x10, 0, 0x11, 0x11e, 0, 0, 0x40000000, 0};
+ uint32_t wq_size;
+ int rc, i, j;
+
+ idxd.reg_base = calloc(1, FAKE_REG_SIZE);
+ SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);
+
+ g_dev_cfg = &g_dev_cfg0;
+ idxd.registers.wqcap.total_wq_size = TOTAL_WQE_SIZE;
+ idxd.registers.wqcap.num_wqs = TOTAL_WQS;
+ idxd.registers.gencap.max_batch_shift = LOG2_WQ_MAX_BATCH;
+ idxd.registers.gencap.max_xfer_shift = LOG2_WQ_MAX_XFER;
+ idxd.wqcfg_offset = WQ_CFG_OFFSET;
+ wq_size = idxd.registers.wqcap.total_wq_size / g_dev_cfg->total_wqs;
+
+ rc = idxd_wq_config(&idxd);
+ CU_ASSERT(rc == 0);
+ for (i = 0; i < g_dev_cfg->total_wqs; i++) {
+ CU_ASSERT(idxd.queues[i].wqcfg.wq_size == wq_size);
+ CU_ASSERT(idxd.queues[i].wqcfg.mode == WQ_MODE_DEDICATED);
+ CU_ASSERT(idxd.queues[i].wqcfg.max_batch_shift == LOG2_WQ_MAX_BATCH);
+ CU_ASSERT(idxd.queues[i].wqcfg.max_xfer_shift == LOG2_WQ_MAX_XFER);
+ CU_ASSERT(idxd.queues[i].wqcfg.wq_state == WQ_ENABLED);
+ CU_ASSERT(idxd.queues[i].wqcfg.priority == WQ_PRIORITY_1);
+ CU_ASSERT(idxd.queues[i].idxd == &idxd);
+ CU_ASSERT(idxd.queues[i].group == &idxd.groups[i % g_dev_cfg->num_groups]);
+ }
+
+ for (i = 0 ; i < idxd.registers.wqcap.num_wqs; i++) {
+ for (j = 0 ; j < WQCFG_NUM_DWORDS; j++) {
+ wqcfg.raw[j] = spdk_mmio_read_4((uint32_t *)(idxd.reg_base + idxd.wqcfg_offset +
+ i * 32 + j * 4));
+ CU_ASSERT(wqcfg.raw[j] == expected[j]);
+ }
+ }
+
+ free(idxd.queues);
+ free(idxd.reg_base);
+
+ return 0;
+}
+
+#define GRP_CFG_OFFSET 0x400
+#define MAX_TOKENS 0x40
+static int
+test_idxd_group_config(void)
+{
+ struct spdk_idxd_device idxd = {};
+ uint64_t wqs[NUM_GROUPS] = {};
+ uint64_t engines[NUM_GROUPS] = {};
+ union idxd_group_flags flags[NUM_GROUPS] = {};
+ int rc, i;
+ uint64_t base_offset;
+
+ idxd.reg_base = calloc(1, FAKE_REG_SIZE);
+ SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);
+
+ g_dev_cfg = &g_dev_cfg0;
+ idxd.registers.groupcap.num_groups = NUM_GROUPS;
+ idxd.registers.enginecap.num_engines = TOTAL_ENGINES;
+ idxd.registers.wqcap.num_wqs = TOTAL_WQS;
+ idxd.registers.groupcap.total_tokens = MAX_TOKENS;
+ idxd.grpcfg_offset = GRP_CFG_OFFSET;
+
+ rc = idxd_group_config(&idxd);
+ CU_ASSERT(rc == 0);
+ for (i = 0 ; i < idxd.registers.groupcap.num_groups; i++) {
+ base_offset = idxd.grpcfg_offset + i * 64;
+
+ wqs[i] = spdk_mmio_read_8((uint64_t *)(idxd.reg_base + base_offset));
+ engines[i] = spdk_mmio_read_8((uint64_t *)(idxd.reg_base + base_offset + CFG_ENGINE_OFFSET));
+ flags[i].raw = spdk_mmio_read_8((uint64_t *)(idxd.reg_base + base_offset + CFG_FLAG_OFFSET));
+ }
+ /* The wq and engine arrays are indexed by group id and are bitmaps of assigned elements. */
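+ /* The asserts below expect one WQ and one engine assigned to group 0 (bitmap 0x1) */
+ /* and to group 1 (bitmap 0x2), with the 0x40 tokens split evenly across the */
+ /* groups: 0x40 / NUM_GROUPS = 0x10 tokens per group. */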
+ CU_ASSERT(wqs[0] == 0x1);
+ CU_ASSERT(engines[0] == 0x1);
+ CU_ASSERT(wqs[1] == 0x2);
+ CU_ASSERT(engines[1] == 0x2);
+ CU_ASSERT(flags[0].tokens_allowed == MAX_TOKENS / NUM_GROUPS);
+ CU_ASSERT(flags[1].tokens_allowed == MAX_TOKENS / NUM_GROUPS);
+
+ /* The groups array is allocated by the code under test. */
+ free(idxd.groups);
+ free(idxd.reg_base);
+
+ return 0;
+}
+
+static int
+test_idxd_reset_dev(void)
+{
+ struct spdk_idxd_device idxd = {};
+ union idxd_cmdsts_reg *fake_cmd_status_reg;
+ int rc;
+
+ idxd.reg_base = calloc(1, FAKE_REG_SIZE);
+ SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);
+ fake_cmd_status_reg = idxd.reg_base + IDXD_CMDSTS_OFFSET;
+
+ /* Test happy path */
+ rc = idxd_reset_dev(&idxd);
+ CU_ASSERT(rc == 0);
+
+ /* Test error reported path */
+ fake_cmd_status_reg->err = 1;
+ rc = idxd_reset_dev(&idxd);
+ CU_ASSERT(rc == -EINVAL);
+
+ free(idxd.reg_base);
+
+ return 0;
+}
+
+static int
+test_idxd_wait_cmd(void)
+{
+ struct spdk_idxd_device idxd = {};
+ int timeout = 1;
+ union idxd_cmdsts_reg *fake_cmd_status_reg;
+ int rc;
+
+ idxd.reg_base = calloc(1, FAKE_REG_SIZE);
+ SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);
+ fake_cmd_status_reg = idxd.reg_base + IDXD_CMDSTS_OFFSET;
+
+ /* Test happy path. */
+ rc = idxd_wait_cmd(&idxd, timeout);
+ CU_ASSERT(rc == 0);
+
+ /* Set up our fake register to set the error bit. */
+ fake_cmd_status_reg->err = 1;
+ rc = idxd_wait_cmd(&idxd, timeout);
+ CU_ASSERT(rc == -EINVAL);
+ fake_cmd_status_reg->err = 0;
+
+ /* Set up our fake register to set the active bit. */
+ fake_cmd_status_reg->active = 1;
+ rc = idxd_wait_cmd(&idxd, timeout);
+ CU_ASSERT(rc == -EBUSY);
+
+ free(idxd.reg_base);
+
+ return 0;
+}
+
+static int
+test_spdk_idxd_set_config(void)
+{
+
+ g_dev_cfg = NULL;
+ spdk_idxd_set_config(0);
+ SPDK_CU_ASSERT_FATAL(g_dev_cfg != NULL);
+ CU_ASSERT(memcmp(&g_dev_cfg0, g_dev_cfg, sizeof(struct device_config)) == 0);
+
+ return 0;
+}
+
+static int
+test_spdk_idxd_reconfigure_chan(void)
+{
+ struct spdk_idxd_io_channel chan = {};
+ int rc;
+ uint32_t test_ring_size = 8;
+ uint32_t num_channels = 2;
+
+ chan.ring_ctrl.ring_slots = spdk_bit_array_create(test_ring_size);
+ chan.ring_ctrl.ring_size = test_ring_size;
+ chan.ring_ctrl.completions = spdk_zmalloc(test_ring_size * sizeof(struct idxd_hw_desc), 0, NULL,
+ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ SPDK_CU_ASSERT_FATAL(chan.ring_ctrl.completions != NULL);
+
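+ /* Splitting the ring across two channels should leave each channel with half the slots. */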
+ rc = spdk_idxd_reconfigure_chan(&chan, num_channels);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(chan.ring_ctrl.max_ring_slots == test_ring_size / num_channels);
+
+ spdk_bit_array_free(&chan.ring_ctrl.ring_slots);
+ spdk_free(chan.ring_ctrl.completions);
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("idxd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_spdk_idxd_reconfigure_chan);
+ CU_ADD_TEST(suite, test_spdk_idxd_set_config);
+ CU_ADD_TEST(suite, test_idxd_wait_cmd);
+ CU_ADD_TEST(suite, test_idxd_reset_dev);
+ CU_ADD_TEST(suite, test_idxd_group_config);
+ CU_ADD_TEST(suite, test_idxd_wq_config);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ioat/Makefile b/src/spdk/test/unit/lib/ioat/Makefile
new file mode 100644
index 000000000..8d982710e
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = ioat.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/ioat/ioat.c/.gitignore b/src/spdk/test/unit/lib/ioat/ioat.c/.gitignore
new file mode 100644
index 000000000..deefbf0c1
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/ioat.c/.gitignore
@@ -0,0 +1 @@
+ioat_ut
diff --git a/src/spdk/test/unit/lib/ioat/ioat.c/Makefile b/src/spdk/test/unit/lib/ioat/ioat.c/Makefile
new file mode 100644
index 000000000..8b685ce0b
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/ioat.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ioat_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c b/src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c
new file mode 100644
index 000000000..abe13c2b9
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c
@@ -0,0 +1,144 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "ioat/ioat.c"
+
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+
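+/* Minimal PCI stubs so ioat.c can be compiled and exercised without real hardware. */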
+int
+spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ return -1;
+}
+
+int
+spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
+ void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
+{
+ *mapped_addr = NULL;
+ *phys_addr = 0;
+ *size = 0;
+ return 0;
+}
+
+int
+spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
+{
+ return 0;
+}
+
+int
+spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value,
+ uint32_t offset)
+{
+ *value = 0xFFFFFFFFu;
+ return 0;
+}
+
+int
+spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value,
+ uint32_t offset)
+{
+ return 0;
+}
+
+static void ioat_state_check(void)
+{
+ /*
+ * CHANSTS's STATUS field is 3 bits (8 possible values), but only has 5 valid states:
+ * ACTIVE 0x0
+ * IDLE 0x1
+ * SUSPENDED 0x2
+ * HALTED 0x3
+ * ARMED 0x4
+ */
+
+ CU_ASSERT(is_ioat_active(0) == 1); /* ACTIVE */
+ CU_ASSERT(is_ioat_active(1) == 0); /* IDLE */
+ CU_ASSERT(is_ioat_active(2) == 0); /* SUSPENDED */
+ CU_ASSERT(is_ioat_active(3) == 0); /* HALTED */
+ CU_ASSERT(is_ioat_active(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_active(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_active(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_active(7) == 0); /* reserved */
+
+ CU_ASSERT(is_ioat_idle(0) == 0); /* ACTIVE */
+ CU_ASSERT(is_ioat_idle(1) == 1); /* IDLE */
+ CU_ASSERT(is_ioat_idle(2) == 0); /* SUSPENDED */
+ CU_ASSERT(is_ioat_idle(3) == 0); /* HALTED */
+ CU_ASSERT(is_ioat_idle(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_idle(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_idle(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_idle(7) == 0); /* reserved */
+
+ CU_ASSERT(is_ioat_suspended(0) == 0); /* ACTIVE */
+ CU_ASSERT(is_ioat_suspended(1) == 0); /* IDLE */
+ CU_ASSERT(is_ioat_suspended(2) == 1); /* SUSPENDED */
+ CU_ASSERT(is_ioat_suspended(3) == 0); /* HALTED */
+ CU_ASSERT(is_ioat_suspended(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_suspended(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_suspended(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_suspended(7) == 0); /* reserved */
+
+ CU_ASSERT(is_ioat_halted(0) == 0); /* ACTIVE */
+ CU_ASSERT(is_ioat_halted(1) == 0); /* IDLE */
+ CU_ASSERT(is_ioat_halted(2) == 0); /* SUSPENDED */
+ CU_ASSERT(is_ioat_halted(3) == 1); /* HALTED */
+ CU_ASSERT(is_ioat_halted(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_halted(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_halted(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_halted(7) == 0); /* reserved */
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("ioat", NULL, NULL);
+
+ CU_ADD_TEST(suite, ioat_state_check);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/Makefile b/src/spdk/test/unit/lib/iscsi/Makefile
new file mode 100644
index 000000000..396c5a055
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = conn.c init_grp.c iscsi.c param.c portal_grp.c tgt_node.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/iscsi/common.c b/src/spdk/test/unit/lib/iscsi/common.c
new file mode 100644
index 000000000..e6631848a
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/common.c
@@ -0,0 +1,209 @@
+#include "iscsi/task.h"
+#include "iscsi/iscsi.h"
+#include "iscsi/conn.h"
+
+#include "spdk/env.h"
+#include "spdk/sock.h"
+#include "spdk_cunit.h"
+
+#include "spdk_internal/log.h"
+#include "spdk_internal/mock.h"
+
+#include "scsi/scsi_internal.h"
+
+SPDK_LOG_REGISTER_COMPONENT("iscsi", SPDK_LOG_ISCSI)
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+TAILQ_HEAD(, spdk_iscsi_pdu) g_write_pdu_list = TAILQ_HEAD_INITIALIZER(g_write_pdu_list);
+
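+/* These flags let tests simulate task/PDU pool exhaustion; the allocators below return NULL when they are set. */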
+static bool g_task_pool_is_empty = false;
+static bool g_pdu_pool_is_empty = false;
+
+struct spdk_iscsi_task *
+iscsi_task_get(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_task *parent,
+ spdk_scsi_task_cpl cpl_fn)
+{
+ struct spdk_iscsi_task *task;
+
+ if (g_task_pool_is_empty) {
+ return NULL;
+ }
+
+ task = calloc(1, sizeof(*task));
+ if (!task) {
+ return NULL;
+ }
+
+ task->conn = conn;
+ task->scsi.cpl_fn = cpl_fn;
+ if (parent) {
+ parent->scsi.ref++;
+ task->parent = parent;
+ task->tag = parent->tag;
+ task->lun_id = parent->lun_id;
+ task->scsi.dxfer_dir = parent->scsi.dxfer_dir;
+ task->scsi.transfer_len = parent->scsi.transfer_len;
+ task->scsi.lun = parent->scsi.lun;
+ task->scsi.cdb = parent->scsi.cdb;
+ task->scsi.target_port = parent->scsi.target_port;
+ task->scsi.initiator_port = parent->scsi.initiator_port;
+ if (conn && (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV)) {
+ conn->data_in_cnt++;
+ }
+ }
+
+ task->scsi.iovs = &task->scsi.iov;
+ return task;
+}
+
+void
+spdk_scsi_task_put(struct spdk_scsi_task *task)
+{
+ free(task);
+}
+
+void
+iscsi_put_pdu(struct spdk_iscsi_pdu *pdu)
+{
+ if (!pdu) {
+ return;
+ }
+
+ pdu->ref--;
+ if (pdu->ref < 0) {
+ CU_FAIL("negative ref count");
+ pdu->ref = 0;
+ }
+
+ if (pdu->ref == 0) {
+ if (pdu->data && !pdu->data_from_mempool) {
+ free(pdu->data);
+ }
+ free(pdu);
+ }
+}
+
+struct spdk_iscsi_pdu *
+iscsi_get_pdu(struct spdk_iscsi_conn *conn)
+{
+ struct spdk_iscsi_pdu *pdu;
+
+ assert(conn != NULL);
+ if (g_pdu_pool_is_empty) {
+ return NULL;
+ }
+
+ pdu = malloc(sizeof(*pdu));
+ if (!pdu) {
+ return NULL;
+ }
+
+ memset(pdu, 0, offsetof(struct spdk_iscsi_pdu, ahs));
+ pdu->ref = 1;
+ pdu->conn = conn;
+
+ return pdu;
+}
+
+DEFINE_STUB_V(spdk_scsi_task_process_null_lun, (struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(spdk_scsi_task_process_abort, (struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(spdk_scsi_dev_queue_task,
+ (struct spdk_scsi_dev *dev, struct spdk_scsi_task *task));
+
+DEFINE_STUB(spdk_scsi_dev_find_port_by_id, struct spdk_scsi_port *,
+ (struct spdk_scsi_dev *dev, uint64_t id), NULL);
+
+DEFINE_STUB_V(spdk_scsi_dev_queue_mgmt_task,
+ (struct spdk_scsi_dev *dev, struct spdk_scsi_task *task));
+
+const char *
+spdk_scsi_dev_get_name(const struct spdk_scsi_dev *dev)
+{
+ if (dev != NULL) {
+ return dev->name;
+ }
+
+ return NULL;
+}
+
+DEFINE_STUB(spdk_scsi_dev_construct, struct spdk_scsi_dev *,
+ (const char *name, const char **bdev_name_list,
+ int *lun_id_list, int num_luns, uint8_t protocol_id,
+ void (*hotremove_cb)(const struct spdk_scsi_lun *, void *),
+ void *hotremove_ctx),
+ NULL);
+
+DEFINE_STUB_V(spdk_scsi_dev_destruct,
+ (struct spdk_scsi_dev *dev, spdk_scsi_dev_destruct_cb_t cb_fn, void *cb_arg));
+
+DEFINE_STUB(spdk_scsi_dev_add_port, int,
+ (struct spdk_scsi_dev *dev, uint64_t id, const char *name), 0);
+
+DEFINE_STUB(iscsi_drop_conns, int,
+ (struct spdk_iscsi_conn *conn, const char *conn_match, int drop_all),
+ 0);
+
+DEFINE_STUB(spdk_scsi_dev_delete_port, int,
+ (struct spdk_scsi_dev *dev, uint64_t id), 0);
+
+DEFINE_STUB_V(shutdown_iscsi_conns, (void));
+
+DEFINE_STUB_V(iscsi_conns_request_logout, (struct spdk_iscsi_tgt_node *target));
+
+DEFINE_STUB(iscsi_get_active_conns, int, (struct spdk_iscsi_tgt_node *target), 0);
+
+void
+iscsi_task_cpl(struct spdk_scsi_task *scsi_task)
+{
+ struct spdk_iscsi_task *iscsi_task;
+
+ if (scsi_task != NULL) {
+ iscsi_task = iscsi_task_from_scsi_task(scsi_task);
+ if (iscsi_task->parent && (iscsi_task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV)) {
+ assert(iscsi_task->conn->data_in_cnt > 0);
+ iscsi_task->conn->data_in_cnt--;
+ }
+
+ free(iscsi_task);
+ }
+}
+
+DEFINE_STUB_V(iscsi_task_mgmt_cpl, (struct spdk_scsi_task *scsi_task));
+
+DEFINE_STUB(iscsi_conn_read_data, int,
+ (struct spdk_iscsi_conn *conn, int bytes, void *buf), 0);
+
+DEFINE_STUB(iscsi_conn_readv_data, int,
+ (struct spdk_iscsi_conn *conn, struct iovec *iov, int iovcnt), 0);
+
+void
+iscsi_conn_write_pdu(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu,
+ iscsi_conn_xfer_complete_cb cb_fn, void *cb_arg)
+{
+ TAILQ_INSERT_TAIL(&g_write_pdu_list, pdu, tailq);
+}
+
+DEFINE_STUB_V(iscsi_conn_logout, (struct spdk_iscsi_conn *conn));
+
+DEFINE_STUB_V(spdk_scsi_task_set_status,
+ (struct spdk_scsi_task *task, int sc, int sk, int asc, int ascq));
+
+void
+spdk_scsi_task_set_data(struct spdk_scsi_task *task, void *data, uint32_t len)
+{
+ SPDK_CU_ASSERT_FATAL(task->iovs != NULL);
+ task->iovs[0].iov_base = data;
+ task->iovs[0].iov_len = len;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/conn.c/.gitignore b/src/spdk/test/unit/lib/iscsi/conn.c/.gitignore
new file mode 100644
index 000000000..3bb0afd8a
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/conn.c/.gitignore
@@ -0,0 +1 @@
+conn_ut
diff --git a/src/spdk/test/unit/lib/iscsi/conn.c/Makefile b/src/spdk/test/unit/lib/iscsi/conn.c/Makefile
new file mode 100644
index 000000000..0c208d888
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/conn.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = conn_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c b/src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c
new file mode 100644
index 000000000..967e16ec1
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c
@@ -0,0 +1,927 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "common/lib/test_env.c"
+#include "spdk_cunit.h"
+
+#include "iscsi/conn.c"
+
+#include "spdk_internal/mock.h"
+
+SPDK_LOG_REGISTER_COMPONENT("iscsi", SPDK_LOG_ISCSI)
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+struct spdk_scsi_lun {
+ uint8_t reserved;
+};
+
+struct spdk_iscsi_globals g_iscsi;
+static TAILQ_HEAD(read_tasks_head, spdk_iscsi_task) g_ut_read_tasks =
+ TAILQ_HEAD_INITIALIZER(g_ut_read_tasks);
+static struct spdk_iscsi_task *g_new_task = NULL;
+static ssize_t g_sock_writev_bytes = 0;
+
+DEFINE_STUB(spdk_app_get_shm_id, int, (void), 0);
+
+DEFINE_STUB(spdk_sock_getaddr, int,
+ (struct spdk_sock *sock, char *saddr, int slen, uint16_t *sport,
+ char *caddr, int clen, uint16_t *cport),
+ 0);
+
+int
+spdk_sock_close(struct spdk_sock **sock)
+{
+ *sock = NULL;
+ return 0;
+}
+
+DEFINE_STUB(spdk_sock_recv, ssize_t,
+ (struct spdk_sock *sock, void *buf, size_t len), 0);
+
+DEFINE_STUB(spdk_sock_readv, ssize_t,
+ (struct spdk_sock *sock, struct iovec *iov, int iovcnt), 0);
+
+ssize_t
+spdk_sock_writev(struct spdk_sock *sock, struct iovec *iov, int iovcnt)
+{
+ return g_sock_writev_bytes;
+}
+
+DEFINE_STUB(spdk_sock_set_recvlowat, int, (struct spdk_sock *s, int nbytes), 0);
+
+DEFINE_STUB(spdk_sock_set_recvbuf, int, (struct spdk_sock *sock, int sz), 0);
+
+DEFINE_STUB(spdk_sock_set_sendbuf, int, (struct spdk_sock *sock, int sz), 0);
+
+DEFINE_STUB(spdk_sock_group_add_sock, int,
+ (struct spdk_sock_group *group, struct spdk_sock *sock,
+ spdk_sock_cb cb_fn, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_sock_group_remove_sock, int,
+ (struct spdk_sock_group *group, struct spdk_sock *sock), 0);
+
+struct spdk_iscsi_task *
+iscsi_task_get(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_task *parent,
+ spdk_scsi_task_cpl cpl_fn)
+{
+ struct spdk_iscsi_task *task;
+
+ task = g_new_task;
+ if (task == NULL) {
+ return NULL;
+ }
+ memset(task, 0, sizeof(*task));
+
+ task->scsi.ref = 1;
+ task->conn = conn;
+ task->scsi.cpl_fn = cpl_fn;
+ if (parent) {
+ parent->scsi.ref++;
+ task->parent = parent;
+ task->scsi.dxfer_dir = parent->scsi.dxfer_dir;
+ task->scsi.transfer_len = parent->scsi.transfer_len;
+ task->scsi.lun = parent->scsi.lun;
+ if (conn && (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV)) {
+ conn->data_in_cnt++;
+ }
+ }
+
+ return task;
+}
+
+void
+spdk_scsi_task_put(struct spdk_scsi_task *scsi_task)
+{
+ struct spdk_iscsi_task *task;
+
+ CU_ASSERT(scsi_task->ref > 0);
+ scsi_task->ref--;
+
+ task = iscsi_task_from_scsi_task(scsi_task);
+ if (task->parent) {
+ spdk_scsi_task_put(&task->parent->scsi);
+ }
+}
+
+DEFINE_STUB(spdk_scsi_dev_get_lun, struct spdk_scsi_lun *,
+ (struct spdk_scsi_dev *dev, int lun_id), NULL);
+
+DEFINE_STUB(spdk_scsi_dev_has_pending_tasks, bool,
+ (const struct spdk_scsi_dev *dev, const struct spdk_scsi_port *initiator_port),
+ true);
+
+DEFINE_STUB(spdk_scsi_lun_open, int,
+ (struct spdk_scsi_lun *lun, spdk_scsi_lun_remove_cb_t hotremove_cb,
+ void *hotremove_ctx, struct spdk_scsi_lun_desc **desc),
+ 0);
+
+DEFINE_STUB_V(spdk_scsi_lun_close, (struct spdk_scsi_lun_desc *desc));
+
+DEFINE_STUB(spdk_scsi_lun_allocate_io_channel, int,
+ (struct spdk_scsi_lun_desc *desc), 0);
+
+DEFINE_STUB_V(spdk_scsi_lun_free_io_channel, (struct spdk_scsi_lun_desc *desc));
+
+DEFINE_STUB(spdk_scsi_lun_get_id, int, (const struct spdk_scsi_lun *lun), 0);
+
+DEFINE_STUB(spdk_scsi_port_get_name, const char *,
+ (const struct spdk_scsi_port *port), NULL);
+
+void
+spdk_scsi_task_copy_status(struct spdk_scsi_task *dst,
+ struct spdk_scsi_task *src)
+{
+ dst->status = src->status;
+}
+
+DEFINE_STUB_V(spdk_scsi_task_set_data, (struct spdk_scsi_task *task, void *data, uint32_t len));
+
+DEFINE_STUB_V(spdk_scsi_task_process_null_lun, (struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(spdk_scsi_task_process_abort, (struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(iscsi_put_pdu, (struct spdk_iscsi_pdu *pdu));
+
+DEFINE_STUB_V(iscsi_param_free, (struct iscsi_param *params));
+
+DEFINE_STUB(iscsi_conn_params_init, int, (struct iscsi_param **params), 0);
+
+DEFINE_STUB_V(iscsi_clear_all_transfer_task,
+ (struct spdk_iscsi_conn *conn, struct spdk_scsi_lun *lun,
+ struct spdk_iscsi_pdu *pdu));
+
+DEFINE_STUB(iscsi_build_iovs, int,
+ (struct spdk_iscsi_conn *conn, struct iovec *iov, int num_iovs,
+ struct spdk_iscsi_pdu *pdu, uint32_t *mapped_length),
+ 0);
+
+DEFINE_STUB_V(iscsi_queue_task,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_task *task));
+
+DEFINE_STUB_V(iscsi_task_response,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_task *task));
+
+DEFINE_STUB_V(iscsi_task_mgmt_response,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_task *task));
+
+DEFINE_STUB_V(iscsi_send_nopin, (struct spdk_iscsi_conn *conn));
+
+bool
+iscsi_del_transfer_task(struct spdk_iscsi_conn *conn, uint32_t task_tag)
+{
+ struct spdk_iscsi_task *task;
+
+ task = TAILQ_FIRST(&conn->active_r2t_tasks);
+ if (task == NULL || task->tag != task_tag) {
+ return false;
+ }
+
+ TAILQ_REMOVE(&conn->active_r2t_tasks, task, link);
+ task->is_r2t_active = false;
+ iscsi_task_put(task);
+
+ return true;
+}
+
+DEFINE_STUB(iscsi_handle_incoming_pdus, int, (struct spdk_iscsi_conn *conn), 0);
+
+DEFINE_STUB_V(iscsi_free_sess, (struct spdk_iscsi_sess *sess));
+
+DEFINE_STUB(iscsi_tgt_node_cleanup_luns, int,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_tgt_node *target),
+ 0);
+
+DEFINE_STUB(iscsi_pdu_calc_header_digest, uint32_t,
+ (struct spdk_iscsi_pdu *pdu), 0);
+
+DEFINE_STUB(spdk_iscsi_pdu_calc_data_digest, uint32_t,
+ (struct spdk_iscsi_pdu *pdu), 0);
+
+DEFINE_STUB_V(shutdown_iscsi_conns_done, (void));
+
+static struct spdk_iscsi_task *
+ut_conn_task_get(struct spdk_iscsi_task *parent)
+{
+ struct spdk_iscsi_task *task;
+
+ task = calloc(1, sizeof(*task));
+ SPDK_CU_ASSERT_FATAL(task != NULL);
+
+ task->scsi.ref = 1;
+
+ if (parent) {
+ task->parent = parent;
+ parent->scsi.ref++;
+ }
+ return task;
+}
+
+static void
+ut_conn_create_read_tasks(struct spdk_iscsi_task *primary)
+{
+ struct spdk_iscsi_task *subtask;
+ uint32_t remaining_size = 0;
+
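+ /* Split the primary read into SPDK_BDEV_LARGE_BUF_MAX_SIZE-sized subtasks and queue them on g_ut_read_tasks. */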
+ while (1) {
+ if (primary->current_datain_offset < primary->scsi.transfer_len) {
+ remaining_size = primary->scsi.transfer_len - primary->current_datain_offset;
+
+ subtask = ut_conn_task_get(primary);
+
+ subtask->scsi.offset = primary->current_datain_offset;
+ subtask->scsi.length = spdk_min(SPDK_BDEV_LARGE_BUF_MAX_SIZE, remaining_size);
+ subtask->scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ primary->current_datain_offset += subtask->scsi.length;
+
+ TAILQ_INSERT_TAIL(&g_ut_read_tasks, subtask, link);
+ }
+
+ if (primary->current_datain_offset == primary->scsi.transfer_len) {
+ break;
+ }
+ }
+}
+
+static void
+read_task_split_in_order_case(void)
+{
+ struct spdk_iscsi_task primary = {};
+ struct spdk_iscsi_task *task, *tmp;
+
+ primary.scsi.transfer_len = SPDK_BDEV_LARGE_BUF_MAX_SIZE * 8;
+ TAILQ_INIT(&primary.subtask_list);
+ primary.current_datain_offset = 0;
+ primary.bytes_completed = 0;
+ primary.scsi.ref = 1;
+
+ ut_conn_create_read_tasks(&primary);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_ut_read_tasks));
+
+ TAILQ_FOREACH(task, &g_ut_read_tasks, link) {
+ CU_ASSERT(&primary == iscsi_task_get_primary(task));
+ process_read_task_completion(NULL, task, &primary);
+ }
+
+ CU_ASSERT(primary.bytes_completed == primary.scsi.transfer_len);
+ CU_ASSERT(primary.scsi.ref == 0);
+
+ TAILQ_FOREACH_SAFE(task, &g_ut_read_tasks, link, tmp) {
+ CU_ASSERT(task->scsi.ref == 0);
+ TAILQ_REMOVE(&g_ut_read_tasks, task, link);
+ free(task);
+ }
+}
+
+static void
+read_task_split_reverse_order_case(void)
+{
+ struct spdk_iscsi_task primary = {};
+ struct spdk_iscsi_task *task, *tmp;
+
+ primary.scsi.transfer_len = SPDK_BDEV_LARGE_BUF_MAX_SIZE * 8;
+ TAILQ_INIT(&primary.subtask_list);
+ primary.current_datain_offset = 0;
+ primary.bytes_completed = 0;
+ primary.scsi.ref = 1;
+
+ ut_conn_create_read_tasks(&primary);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_ut_read_tasks));
+
+ TAILQ_FOREACH_REVERSE(task, &g_ut_read_tasks, read_tasks_head, link) {
+ CU_ASSERT(&primary == iscsi_task_get_primary(task));
+ process_read_task_completion(NULL, task, &primary);
+ }
+
+ CU_ASSERT(primary.bytes_completed == primary.scsi.transfer_len);
+ CU_ASSERT(primary.scsi.ref == 0);
+
+ TAILQ_FOREACH_SAFE(task, &g_ut_read_tasks, link, tmp) {
+ CU_ASSERT(task->scsi.ref == 0);
+ TAILQ_REMOVE(&g_ut_read_tasks, task, link);
+ free(task);
+ }
+}
+
+static void
+propagate_scsi_error_status_for_split_read_tasks(void)
+{
+ struct spdk_iscsi_task primary = {};
+ struct spdk_iscsi_task task1 = {}, task2 = {}, task3 = {}, task4 = {}, task5 = {}, task6 = {};
+
+ primary.scsi.transfer_len = 512 * 6;
+ primary.rsp_scsi_status = SPDK_SCSI_STATUS_GOOD;
+ TAILQ_INIT(&primary.subtask_list);
+ primary.scsi.ref = 7;
+
+ task1.scsi.offset = 0;
+ task1.scsi.length = 512;
+ task1.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task1.scsi.ref = 1;
+ task1.parent = &primary;
+
+ task2.scsi.offset = 512;
+ task2.scsi.length = 512;
+ task2.scsi.status = SPDK_SCSI_STATUS_CHECK_CONDITION;
+ task2.scsi.ref = 1;
+ task2.parent = &primary;
+
+ task3.scsi.offset = 512 * 2;
+ task3.scsi.length = 512;
+ task3.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task3.scsi.ref = 1;
+ task3.parent = &primary;
+
+ task4.scsi.offset = 512 * 3;
+ task4.scsi.length = 512;
+ task4.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task4.scsi.ref = 1;
+ task4.parent = &primary;
+
+ task5.scsi.offset = 512 * 4;
+ task5.scsi.length = 512;
+ task5.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task5.scsi.ref = 1;
+ task5.parent = &primary;
+
+ task6.scsi.offset = 512 * 5;
+ task6.scsi.length = 512;
+ task6.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task6.scsi.ref = 1;
+ task6.parent = &primary;
+
+ /* task2 has check condition status; verify that the check condition status
+ * is propagated correctly to the remaining tasks when these tasks complete
+ * in the following order: task4, task3, task2, task1, primary, task5, and task6.
+ */
+ process_read_task_completion(NULL, &task4, &primary);
+ process_read_task_completion(NULL, &task3, &primary);
+ process_read_task_completion(NULL, &task2, &primary);
+ process_read_task_completion(NULL, &task1, &primary);
+ process_read_task_completion(NULL, &task5, &primary);
+ process_read_task_completion(NULL, &task6, &primary);
+
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task1.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task2.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task3.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task4.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task5.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task6.scsi.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(primary.bytes_completed == primary.scsi.transfer_len);
+ CU_ASSERT(TAILQ_EMPTY(&primary.subtask_list));
+ CU_ASSERT(primary.scsi.ref == 0);
+ CU_ASSERT(task1.scsi.ref == 0);
+ CU_ASSERT(task2.scsi.ref == 0);
+ CU_ASSERT(task3.scsi.ref == 0);
+ CU_ASSERT(task4.scsi.ref == 0);
+ CU_ASSERT(task5.scsi.ref == 0);
+ CU_ASSERT(task6.scsi.ref == 0);
+}
+
+static void
+process_non_read_task_completion_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task primary = {};
+ struct spdk_iscsi_task task = {};
+
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ primary.bytes_completed = 0;
+ primary.scsi.transfer_len = 4096 * 3;
+ primary.rsp_scsi_status = SPDK_SCSI_STATUS_GOOD;
+ primary.scsi.ref = 1;
+ TAILQ_INSERT_TAIL(&conn.active_r2t_tasks, &primary, link);
+ primary.is_r2t_active = true;
+ primary.tag = 1;
+
+ /* First subtask which failed. */
+ task.scsi.length = 4096;
+ task.scsi.data_transferred = 4096;
+ task.scsi.status = SPDK_SCSI_STATUS_CHECK_CONDITION;
+ task.scsi.ref = 1;
+ task.parent = &primary;
+ primary.scsi.ref++;
+
+ process_non_read_task_completion(&conn, &task, &primary);
+ CU_ASSERT(!TAILQ_EMPTY(&conn.active_r2t_tasks));
+ CU_ASSERT(primary.bytes_completed == 4096);
+ CU_ASSERT(primary.scsi.data_transferred == 0);
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.scsi.ref == 0);
+ CU_ASSERT(primary.scsi.ref == 1);
+
+ /* Second subtask which succeeded. */
+ task.scsi.length = 4096;
+ task.scsi.data_transferred = 4096;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task.scsi.ref = 1;
+ task.parent = &primary;
+ primary.scsi.ref++;
+
+ process_non_read_task_completion(&conn, &task, &primary);
+ CU_ASSERT(!TAILQ_EMPTY(&conn.active_r2t_tasks));
+ CU_ASSERT(primary.bytes_completed == 4096 * 2);
+ CU_ASSERT(primary.scsi.data_transferred == 4096);
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.scsi.ref == 0);
+ CU_ASSERT(primary.scsi.ref == 1);
+
+ /* Third and final subtask which succeeded. */
+ task.scsi.length = 4096;
+ task.scsi.data_transferred = 4096;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ task.scsi.ref = 1;
+ task.parent = &primary;
+ primary.scsi.ref++;
+
+ process_non_read_task_completion(&conn, &task, &primary);
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+ CU_ASSERT(primary.bytes_completed == 4096 * 3);
+ CU_ASSERT(primary.scsi.data_transferred == 4096 * 2);
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.scsi.ref == 0);
+ CU_ASSERT(primary.scsi.ref == 0);
+
+ /* Tricky case when the last task completed was the initial task. */
+ primary.scsi.length = 4096;
+ primary.bytes_completed = 4096 * 2;
+ primary.scsi.data_transferred = 4096 * 2;
+ primary.scsi.transfer_len = 4096 * 3;
+ primary.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ primary.rsp_scsi_status = SPDK_SCSI_STATUS_GOOD;
+ primary.scsi.ref = 2;
+ TAILQ_INSERT_TAIL(&conn.active_r2t_tasks, &primary, link);
+ primary.is_r2t_active = true;
+
+ process_non_read_task_completion(&conn, &primary, &primary);
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+ CU_ASSERT(primary.bytes_completed == 4096 * 3);
+ CU_ASSERT(primary.scsi.data_transferred == 4096 * 2);
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(primary.scsi.ref == 0);
+
+ /* Further tricky case when the last task completed was the initial task,
+ * and the R2T was already terminated.
+ */
+ primary.scsi.ref = 1;
+ primary.scsi.length = 4096;
+ primary.bytes_completed = 4096 * 2;
+ primary.scsi.data_transferred = 4096 * 2;
+ primary.scsi.transfer_len = 4096 * 3;
+ primary.scsi.status = SPDK_SCSI_STATUS_GOOD;
+ primary.rsp_scsi_status = SPDK_SCSI_STATUS_GOOD;
+ primary.is_r2t_active = false;
+
+ process_non_read_task_completion(&conn, &primary, &primary);
+ CU_ASSERT(primary.bytes_completed == 4096 * 3);
+ CU_ASSERT(primary.scsi.data_transferred == 4096 * 2);
+ CU_ASSERT(primary.rsp_scsi_status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(primary.scsi.ref == 0);
+}
+
+static bool
+dequeue_pdu(void *_head, struct spdk_iscsi_pdu *pdu)
+{
+ TAILQ_HEAD(queued_pdus, spdk_iscsi_pdu) *head = _head;
+ struct spdk_iscsi_pdu *tmp;
+
+ TAILQ_FOREACH(tmp, head, tailq) {
+ if (tmp == pdu) {
+ TAILQ_REMOVE(head, tmp, tailq);
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool
+dequeue_task(void *_head, struct spdk_iscsi_task *task)
+{
+ TAILQ_HEAD(queued_tasks, spdk_iscsi_task) *head = _head;
+ struct spdk_iscsi_task *tmp;
+
+ TAILQ_FOREACH(tmp, head, link) {
+ if (tmp == task) {
+ TAILQ_REMOVE(head, tmp, link);
+ return true;
+ }
+ }
+ return false;
+}
+
+static void iscsi_conn_pdu_dummy_complete(void *arg)
+{
+}
+
+static void
+free_tasks_on_connection(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu1 = {}, pdu2 = {}, pdu3 = {}, pdu4 = {};
+ struct spdk_iscsi_task task1 = {}, task2 = {}, task3 = {};
+ struct spdk_scsi_lun lun1 = {}, lun2 = {};
+
+ TAILQ_INIT(&conn.write_pdu_list);
+ TAILQ_INIT(&conn.snack_pdu_list);
+ TAILQ_INIT(&conn.queued_datain_tasks);
+ conn.data_in_cnt = MAX_LARGE_DATAIN_PER_CONNECTION;
+
+ pdu1.task = &task1;
+ pdu2.task = &task2;
+ pdu3.task = &task3;
+
+ pdu1.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu2.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu3.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu4.cb_fn = iscsi_conn_pdu_dummy_complete;
+
+ task1.scsi.lun = &lun1;
+ task2.scsi.lun = &lun2;
+
+ task1.is_queued = false;
+ task2.is_queued = false;
+ task3.is_queued = true;
+
+ /* Test conn->write_pdu_list. */
+
+ task1.scsi.ref = 1;
+ task2.scsi.ref = 1;
+ task3.scsi.ref = 1;
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu1, tailq);
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu2, tailq);
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu3, tailq);
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu4, tailq);
+
+ /* Free all PDUs when exiting the connection. */
+ iscsi_conn_free_tasks(&conn);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.write_pdu_list));
+ CU_ASSERT(task1.scsi.ref == 0);
+ CU_ASSERT(task2.scsi.ref == 0);
+ CU_ASSERT(task3.scsi.ref == 0);
+
+ /* Test conn->snack_pdu_list */
+
+ task1.scsi.ref = 1;
+ task2.scsi.ref = 1;
+ task3.scsi.ref = 1;
+ pdu1.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu2.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu3.cb_fn = iscsi_conn_pdu_dummy_complete;
+ TAILQ_INSERT_TAIL(&conn.snack_pdu_list, &pdu1, tailq);
+ TAILQ_INSERT_TAIL(&conn.snack_pdu_list, &pdu2, tailq);
+ TAILQ_INSERT_TAIL(&conn.snack_pdu_list, &pdu3, tailq);
+
+ /* Free all PDUs and associated tasks when exiting the connection. */
+ iscsi_conn_free_tasks(&conn);
+
+ CU_ASSERT(!dequeue_pdu(&conn.snack_pdu_list, &pdu1));
+ CU_ASSERT(!dequeue_pdu(&conn.snack_pdu_list, &pdu2));
+ CU_ASSERT(!dequeue_pdu(&conn.snack_pdu_list, &pdu3));
+ CU_ASSERT(task1.scsi.ref == 0);
+ CU_ASSERT(task2.scsi.ref == 0);
+ CU_ASSERT(task3.scsi.ref == 0);
+
+ /* Test conn->queued_datain_tasks */
+
+ task1.scsi.ref = 1;
+ task2.scsi.ref = 1;
+ task3.scsi.ref = 1;
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task1, link);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task2, link);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task3, link);
+
+ /* Free all tasks that are not queued when exiting the connection. */
+ iscsi_conn_free_tasks(&conn);
+
+ CU_ASSERT(!dequeue_task(&conn.queued_datain_tasks, &task1));
+ CU_ASSERT(!dequeue_task(&conn.queued_datain_tasks, &task2));
+ CU_ASSERT(dequeue_task(&conn.queued_datain_tasks, &task3));
+ CU_ASSERT(task1.scsi.ref == 0);
+ CU_ASSERT(task2.scsi.ref == 0);
+ CU_ASSERT(task3.scsi.ref == 1);
+}
+
+static void
+free_tasks_with_queued_datain(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu1 = {}, pdu2 = {}, pdu3 = {}, pdu4 = {}, pdu5 = {}, pdu6 = {};
+ struct spdk_iscsi_task task1 = {}, task2 = {}, task3 = {}, task4 = {}, task5 = {}, task6 = {};
+
+ TAILQ_INIT(&conn.write_pdu_list);
+ TAILQ_INIT(&conn.snack_pdu_list);
+ TAILQ_INIT(&conn.queued_datain_tasks);
+
+ pdu1.task = &task1;
+ pdu2.task = &task2;
+ pdu3.task = &task3;
+ pdu1.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu2.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu3.cb_fn = iscsi_conn_pdu_dummy_complete;
+
+ task1.scsi.ref = 1;
+ task2.scsi.ref = 1;
+ task3.scsi.ref = 1;
+
+ pdu3.bhs.opcode = ISCSI_OP_SCSI_DATAIN;
+ task3.scsi.offset = 1;
+ conn.data_in_cnt = 1;
+
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu1, tailq);
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu2, tailq);
+ TAILQ_INSERT_TAIL(&conn.write_pdu_list, &pdu3, tailq);
+
+ task4.scsi.ref = 1;
+ task5.scsi.ref = 1;
+ task6.scsi.ref = 1;
+
+ task4.pdu = &pdu4;
+ task5.pdu = &pdu5;
+ task6.pdu = &pdu6;
+ pdu4.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu5.cb_fn = iscsi_conn_pdu_dummy_complete;
+ pdu6.cb_fn = iscsi_conn_pdu_dummy_complete;
+
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task4, link);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task5, link);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task6, link);
+
+ iscsi_conn_free_tasks(&conn);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.write_pdu_list));
+ CU_ASSERT(TAILQ_EMPTY(&conn.queued_datain_tasks));
+}
+
+static void
+abort_queued_datain_task_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {}, subtask = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iscsi_bhs_scsi_req *scsi_req;
+ int rc;
+
+ TAILQ_INIT(&conn.queued_datain_tasks);
+ task.scsi.ref = 1;
+ task.scsi.dxfer_dir = SPDK_SCSI_DIR_FROM_DEV;
+ task.pdu = &pdu;
+ TAILQ_INIT(&task.subtask_list);
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu.bhs;
+ scsi_req->read_bit = 1;
+
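+ /* The iscsi_task_get() stub returns whatever g_new_task points to. */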
+ g_new_task = &subtask;
+
+ /* Case 1: Queue one task that has not been executed yet. */
+ task.scsi.transfer_len = SPDK_BDEV_LARGE_BUF_MAX_SIZE * 3;
+ task.scsi.offset = 0;
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task, link);
+
+ /* No slots for sub read tasks */
+ conn.data_in_cnt = MAX_LARGE_DATAIN_PER_CONNECTION;
+ rc = _iscsi_conn_abort_queued_datain_task(&conn, &task);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(!TAILQ_EMPTY(&conn.queued_datain_tasks));
+
+ /* Have slots for sub read tasks */
+ conn.data_in_cnt = 0;
+ rc = _iscsi_conn_abort_queued_datain_task(&conn, &task);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(TAILQ_EMPTY(&conn.queued_datain_tasks));
+ CU_ASSERT(task.current_datain_offset == SPDK_BDEV_LARGE_BUF_MAX_SIZE * 3);
+ CU_ASSERT(task.scsi.ref == 0);
+ CU_ASSERT(subtask.scsi.offset == 0);
+ CU_ASSERT(subtask.scsi.length == SPDK_BDEV_LARGE_BUF_MAX_SIZE * 3);
+ CU_ASSERT(subtask.scsi.ref == 0);
+
+ /* Case 2: Queue one task that has been partially executed. */
+ task.scsi.ref = 1;
+ task.scsi.transfer_len = SPDK_BDEV_LARGE_BUF_MAX_SIZE * 3;
+ task.current_datain_offset = SPDK_BDEV_LARGE_BUF_MAX_SIZE;
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task, link);
+
+ /* No slots for sub read tasks */
+ conn.data_in_cnt = MAX_LARGE_DATAIN_PER_CONNECTION;
+ rc = _iscsi_conn_abort_queued_datain_task(&conn, &task);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(!TAILQ_EMPTY(&conn.queued_datain_tasks));
+
+ /* Have slots for sub read tasks */
+ conn.data_in_cnt = 0;
+ rc = _iscsi_conn_abort_queued_datain_task(&conn, &task);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(task.current_datain_offset == SPDK_BDEV_LARGE_BUF_MAX_SIZE * 3);
+ CU_ASSERT(task.scsi.ref == 2);
+ CU_ASSERT(TAILQ_FIRST(&task.subtask_list) == &subtask);
+ CU_ASSERT(subtask.scsi.offset == SPDK_BDEV_LARGE_BUF_MAX_SIZE);
+ CU_ASSERT(subtask.scsi.length == SPDK_BDEV_LARGE_BUF_MAX_SIZE * 2);
+ CU_ASSERT(subtask.scsi.ref == 1);
+
+ g_new_task = NULL;
+}
+
+static bool
+datain_task_is_queued(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_task *task)
+{
+ struct spdk_iscsi_task *tmp;
+
+ TAILQ_FOREACH(tmp, &conn->queued_datain_tasks, link) {
+ if (tmp == task) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static void
+abort_queued_datain_tasks_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task1 = {}, task2 = {}, task3 = {}, task4 = {}, task5 = {}, task6 = {};
+ struct spdk_iscsi_task subtask = {};
+ struct spdk_iscsi_pdu pdu1 = {}, pdu2 = {}, pdu3 = {}, pdu4 = {}, pdu5 = {}, pdu6 = {};
+ struct spdk_iscsi_pdu mgmt_pdu1 = {}, mgmt_pdu2 = {};
+ struct spdk_scsi_lun lun1 = {}, lun2 = {};
+ uint32_t alloc_cmd_sn;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ int rc;
+
+ TAILQ_INIT(&conn.queued_datain_tasks);
+ conn.data_in_cnt = 0;
+
+ g_new_task = &subtask;
+
+ alloc_cmd_sn = 88;
+
+ pdu1.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu1.bhs;
+ scsi_req->read_bit = 1;
+ task1.scsi.ref = 1;
+ task1.current_datain_offset = 0;
+ task1.scsi.transfer_len = 512;
+ task1.scsi.lun = &lun1;
+ iscsi_task_set_pdu(&task1, &pdu1);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task1, link);
+
+ pdu2.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu2.bhs;
+ scsi_req->read_bit = 1;
+ task2.scsi.ref = 1;
+ task2.current_datain_offset = 0;
+ task2.scsi.transfer_len = 512;
+ task2.scsi.lun = &lun2;
+ iscsi_task_set_pdu(&task2, &pdu2);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task2, link);
+
+ mgmt_pdu1.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+
+ pdu3.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu3.bhs;
+ scsi_req->read_bit = 1;
+ task3.scsi.ref = 1;
+ task3.current_datain_offset = 0;
+ task3.scsi.transfer_len = 512;
+ task3.scsi.lun = &lun1;
+ iscsi_task_set_pdu(&task3, &pdu3);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task3, link);
+
+ pdu4.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu4.bhs;
+ scsi_req->read_bit = 1;
+ task4.scsi.ref = 1;
+ task4.current_datain_offset = 0;
+ task4.scsi.transfer_len = 512;
+ task4.scsi.lun = &lun2;
+ iscsi_task_set_pdu(&task4, &pdu4);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task4, link);
+
+ pdu5.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu5.bhs;
+ scsi_req->read_bit = 1;
+ task5.scsi.ref = 1;
+ task5.current_datain_offset = 0;
+ task5.scsi.transfer_len = 512;
+ task5.scsi.lun = &lun1;
+ iscsi_task_set_pdu(&task5, &pdu5);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task5, link);
+
+ mgmt_pdu2.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+
+ pdu6.cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu6.bhs;
+ scsi_req->read_bit = 1;
+ task6.scsi.ref = 1;
+ task6.current_datain_offset = 0;
+ task6.scsi.transfer_len = 512;
+ task6.scsi.lun = &lun2;
+ iscsi_task_set_pdu(&task6, &pdu6);
+ TAILQ_INSERT_TAIL(&conn.queued_datain_tasks, &task6, link);
+
+ rc = iscsi_conn_abort_queued_datain_tasks(&conn, &lun1, &mgmt_pdu1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!datain_task_is_queued(&conn, &task1));
+ CU_ASSERT(datain_task_is_queued(&conn, &task2));
+ CU_ASSERT(datain_task_is_queued(&conn, &task3));
+ CU_ASSERT(datain_task_is_queued(&conn, &task4));
+ CU_ASSERT(datain_task_is_queued(&conn, &task5));
+ CU_ASSERT(datain_task_is_queued(&conn, &task6));
+
+ rc = iscsi_conn_abort_queued_datain_tasks(&conn, &lun2, &mgmt_pdu2);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!datain_task_is_queued(&conn, &task2));
+ CU_ASSERT(datain_task_is_queued(&conn, &task3));
+ CU_ASSERT(!datain_task_is_queued(&conn, &task4));
+ CU_ASSERT(datain_task_is_queued(&conn, &task5));
+ CU_ASSERT(datain_task_is_queued(&conn, &task6));
+
+ CU_ASSERT(task1.scsi.ref == 0);
+ CU_ASSERT(task2.scsi.ref == 0);
+ CU_ASSERT(task3.scsi.ref == 1);
+ CU_ASSERT(task4.scsi.ref == 0);
+ CU_ASSERT(task5.scsi.ref == 1);
+ CU_ASSERT(task6.scsi.ref == 1);
+ CU_ASSERT(subtask.scsi.ref == 0);
+
+ g_new_task = NULL;
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("conn_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, read_task_split_in_order_case);
+ CU_ADD_TEST(suite, read_task_split_reverse_order_case);
+ CU_ADD_TEST(suite, propagate_scsi_error_status_for_split_read_tasks);
+ CU_ADD_TEST(suite, process_non_read_task_completion_test);
+ CU_ADD_TEST(suite, free_tasks_on_connection);
+ CU_ADD_TEST(suite, free_tasks_with_queued_datain);
+ CU_ADD_TEST(suite, abort_queued_datain_task_test);
+ CU_ADD_TEST(suite, abort_queued_datain_tasks_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore b/src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore
new file mode 100644
index 000000000..8fbc2b636
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore
@@ -0,0 +1 @@
+init_grp_ut
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile b/src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile
new file mode 100644
index 000000000..708e691a5
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf
+TEST_FILE = init_grp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf
new file mode 100644
index 000000000..aaa660def
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf
@@ -0,0 +1,31 @@
+[IG_Valid0]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ Netmask 192.168.2.0
+
+[IG_Valid1]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ Netmask 192.168.2.0
+ Netmask 192.168.2.1
+
+[IG_Valid2]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ InitiatorName iqn.2017-10.spdk.io:0002
+ Netmask 192.168.2.0
+
+[IG_Valid3]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ InitiatorName iqn.2017-10.spdk.io:0002
+ Netmask 192.168.2.0
+ Netmask 192.168.2.1
+
+[IG_Invalid0]
+# Failure is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+
+[IG_Invalid1]
+# Failure is expected.
+ Netmask 192.168.2.0
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c
new file mode 100644
index 000000000..199aad8b8
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c
@@ -0,0 +1,674 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "CUnit/Basic.h"
+
+#include "iscsi/init_grp.c"
+#include "unit/lib/json_mock.c"
+
+SPDK_LOG_REGISTER_COMPONENT("iscsi", SPDK_LOG_ISCSI)
+
+struct spdk_iscsi_globals g_iscsi;
+
+const char *config_file;
+
+static int
+test_setup(void)
+{
+ TAILQ_INIT(&g_iscsi.ig_head);
+ return 0;
+}
+
+static void
+create_from_config_file_cases(void)
+{
+ struct spdk_conf *config;
+ struct spdk_conf_section *sp;
+ char section_name[64];
+ int section_index;
+ int rc;
+
+ config = spdk_conf_allocate();
+
+ rc = spdk_conf_read(config, config_file);
+ CU_ASSERT(rc == 0);
+
+ section_index = 0;
+ while (true) {
+ snprintf(section_name, sizeof(section_name), "IG_Valid%d", section_index);
+
+ sp = spdk_conf_find_section(config, section_name);
+ if (sp == NULL) {
+ break;
+ }
+
+ rc = iscsi_parse_init_grp(sp);
+ CU_ASSERT(rc == 0);
+
+ iscsi_init_grps_destroy();
+
+ section_index++;
+ }
+
+ section_index = 0;
+ while (true) {
+ snprintf(section_name, sizeof(section_name), "IG_Invalid%d", section_index);
+
+ sp = spdk_conf_find_section(config, section_name);
+ if (sp == NULL) {
+ break;
+ }
+
+ rc = iscsi_parse_init_grp(sp);
+ CU_ASSERT(rc != 0);
+
+ iscsi_init_grps_destroy();
+
+ section_index++;
+ }
+
+ spdk_conf_free(config);
+}
+
+
+static void
+create_initiator_group_success_case(void)
+{
+ struct spdk_iscsi_init_grp *ig;
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+find_initiator_group_success_case(void)
+{
+ struct spdk_iscsi_init_grp *ig, *tmp;
+ int rc;
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = iscsi_init_grp_register(ig);
+ CU_ASSERT(rc == 0);
+
+ ig = iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig != NULL);
+
+ tmp = iscsi_init_grp_unregister(1);
+ CU_ASSERT(ig == tmp);
+ iscsi_init_grp_destroy(ig);
+
+ ig = iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig == NULL);
+}
+
+static void
+register_initiator_group_twice_case(void)
+{
+ struct spdk_iscsi_init_grp *ig, *tmp;
+ int rc;
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = iscsi_init_grp_register(ig);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_register(ig);
+ CU_ASSERT(rc != 0);
+
+ ig = iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig != NULL);
+
+ tmp = iscsi_init_grp_unregister(1);
+ CU_ASSERT(tmp == ig);
+ iscsi_init_grp_destroy(ig);
+
+ ig = iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig == NULL);
+}
+
+static void
+add_initiator_name_success_case(void)
+{
+
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *name1 = "iqn.2017-10.spdk.io:0001";
+ char *name2 = "iqn.2017-10.spdk.io:0002";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different names to the empty name list */
+ rc = iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_initiator(ig, name2);
+ CU_ASSERT(rc == 0);
+
+ /* check if two names are added correctly. */
+ iname = iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname != NULL);
+
+ iname = iscsi_init_grp_find_initiator(ig, name2);
+ CU_ASSERT(iname != NULL);
+
+ /* restore the initial state */
+ rc = iscsi_init_grp_delete_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ iname = iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname == NULL);
+
+ rc = iscsi_init_grp_delete_initiator(ig, name2);
+ CU_ASSERT(rc == 0);
+
+ iname = iscsi_init_grp_find_initiator(ig, name2);
+ CU_ASSERT(iname == NULL);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_initiator_name_fail_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *name1 = "iqn.2017-10.spdk.io:0001";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+	/* add a name to the full name list */
+ ig->ninitiators = MAX_INITIATOR;
+
+ rc = iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc != 0);
+
+ ig->ninitiators = 0;
+
+ /* add the same name to the name list twice */
+ rc = iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc != 0);
+
+ /* restore the initial state */
+ rc = iscsi_init_grp_delete_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ iname = iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname == NULL);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_all_initiator_names_success_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *name1 = "iqn.2017-10.spdk.io:0001";
+ char *name2 = "iqn.2017-10.spdk.io:0002";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different names to the empty name list */
+ rc = iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_initiator(ig, name2);
+ CU_ASSERT(rc == 0);
+
+ /* delete all initiator names */
+ iscsi_init_grp_delete_all_initiators(ig);
+
+ /* check if two names are deleted correctly. */
+ iname = iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname == NULL);
+
+ iname = iscsi_init_grp_find_initiator(ig, name2);
+ CU_ASSERT(iname == NULL);
+
+ /* restore the initial state */
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_netmask_success_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *netmask1 = "192.168.2.0";
+ char *netmask2 = "192.168.2.1";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different netmasks to the empty netmask list */
+ rc = iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_netmask(ig, netmask2);
+ CU_ASSERT(rc == 0);
+
+ /* check if two netmasks are added correctly. */
+ imask = iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask != NULL);
+
+ imask = iscsi_init_grp_find_netmask(ig, netmask2);
+ CU_ASSERT(imask != NULL);
+
+ /* restore the initial state */
+ rc = iscsi_init_grp_delete_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ imask = iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask == NULL);
+
+ rc = iscsi_init_grp_delete_netmask(ig, netmask2);
+ CU_ASSERT(rc == 0);
+
+ imask = iscsi_init_grp_find_netmask(ig, netmask2);
+ CU_ASSERT(imask == NULL);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_netmask_fail_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *netmask1 = "192.168.2.0";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+	/* add a netmask to the full netmask list */
+ ig->nnetmasks = MAX_NETMASK;
+
+ rc = iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc != 0);
+
+ ig->nnetmasks = 0;
+
+ /* add the same netmask to the netmask list twice */
+ rc = iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc != 0);
+
+ /* restore the initial state */
+ rc = iscsi_init_grp_delete_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ imask = iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask == NULL);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_all_netmasks_success_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *netmask1 = "192.168.2.0";
+ char *netmask2 = "192.168.2.1";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different netmasks to the empty netmask list */
+ rc = iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_netmask(ig, netmask2);
+ CU_ASSERT(rc == 0);
+
+ /* delete all netmasks */
+ iscsi_init_grp_delete_all_netmasks(ig);
+
+ /* check if two netmasks are deleted correctly. */
+ imask = iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask == NULL);
+
+ imask = iscsi_init_grp_find_netmask(ig, netmask2);
+ CU_ASSERT(imask == NULL);
+
+ /* restore the initial state */
+ iscsi_init_grp_destroy(ig);
+}
+
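+/*
+ * The deprecated "ALL"/"!ALL" keywords are converted to "ANY"/"!ANY" when an
+ * initiator name or netmask is added, so a lookup by the original keyword is
+ * expected to fail while a lookup by the converted keyword succeeds.
+ */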
+static void
+initiator_name_overwrite_all_to_any_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *all = "ALL";
+ char *any = "ANY";
+ char *all_not = "!ALL";
+ char *any_not = "!ANY";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = iscsi_init_grp_add_initiator(ig, all);
+ CU_ASSERT(rc == 0);
+
+ iname = iscsi_init_grp_find_initiator(ig, all);
+ CU_ASSERT(iname == NULL);
+
+ iname = iscsi_init_grp_find_initiator(ig, any);
+ CU_ASSERT(iname != NULL);
+
+ rc = iscsi_init_grp_delete_initiator(ig, any);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_init_grp_add_initiator(ig, all_not);
+ CU_ASSERT(rc == 0);
+
+ iname = iscsi_init_grp_find_initiator(ig, all_not);
+ CU_ASSERT(iname == NULL);
+
+ iname = iscsi_init_grp_find_initiator(ig, any_not);
+ CU_ASSERT(iname != NULL);
+
+ rc = iscsi_init_grp_delete_initiator(ig, any_not);
+ CU_ASSERT(rc == 0);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+netmask_overwrite_all_to_any_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *all = "ALL";
+ char *any = "ANY";
+
+ ig = iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = iscsi_init_grp_add_netmask(ig, all);
+ CU_ASSERT(rc == 0);
+
+ imask = iscsi_init_grp_find_netmask(ig, all);
+ CU_ASSERT(imask == NULL);
+
+ imask = iscsi_init_grp_find_netmask(ig, any);
+ CU_ASSERT(imask != NULL);
+
+ rc = iscsi_init_grp_delete_netmask(ig, any);
+ CU_ASSERT(rc == 0);
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_delete_initiator_names_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *names[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0003"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_initiators(ig, 3, names);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ iname = iscsi_init_grp_find_initiator(ig, names[i]);
+ CU_ASSERT(iname != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_initiators(ig, 3, names);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->initiator_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_duplicated_initiator_names_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ char *names[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0001"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_initiators(ig, 3, names);
+ CU_ASSERT(rc != 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->initiator_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_nonexisting_initiator_names_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *names1[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0003"};
+ char *names2[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0004"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_initiators(ig, 3, names1);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ iname = iscsi_init_grp_find_initiator(ig, names1[i]);
+ CU_ASSERT(iname != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_initiators(ig, 3, names2);
+ CU_ASSERT(rc != 0);
+
+ for (i = 0; i < 3; i++) {
+ iname = iscsi_init_grp_find_initiator(ig, names1[i]);
+ CU_ASSERT(iname != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_initiators(ig, 3, names1);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->initiator_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_delete_netmasks_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *netmask;
+ char *netmasks[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.2"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_netmasks(ig, 3, netmasks);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ netmask = iscsi_init_grp_find_netmask(ig, netmasks[i]);
+ CU_ASSERT(netmask != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_netmasks(ig, 3, netmasks);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->netmask_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_duplicated_netmasks_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ char *netmasks[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.0"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_netmasks(ig, 3, netmasks);
+ CU_ASSERT(rc != 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->netmask_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_nonexisting_netmasks_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *netmask;
+ char *netmasks1[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.2"};
+ char *netmasks2[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.3"};
+
+ ig = iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = iscsi_init_grp_add_netmasks(ig, 3, netmasks1);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ netmask = iscsi_init_grp_find_netmask(ig, netmasks1[i]);
+ CU_ASSERT(netmask != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_netmasks(ig, 3, netmasks2);
+ CU_ASSERT(rc != 0);
+
+ for (i = 0; i < 3; i++) {
+ netmask = iscsi_init_grp_find_netmask(ig, netmasks1[i]);
+ CU_ASSERT(netmask != NULL);
+ }
+
+ rc = iscsi_init_grp_delete_netmasks(ig, 3, netmasks1);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->netmask_head));
+ }
+
+ iscsi_init_grp_destroy(ig);
+}
+
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (argc < 2) {
+ fprintf(stderr, "usage: %s <config file>\n", argv[0]);
+ exit(1);
+ }
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ config_file = argv[1];
+
+ suite = CU_add_suite("init_grp_suite", test_setup, NULL);
+
+ CU_ADD_TEST(suite, create_from_config_file_cases);
+ CU_ADD_TEST(suite, create_initiator_group_success_case);
+ CU_ADD_TEST(suite, find_initiator_group_success_case);
+ CU_ADD_TEST(suite, register_initiator_group_twice_case);
+ CU_ADD_TEST(suite, add_initiator_name_success_case);
+ CU_ADD_TEST(suite, add_initiator_name_fail_case);
+ CU_ADD_TEST(suite, delete_all_initiator_names_success_case);
+ CU_ADD_TEST(suite, add_netmask_success_case);
+ CU_ADD_TEST(suite, add_netmask_fail_case);
+ CU_ADD_TEST(suite, delete_all_netmasks_success_case);
+ CU_ADD_TEST(suite, initiator_name_overwrite_all_to_any_case);
+ CU_ADD_TEST(suite, netmask_overwrite_all_to_any_case);
+ CU_ADD_TEST(suite, add_delete_initiator_names_case);
+ CU_ADD_TEST(suite, add_duplicated_initiator_names_case);
+ CU_ADD_TEST(suite, delete_nonexisting_initiator_names_case);
+ CU_ADD_TEST(suite, add_delete_netmasks_case);
+ CU_ADD_TEST(suite, add_duplicated_netmasks_case);
+ CU_ADD_TEST(suite, delete_nonexisting_netmasks_case);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore b/src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore
new file mode 100644
index 000000000..4d41887c8
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore
@@ -0,0 +1 @@
+iscsi_ut
diff --git a/src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile b/src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile
new file mode 100644
index 000000000..66d7334a4
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile
@@ -0,0 +1,46 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf util
+
+SCSI_OBJS = port
+ISCSI_OBJS = md5 param
+LIBS += $(SCSI_OBJS:%=$(SPDK_ROOT_DIR)/lib/scsi/%.o)
+LIBS += $(ISCSI_OBJS:%=$(SPDK_ROOT_DIR)/lib/iscsi/%.o)
+LIBS += -lcunit
+
+TEST_FILE = iscsi_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c b/src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c
new file mode 100644
index 000000000..f96afd999
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c
@@ -0,0 +1,2024 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/endian.h"
+#include "spdk/scsi.h"
+#include "spdk_cunit.h"
+
+#include "CUnit/Basic.h"
+
+#include "iscsi/iscsi.c"
+
+#include "../common.c"
+#include "iscsi/portal_grp.h"
+#include "scsi/scsi_internal.h"
+#include "common/lib/test_env.c"
+
+#include "spdk_internal/mock.h"
+
+#define UT_TARGET_NAME1 "iqn.2017-11.spdk.io:t0001"
+#define UT_TARGET_NAME2 "iqn.2017-11.spdk.io:t0002"
+#define UT_INITIATOR_NAME1 "iqn.2017-11.spdk.io:i0001"
+#define UT_INITIATOR_NAME2 "iqn.2017-11.spdk.io:i0002"
+#define UT_ISCSI_TSIH 256
+
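+/*
+ * The stubs and mocks below replace the target node, portal group, connection
+ * scheduling, and SCSI LUN layers so that iscsi.c can be exercised in
+ * isolation. Only UT_TARGET_NAME1 resolves to a target, and only
+ * UT_INITIATOR_NAME1 is granted access to it.
+ */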
+struct spdk_iscsi_tgt_node g_tgt;
+
+struct spdk_iscsi_tgt_node *
+iscsi_find_tgt_node(const char *target_name)
+{
+ if (strcasecmp(target_name, UT_TARGET_NAME1) == 0) {
+ g_tgt.dev = NULL;
+ return (struct spdk_iscsi_tgt_node *)&g_tgt;
+ } else {
+ return NULL;
+ }
+}
+
+bool
+iscsi_tgt_node_access(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_tgt_node *target,
+ const char *iqn, const char *addr)
+{
+ if (strcasecmp(conn->initiator_name, UT_INITIATOR_NAME1) == 0) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+DEFINE_STUB(iscsi_send_tgts, int,
+ (struct spdk_iscsi_conn *conn, const char *iiqn, const char *iaddr,
+ const char *tiqn, uint8_t *data, int alloc_len, int data_len),
+ 0);
+
+DEFINE_STUB(iscsi_tgt_node_is_destructed, bool,
+ (struct spdk_iscsi_tgt_node *target), false);
+
+DEFINE_STUB_V(iscsi_portal_grp_close_all, (void));
+
+DEFINE_STUB_V(iscsi_conn_schedule, (struct spdk_iscsi_conn *conn));
+
+DEFINE_STUB_V(iscsi_conn_free_pdu,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu));
+
+DEFINE_STUB_V(iscsi_conn_pdu_generic_complete, (void *cb_arg));
+
+DEFINE_STUB(iscsi_conn_handle_queued_datain_tasks, int,
+ (struct spdk_iscsi_conn *conn), 0);
+
+DEFINE_STUB(iscsi_conn_abort_queued_datain_task, int,
+ (struct spdk_iscsi_conn *conn, uint32_t ref_task_tag), 0);
+
+DEFINE_STUB(iscsi_conn_abort_queued_datain_tasks, int,
+ (struct spdk_iscsi_conn *conn, struct spdk_scsi_lun *lun,
+ struct spdk_iscsi_pdu *pdu), 0);
+
+DEFINE_STUB(iscsi_chap_get_authinfo, int,
+ (struct iscsi_chap_auth *auth, const char *authuser, int ag_tag),
+ 0);
+
+DEFINE_STUB(spdk_sock_set_recvbuf, int, (struct spdk_sock *sock, int sz), 0);
+
+int
+spdk_scsi_lun_get_id(const struct spdk_scsi_lun *lun)
+{
+ return lun->id;
+}
+
+DEFINE_STUB(spdk_scsi_lun_is_removing, bool, (const struct spdk_scsi_lun *lun),
+ true);
+
+struct spdk_scsi_lun *
+spdk_scsi_dev_get_lun(struct spdk_scsi_dev *dev, int lun_id)
+{
+ if (lun_id < 0 || lun_id >= SPDK_SCSI_DEV_MAX_LUN) {
+ return NULL;
+ }
+
+ return dev->lun[lun_id];
+}
+
+DEFINE_STUB(spdk_scsi_lun_id_int_to_fmt, uint64_t, (int lun_id), 0);
+
+DEFINE_STUB(spdk_scsi_lun_id_fmt_to_int, int, (uint64_t lun_fmt), 0);
+
+DEFINE_STUB(spdk_scsi_lun_get_dif_ctx, bool,
+ (struct spdk_scsi_lun *lun, struct spdk_scsi_task *task,
+ struct spdk_dif_ctx *dif_ctx), false);
+
+static void
+op_login_check_target_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu rsp_pdu = {};
+ struct spdk_iscsi_tgt_node *target;
+ int rc;
+
+ /* expect success */
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME1);
+
+ rc = iscsi_op_login_check_target(&conn, &rsp_pdu,
+ UT_TARGET_NAME1, &target);
+ CU_ASSERT(rc == 0);
+
+ /* expect failure */
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME1);
+
+ rc = iscsi_op_login_check_target(&conn, &rsp_pdu,
+ UT_TARGET_NAME2, &target);
+ CU_ASSERT(rc != 0);
+
+ /* expect failure */
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME2);
+
+ rc = iscsi_op_login_check_target(&conn, &rsp_pdu,
+ UT_TARGET_NAME1, &target);
+ CU_ASSERT(rc != 0);
+}
+
+static void
+op_login_session_normal_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_portal portal = {};
+ struct spdk_iscsi_portal_grp group = {};
+ struct spdk_iscsi_pdu rsp_pdu = {};
+ struct iscsi_bhs_login_rsp *rsph;
+ struct spdk_iscsi_sess sess = {};
+ struct iscsi_param param = {};
+ int rc;
+
+ /* setup related data structures */
+ rsph = (struct iscsi_bhs_login_rsp *)&rsp_pdu.bhs;
+ rsph->tsih = 0;
+ memset(rsph->isid, 0, sizeof(rsph->isid));
+ conn.portal = &portal;
+ portal.group = &group;
+ conn.portal->group->tag = 0;
+ conn.params = NULL;
+
+ /* expect failure: NULL params for target name */
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ NULL, 0);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
+ CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_MISSING_PARMS);
+
+ /* expect failure: incorrect key for target name */
+ param.next = NULL;
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
+ CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_MISSING_PARMS);
+
+ /* expect failure: NULL target name */
+ param.key = "TargetName";
+ param.val = NULL;
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
+ CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_MISSING_PARMS);
+
+ /* expect failure: session not found */
+ param.key = "TargetName";
+ param.val = "iqn.2017-11.spdk.io:t0001";
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME1);
+	rsph->tsih = 1; /* non-zero TSIH: try to add a connection to an existing session */
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(conn.target_port == NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
+ CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_CONN_ADD_FAIL);
+
+	/* expect failure: the session is found but its portal group tag does not match */
+ g_iscsi.MaxSessions = UT_ISCSI_TSIH * 2;
+ g_iscsi.session = calloc(1, sizeof(void *) * g_iscsi.MaxSessions);
+ g_iscsi.session[UT_ISCSI_TSIH - 1] = &sess;
+ sess.tsih = UT_ISCSI_TSIH;
+	rsph->tsih = UT_ISCSI_TSIH >> 8; /* to add a connection to the existing session */
+ sess.tag = 1;
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(conn.target_port == NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
+ CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_CONN_ADD_FAIL);
+
+	/* expect success: drop the session */
+ rsph->tsih = 0; /* to create the session */
+ g_iscsi.AllowDuplicateIsid = false;
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(rc == 0);
+
+	/* expect success: create the session */
+ rsph->tsih = 0; /* to create the session */
+ g_iscsi.AllowDuplicateIsid = true;
+ rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
+ &param, 0);
+ CU_ASSERT(rc == 0);
+
+ free(g_iscsi.session);
+}
+
+static void
+maxburstlength_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_iscsi_pdu *req_pdu, *data_out_pdu, *r2t_pdu;
+ struct iscsi_bhs_scsi_req *req;
+ struct iscsi_bhs_r2t *r2t;
+ struct iscsi_bhs_data_out *data_out;
+ struct spdk_iscsi_pdu *response_pdu;
+ int rc;
+
+ req_pdu = iscsi_get_pdu(&conn);
+ data_out_pdu = iscsi_get_pdu(&conn);
+
+ sess.ExpCmdSN = 0;
+ sess.MaxCmdSN = 64;
+ sess.session_type = SESSION_TYPE_NORMAL;
+ sess.MaxBurstLength = 1024;
+
+ lun.id = 0;
+
+ dev.lun[0] = &lun;
+
+ conn.full_feature = 1;
+ conn.sess = &sess;
+ conn.dev = &dev;
+ conn.state = ISCSI_CONN_STATE_RUNNING;
+ TAILQ_INIT(&conn.write_pdu_list);
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ req_pdu->bhs.opcode = ISCSI_OP_SCSI;
+ req_pdu->data_segment_len = 0;
+
+ req = (struct iscsi_bhs_scsi_req *)&req_pdu->bhs;
+
+ to_be32(&req->cmd_sn, 0);
+ to_be32(&req->expected_data_xfer_len, 1028);
+ to_be32(&req->itt, 0x1234);
+ req->write_bit = 1;
+ req->final_bit = 1;
+
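+	/*
+	 * The write command requests 1028 bytes, which exceeds MaxBurstLength (1024),
+	 * so the R2T sent in response must be limited to 1024 bytes.
+	 */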
+ rc = iscsi_pdu_hdr_handle(&conn, req_pdu);
+ if (rc == 0 && !req_pdu->is_rejected) {
+ rc = iscsi_pdu_payload_handle(&conn, req_pdu);
+ }
+ CU_ASSERT(rc == 0);
+
+ response_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(response_pdu != NULL);
+
+ /*
+ * Confirm that a correct R2T reply was sent in response to the
+ * SCSI request.
+ */
+ TAILQ_REMOVE(&g_write_pdu_list, response_pdu, tailq);
+ CU_ASSERT(response_pdu->bhs.opcode == ISCSI_OP_R2T);
+ r2t = (struct iscsi_bhs_r2t *)&response_pdu->bhs;
+ CU_ASSERT(from_be32(&r2t->desired_xfer_len) == 1024);
+ CU_ASSERT(from_be32(&r2t->buffer_offset) == 0);
+ CU_ASSERT(from_be32(&r2t->itt) == 0x1234);
+
+ data_out_pdu->bhs.opcode = ISCSI_OP_SCSI_DATAOUT;
+ data_out_pdu->bhs.flags = ISCSI_FLAG_FINAL;
+ data_out_pdu->data_segment_len = 1028;
+ data_out = (struct iscsi_bhs_data_out *)&data_out_pdu->bhs;
+ data_out->itt = r2t->itt;
+ data_out->ttt = r2t->ttt;
+ DSET24(data_out->data_segment_len, 1028);
+
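+	/*
+	 * The Data-Out carries 1028 bytes, more than the 1024 bytes granted by the
+	 * R2T, so handling it is expected to fail with SPDK_ISCSI_CONNECTION_FATAL.
+	 */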
+ rc = iscsi_pdu_hdr_handle(&conn, data_out_pdu);
+ if (rc == 0 && !data_out_pdu->is_rejected) {
+ rc = iscsi_pdu_payload_handle(&conn, data_out_pdu);
+ }
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ SPDK_CU_ASSERT_FATAL(response_pdu->task != NULL);
+ iscsi_task_disassociate_pdu(response_pdu->task);
+ iscsi_task_put(response_pdu->task);
+ iscsi_put_pdu(response_pdu);
+
+ r2t_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(r2t_pdu != NULL);
+ TAILQ_REMOVE(&g_write_pdu_list, r2t_pdu, tailq);
+ iscsi_put_pdu(r2t_pdu);
+
+ iscsi_put_pdu(data_out_pdu);
+ iscsi_put_pdu(req_pdu);
+}
+
+static void
+underflow_for_read_transfer_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_iscsi_pdu *pdu;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_data_in *datah;
+ uint32_t residual_count = 0;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ dev.lun[0] = &lun;
+ conn.dev = &dev;
+
+ pdu = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu->bhs;
+ scsi_req->read_bit = 1;
+
+ iscsi_task_set_pdu(&task, pdu);
+ task.parent = NULL;
+
+ task.scsi.iovs = &task.scsi.iov;
+ task.scsi.iovcnt = 1;
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+ task.scsi.data_transferred = 256;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
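+	/* 512 bytes were requested but only 256 were transferred, so the response
+	 * must report an underflow with a residual count of 256.
+	 */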
+
+ iscsi_task_response(&conn, &task);
+ iscsi_put_pdu(pdu);
+
+ /*
+ * In this case, a SCSI Data-In PDU should contain the Status
+ * for the data transfer.
+ */
+ to_be32(&residual_count, 256);
+
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ CU_ASSERT(pdu->bhs.opcode == ISCSI_OP_SCSI_DATAIN);
+
+ datah = (struct iscsi_bhs_data_in *)&pdu->bhs;
+
+ CU_ASSERT(datah->flags == (ISCSI_DATAIN_UNDERFLOW | ISCSI_FLAG_FINAL | ISCSI_DATAIN_STATUS));
+ CU_ASSERT(datah->res_cnt == residual_count);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+underflow_for_zero_read_transfer_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_iscsi_pdu *pdu;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_scsi_resp *resph;
+ uint32_t residual_count = 0, data_segment_len;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ dev.lun[0] = &lun;
+ conn.dev = &dev;
+
+ pdu = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu->bhs;
+ scsi_req->read_bit = 1;
+
+ iscsi_task_set_pdu(&task, pdu);
+ task.parent = NULL;
+
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+ task.scsi.data_transferred = 0;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ iscsi_task_response(&conn, &task);
+ iscsi_put_pdu(pdu);
+
+ /*
+ * In this case, only a SCSI Response PDU is expected and
+ * underflow must be set in it.
+	 */
+ to_be32(&residual_count, 512);
+
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ CU_ASSERT(pdu->bhs.opcode == ISCSI_OP_SCSI_RSP);
+
+ resph = (struct iscsi_bhs_scsi_resp *)&pdu->bhs;
+
+ CU_ASSERT(resph->flags == (ISCSI_SCSI_UNDERFLOW | 0x80));
+
+ data_segment_len = DGET24(resph->data_segment_len);
+ CU_ASSERT(data_segment_len == 0);
+ CU_ASSERT(resph->res_cnt == residual_count);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+underflow_for_request_sense_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_iscsi_pdu *pdu1, *pdu2;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_data_in *datah;
+ struct iscsi_bhs_scsi_resp *resph;
+ uint32_t residual_count = 0, data_segment_len;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ dev.lun[0] = &lun;
+ conn.dev = &dev;
+
+ pdu1 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu1->bhs;
+ scsi_req->read_bit = 1;
+
+ iscsi_task_set_pdu(&task, pdu1);
+ task.parent = NULL;
+
+ task.scsi.iovs = &task.scsi.iov;
+ task.scsi.iovcnt = 1;
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+
+ task.scsi.sense_data_len = 18;
+ task.scsi.data_transferred = 18;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ iscsi_task_response(&conn, &task);
+ iscsi_put_pdu(pdu1);
+
+ /*
+ * In this case, a SCSI Data-In PDU and a SCSI Response PDU are returned.
+	 * Sense data is set both in the payload and in the sense area.
+ * The SCSI Data-In PDU sets FINAL and the SCSI Response PDU sets UNDERFLOW.
+ *
+	 * A different implementation is possible, but keeping the current SPDK
+	 * behavior covered by this UT is valuable for any implementation.
+ */
+ to_be32(&residual_count, 494);
+
+ pdu1 = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ CU_ASSERT(pdu1->bhs.opcode == ISCSI_OP_SCSI_DATAIN);
+
+ datah = (struct iscsi_bhs_data_in *)&pdu1->bhs;
+
+ CU_ASSERT(datah->flags == ISCSI_FLAG_FINAL);
+
+ data_segment_len = DGET24(datah->data_segment_len);
+ CU_ASSERT(data_segment_len == 18);
+ CU_ASSERT(datah->res_cnt == 0);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu1, tailq);
+ iscsi_put_pdu(pdu1);
+
+ pdu2 = TAILQ_FIRST(&g_write_pdu_list);
+ /* inform scan-build (clang 6) that these pointers are not the same */
+ SPDK_CU_ASSERT_FATAL(pdu1 != pdu2);
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ CU_ASSERT(pdu2->bhs.opcode == ISCSI_OP_SCSI_RSP);
+
+ resph = (struct iscsi_bhs_scsi_resp *)&pdu2->bhs;
+
+ CU_ASSERT(resph->flags == (ISCSI_SCSI_UNDERFLOW | 0x80));
+
+ data_segment_len = DGET24(resph->data_segment_len);
+ CU_ASSERT(data_segment_len == task.scsi.sense_data_len + 2);
+ CU_ASSERT(resph->res_cnt == residual_count);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu2, tailq);
+ iscsi_put_pdu(pdu2);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+underflow_for_check_condition_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_iscsi_pdu *pdu;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_scsi_resp *resph;
+ uint32_t data_segment_len;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ dev.lun[0] = &lun;
+ conn.dev = &dev;
+
+ pdu = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu->bhs;
+ scsi_req->read_bit = 1;
+
+ iscsi_task_set_pdu(&task, pdu);
+ task.parent = NULL;
+
+ task.scsi.iovs = &task.scsi.iov;
+ task.scsi.iovcnt = 1;
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+
+ task.scsi.sense_data_len = 18;
+ task.scsi.data_transferred = 18;
+ task.scsi.status = SPDK_SCSI_STATUS_CHECK_CONDITION;
+
+ iscsi_task_response(&conn, &task);
+ iscsi_put_pdu(pdu);
+
+ /*
+ * In this case, a SCSI Response PDU is returned.
+	 * Sense data is set in the sense area.
+ * Underflow is not set.
+ */
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ CU_ASSERT(pdu->bhs.opcode == ISCSI_OP_SCSI_RSP);
+
+ resph = (struct iscsi_bhs_scsi_resp *)&pdu->bhs;
+
+ CU_ASSERT(resph->flags == 0x80);
+
+ data_segment_len = DGET24(resph->data_segment_len);
+ CU_ASSERT(data_segment_len == task.scsi.sense_data_len + 2);
+ CU_ASSERT(resph->res_cnt == 0);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+add_transfer_task_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task = {};
+ struct spdk_iscsi_pdu *pdu, *tmp;
+ struct iscsi_bhs_r2t *r2th;
+ int rc, count = 0;
+ uint32_t buffer_offset, desired_xfer_len;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH; /* 1M */
+ sess.MaxOutstandingR2T = DEFAULT_MAXR2T; /* 4 */
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.queued_r2t_tasks);
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ pdu = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ pdu->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH; /* 64K */
+ task.scsi.transfer_len = 16 * 1024 * 1024;
+ iscsi_task_set_pdu(&task, pdu);
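+	/*
+	 * A 16MB write with 64KB of immediate data requires far more data than one
+	 * round of R2Ts can cover, so at most DEFAULT_MAXR2T R2T PDUs should be
+	 * outstanding at a time.
+	 */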
+
+ /* The following tests if the task is queued because R2T tasks are full. */
+ conn.pending_r2t = DEFAULT_MAXR2T;
+
+ rc = add_transfer_task(&conn, &task);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(TAILQ_FIRST(&conn.queued_r2t_tasks) == &task);
+
+ TAILQ_REMOVE(&conn.queued_r2t_tasks, &task, link);
+ CU_ASSERT(TAILQ_EMPTY(&conn.queued_r2t_tasks));
+
+ /* The following tests if multiple R2Ts are issued. */
+ conn.pending_r2t = 0;
+
+ rc = add_transfer_task(&conn, &task);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(TAILQ_FIRST(&conn.active_r2t_tasks) == &task);
+
+ TAILQ_REMOVE(&conn.active_r2t_tasks, &task, link);
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+
+ CU_ASSERT(conn.data_out_cnt == 255);
+ CU_ASSERT(conn.pending_r2t == 1);
+ CU_ASSERT(conn.ttt == 1);
+
+ CU_ASSERT(task.data_out_cnt == 255);
+ CU_ASSERT(task.ttt == 1);
+ CU_ASSERT(task.outstanding_r2t == sess.MaxOutstandingR2T);
+ CU_ASSERT(task.next_r2t_offset ==
+ pdu->data_segment_len + sess.MaxBurstLength * sess.MaxOutstandingR2T);
+
+
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ tmp = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, tmp, tailq);
+
+ r2th = (struct iscsi_bhs_r2t *)&tmp->bhs;
+
+ buffer_offset = from_be32(&r2th->buffer_offset);
+ CU_ASSERT(buffer_offset == pdu->data_segment_len + sess.MaxBurstLength * count);
+
+ desired_xfer_len = from_be32(&r2th->desired_xfer_len);
+ CU_ASSERT(desired_xfer_len == sess.MaxBurstLength);
+
+ iscsi_put_pdu(tmp);
+ count++;
+ }
+
+ CU_ASSERT(count == DEFAULT_MAXR2T);
+
+ iscsi_put_pdu(pdu);
+}
+
+static void
+get_transfer_task_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task task1 = {}, task2 = {}, *task;
+ struct spdk_iscsi_pdu *pdu1, *pdu2, *pdu;
+ int rc;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ pdu1 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ pdu1->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task1.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(&task1, pdu1);
+
+ rc = add_transfer_task(&conn, &task1);
+ CU_ASSERT(rc == 0);
+
+ pdu2 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ pdu2->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task2.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(&task2, pdu2);
+
+ rc = add_transfer_task(&conn, &task2);
+ CU_ASSERT(rc == 0);
+
+ task = get_transfer_task(&conn, 1);
+ CU_ASSERT(task == &task1);
+
+ task = get_transfer_task(&conn, 2);
+ CU_ASSERT(task == &task2);
+
+ while (!TAILQ_EMPTY(&conn.active_r2t_tasks)) {
+ task = TAILQ_FIRST(&conn.active_r2t_tasks);
+ TAILQ_REMOVE(&conn.active_r2t_tasks, task, link);
+ }
+
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+ }
+
+ iscsi_put_pdu(pdu2);
+ iscsi_put_pdu(pdu1);
+}
+
+static void
+del_transfer_task_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task *task1, *task2, *task3, *task4, *task5;
+ struct spdk_iscsi_pdu *pdu1, *pdu2, *pdu3, *pdu4, *pdu5, *pdu;
+ int rc;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+ TAILQ_INIT(&conn.queued_r2t_tasks);
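+	/*
+	 * Five write tasks (tags 11-15) are registered. Only the first four become
+	 * active immediately, so task5 is not found by TTT until task1 is deleted.
+	 */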
+
+ pdu1 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ pdu1->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ task1 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task1 != NULL);
+
+ task1->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(task1, pdu1);
+ task1->tag = 11;
+
+ rc = add_transfer_task(&conn, task1);
+ CU_ASSERT(rc == 0);
+
+ pdu2 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ pdu2->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ task2 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task2 != NULL);
+
+ task2->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(task2, pdu2);
+ task2->tag = 12;
+
+ rc = add_transfer_task(&conn, task2);
+ CU_ASSERT(rc == 0);
+
+ pdu3 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu3 != NULL);
+
+ pdu3->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ task3 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task3 != NULL);
+
+ task3->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(task3, pdu3);
+ task3->tag = 13;
+
+ rc = add_transfer_task(&conn, task3);
+ CU_ASSERT(rc == 0);
+
+ pdu4 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu4 != NULL);
+
+ pdu4->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ task4 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task4 != NULL);
+
+ task4->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(task4, pdu4);
+ task4->tag = 14;
+
+ rc = add_transfer_task(&conn, task4);
+ CU_ASSERT(rc == 0);
+
+ pdu5 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu5 != NULL);
+
+ pdu5->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ task5 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task5 != NULL);
+
+ task5->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ iscsi_task_set_pdu(task5, pdu5);
+ task5->tag = 15;
+
+ rc = add_transfer_task(&conn, task5);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(get_transfer_task(&conn, 1) == task1);
+ CU_ASSERT(get_transfer_task(&conn, 5) == NULL);
+ iscsi_del_transfer_task(&conn, 11);
+ CU_ASSERT(get_transfer_task(&conn, 1) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 5) == task5);
+
+ CU_ASSERT(get_transfer_task(&conn, 2) == task2);
+ iscsi_del_transfer_task(&conn, 12);
+ CU_ASSERT(get_transfer_task(&conn, 2) == NULL);
+
+ CU_ASSERT(get_transfer_task(&conn, 3) == task3);
+ iscsi_del_transfer_task(&conn, 13);
+ CU_ASSERT(get_transfer_task(&conn, 3) == NULL);
+
+ CU_ASSERT(get_transfer_task(&conn, 4) == task4);
+ iscsi_del_transfer_task(&conn, 14);
+ CU_ASSERT(get_transfer_task(&conn, 4) == NULL);
+
+ CU_ASSERT(get_transfer_task(&conn, 5) == task5);
+ iscsi_del_transfer_task(&conn, 15);
+ CU_ASSERT(get_transfer_task(&conn, 5) == NULL);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+ }
+
+ iscsi_put_pdu(pdu5);
+ iscsi_put_pdu(pdu4);
+ iscsi_put_pdu(pdu3);
+ iscsi_put_pdu(pdu2);
+ iscsi_put_pdu(pdu1);
+}
+
+static void
+clear_all_transfer_tasks_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_task *task1, *task2, *task3, *task4, *task5, *task6;
+ struct spdk_iscsi_pdu *pdu1, *pdu2, *pdu3, *pdu4, *pdu5, *pdu6, *pdu;
+ struct spdk_iscsi_pdu *mgmt_pdu1, *mgmt_pdu2;
+ struct spdk_scsi_lun lun1 = {}, lun2 = {};
+ uint32_t alloc_cmd_sn;
+ int rc;
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+ TAILQ_INIT(&conn.queued_r2t_tasks);
+
+ alloc_cmd_sn = 10;
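+	/*
+	 * Six write tasks are spread across two LUNs with two management PDUs
+	 * interleaved, so that clearing transfer tasks per LUN and per CmdSN
+	 * boundary can be verified.
+	 */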
+
+ task1 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task1 != NULL);
+ pdu1 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ pdu1->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu1->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task1->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task1->scsi.lun = &lun1;
+ iscsi_task_set_pdu(task1, pdu1);
+
+ rc = add_transfer_task(&conn, task1);
+ CU_ASSERT(rc == 0);
+
+ mgmt_pdu1 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(mgmt_pdu1 != NULL);
+
+ mgmt_pdu1->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+
+ task2 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task2 != NULL);
+ pdu2 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ pdu2->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu2->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task2->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task2->scsi.lun = &lun1;
+ iscsi_task_set_pdu(task2, pdu2);
+
+ rc = add_transfer_task(&conn, task2);
+ CU_ASSERT(rc == 0);
+
+ task3 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task3 != NULL);
+ pdu3 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu3 != NULL);
+
+ pdu3->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu3->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task3->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task3->scsi.lun = &lun1;
+ iscsi_task_set_pdu(task3, pdu3);
+
+ rc = add_transfer_task(&conn, task3);
+ CU_ASSERT(rc == 0);
+
+ task4 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task4 != NULL);
+ pdu4 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu4 != NULL);
+
+ pdu4->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu4->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task4->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task4->scsi.lun = &lun2;
+ iscsi_task_set_pdu(task4, pdu4);
+
+ rc = add_transfer_task(&conn, task4);
+ CU_ASSERT(rc == 0);
+
+ task5 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task5 != NULL);
+ pdu5 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu5 != NULL);
+
+ pdu5->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu5->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+ task5->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task5->scsi.lun = &lun2;
+ iscsi_task_set_pdu(task5, pdu5);
+
+ rc = add_transfer_task(&conn, task5);
+ CU_ASSERT(rc == 0);
+
+ mgmt_pdu2 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(mgmt_pdu2 != NULL);
+
+ mgmt_pdu2->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+
+ task6 = iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task6 != NULL);
+ pdu6 = iscsi_get_pdu(&conn);
+ SPDK_CU_ASSERT_FATAL(pdu6 != NULL);
+
+ pdu6->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ pdu6->cmd_sn = alloc_cmd_sn;
+ alloc_cmd_sn++;
+	task6->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task6->scsi.lun = &lun2;
+ iscsi_task_set_pdu(task6, pdu6);
+
+ rc = add_transfer_task(&conn, task6);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(conn.ttt == 4);
+
+ CU_ASSERT(get_transfer_task(&conn, 1) == task1);
+ CU_ASSERT(get_transfer_task(&conn, 2) == task2);
+ CU_ASSERT(get_transfer_task(&conn, 3) == task3);
+ CU_ASSERT(get_transfer_task(&conn, 4) == task4);
+ CU_ASSERT(get_transfer_task(&conn, 5) == NULL);
+
+ iscsi_clear_all_transfer_task(&conn, &lun1, mgmt_pdu1);
+
+ CU_ASSERT(!TAILQ_EMPTY(&conn.queued_r2t_tasks));
+ CU_ASSERT(get_transfer_task(&conn, 1) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 2) == task2);
+ CU_ASSERT(get_transfer_task(&conn, 3) == task3);
+ CU_ASSERT(get_transfer_task(&conn, 4) == task4);
+ CU_ASSERT(get_transfer_task(&conn, 5) == task5);
+ CU_ASSERT(get_transfer_task(&conn, 6) == NULL);
+
+ iscsi_clear_all_transfer_task(&conn, &lun1, NULL);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.queued_r2t_tasks));
+ CU_ASSERT(get_transfer_task(&conn, 1) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 2) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 3) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 4) == task4);
+ CU_ASSERT(get_transfer_task(&conn, 5) == task5);
+ CU_ASSERT(get_transfer_task(&conn, 6) == task6);
+
+ iscsi_clear_all_transfer_task(&conn, &lun2, mgmt_pdu2);
+
+ CU_ASSERT(get_transfer_task(&conn, 4) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 5) == NULL);
+ CU_ASSERT(get_transfer_task(&conn, 6) == task6);
+
+ iscsi_clear_all_transfer_task(&conn, NULL, NULL);
+
+ CU_ASSERT(get_transfer_task(&conn, 6) == NULL);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ iscsi_put_pdu(pdu);
+ }
+
+ iscsi_put_pdu(mgmt_pdu2);
+ iscsi_put_pdu(mgmt_pdu1);
+ iscsi_put_pdu(pdu6);
+ iscsi_put_pdu(pdu5);
+ iscsi_put_pdu(pdu4);
+ iscsi_put_pdu(pdu3);
+ iscsi_put_pdu(pdu2);
+ iscsi_put_pdu(pdu1);
+}
+
+static void
+build_iovs_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iovec iovs[5] = {};
+ uint8_t *data;
+ uint32_t mapped_length = 0;
+ int rc;
+
+ conn.header_digest = true;
+ conn.data_digest = true;
+
+ DSET24(&pdu.bhs.data_segment_len, 512);
+ data = calloc(1, 512);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ pdu.data = data;
+
+ pdu.bhs.total_ahs_len = 0;
+ pdu.bhs.opcode = ISCSI_OP_SCSI;
+
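+	/*
+	 * With header and data digests enabled, a complete PDU maps to four iovecs:
+	 * BHS, header digest, data, and data digest. writev_offset selects where in
+	 * that sequence the next write starts.
+	 */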
+ pdu.writev_offset = 0;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 512);
+ CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[3].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN / 2;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(iovs[0].iov_base == (void *)((uint8_t *)&pdu.bhs + ISCSI_BHS_LEN / 2));
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN / 2);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 512);
+ CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[3].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN / 2 + ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[1].iov_len == 512);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[2].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN / 2;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_base == (void *)((uint8_t *)pdu.header_digest + ISCSI_DIGEST_LEN / 2));
+ CU_ASSERT(iovs[0].iov_len == ISCSI_DIGEST_LEN / 2);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[1].iov_len == 512);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[2].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_DIGEST_LEN / 2 + 512 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(iovs[0].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[0].iov_len == 512);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == 512 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN / 2;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (void *)((uint8_t *)pdu.data_digest + ISCSI_DIGEST_LEN / 2));
+ CU_ASSERT(iovs[0].iov_len == ISCSI_DIGEST_LEN / 2);
+ CU_ASSERT(mapped_length == ISCSI_DIGEST_LEN / 2);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN;
+ rc = iscsi_build_iovs(&conn, iovs, 5, &pdu, &mapped_length);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(mapped_length == 0);
+
+ pdu.writev_offset = 0;
+ rc = iscsi_build_iovs(&conn, iovs, 1, &pdu, &mapped_length);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN);
+
+ rc = iscsi_build_iovs(&conn, iovs, 2, &pdu, &mapped_length);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN);
+
+ rc = iscsi_build_iovs(&conn, iovs, 3, &pdu, &mapped_length);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 512);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512);
+
+ rc = iscsi_build_iovs(&conn, iovs, 4, &pdu, &mapped_length);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 512);
+ CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[3].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 512 + ISCSI_DIGEST_LEN);
+
+ free(data);
+}
+
+static void
+build_iovs_with_md_test(void)
+{
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iovec iovs[6] = {};
+ uint8_t *data;
+ uint32_t mapped_length = 0;
+ int rc;
+
+ conn.header_digest = true;
+ conn.data_digest = true;
+
+ DSET24(&pdu.bhs.data_segment_len, 4096 * 2);
+ data = calloc(1, (4096 + 128) * 2);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ pdu.data = data;
+ pdu.data_buf_len = (4096 + 128) * 2;
+
+ pdu.bhs.total_ahs_len = 0;
+ pdu.bhs.opcode = ISCSI_OP_SCSI;
+
+ rc = spdk_dif_ctx_init(&pdu.dif_ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ 0, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ pdu.dif_insert_or_strip = true;
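+	/*
+	 * The buffer is laid out as 4096-byte data blocks, each followed by 128
+	 * bytes of DIF metadata. The generated iovecs must cover only the data
+	 * portions and skip the interleaved metadata.
+	 */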
+
+ pdu.writev_offset = 0;
+ rc = iscsi_build_iovs(&conn, iovs, 6, &pdu, &mapped_length);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 4096);
+ CU_ASSERT(iovs[3].iov_base == (void *)(pdu.data + 4096 + 128));
+ CU_ASSERT(iovs[3].iov_len == 4096);
+ CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[4].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 4096 * 2 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 2048;
+ rc = iscsi_build_iovs(&conn, iovs, 6, &pdu, &mapped_length);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_base == (void *)(pdu.data + 2048));
+ CU_ASSERT(iovs[0].iov_len == 2048);
+ CU_ASSERT(iovs[1].iov_base == (void *)(pdu.data + 4096 + 128));
+ CU_ASSERT(iovs[1].iov_len == 4096);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[2].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == 2048 + 4096 + ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 4096 * 2;
+ rc = iscsi_build_iovs(&conn, iovs, 6, &pdu, &mapped_length);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(mapped_length == ISCSI_DIGEST_LEN);
+
+ pdu.writev_offset = 0;
+ rc = iscsi_build_iovs(&conn, iovs, 3, &pdu, &mapped_length);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.bhs);
+ CU_ASSERT(iovs[0].iov_len == ISCSI_BHS_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)pdu.header_digest);
+ CU_ASSERT(iovs[1].iov_len == ISCSI_DIGEST_LEN);
+ CU_ASSERT(iovs[2].iov_base == (void *)pdu.data);
+ CU_ASSERT(iovs[2].iov_len == 4096);
+ CU_ASSERT(mapped_length == ISCSI_BHS_LEN + ISCSI_DIGEST_LEN + 4096);
+
+ free(data);
+}
+
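+/*
+ * Helpers that pop the queued response PDU from g_write_pdu_list and verify
+ * either the reject reason or the login status class/detail.
+ */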
+static void
+check_iscsi_reject(struct spdk_iscsi_pdu *pdu, uint8_t reason)
+{
+ struct spdk_iscsi_pdu *rsp_pdu;
+ struct iscsi_bhs_reject *reject_bhs;
+
+ CU_ASSERT(pdu->is_rejected == true);
+ rsp_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(rsp_pdu != NULL);
+ reject_bhs = (struct iscsi_bhs_reject *)&rsp_pdu->bhs;
+ CU_ASSERT(reject_bhs->reason == reason);
+
+ TAILQ_REMOVE(&g_write_pdu_list, rsp_pdu, tailq);
+ iscsi_put_pdu(rsp_pdu);
+ pdu->is_rejected = false;
+}
+
+static void
+check_login_response(uint8_t status_class, uint8_t status_detail)
+{
+ struct spdk_iscsi_pdu *rsp_pdu;
+ struct iscsi_bhs_login_rsp *login_rsph;
+
+ rsp_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(rsp_pdu != NULL);
+ login_rsph = (struct iscsi_bhs_login_rsp *)&rsp_pdu->bhs;
+ CU_ASSERT(login_rsph->status_class == status_class);
+ CU_ASSERT(login_rsph->status_detail == status_detail);
+
+ TAILQ_REMOVE(&g_write_pdu_list, rsp_pdu, tailq);
+ iscsi_put_pdu(rsp_pdu);
+}
+
+static void
+pdu_hdr_op_login_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iscsi_bhs_login_req *login_reqh;
+ int rc;
+
+ login_reqh = (struct iscsi_bhs_login_req *)&pdu.bhs;
+
+	/* Case 1 - On a discovery session, the target only accepts text requests with the
+	 * SendTargets key and logout requests with the reason "close the session".
+	 */
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+ conn.full_feature = true;
+ conn.sess = &sess;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+	/* Case 2 - For a login request, the data segment length must not exceed 8KB,
+	 * the default FirstBurstLength.
+	 */
+ sess.session_type = SESSION_TYPE_INVALID;
+ conn.full_feature = false;
+ conn.sess = NULL;
+ pdu.data_segment_len = SPDK_ISCSI_FIRST_BURST_LENGTH + 1;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 3 - PDU pool is empty */
+ pdu.data_segment_len = SPDK_ISCSI_FIRST_BURST_LENGTH;
+ g_pdu_pool_is_empty = true;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 4 - A login request with the C bit set to 1 must have the T bit set to 0. */
+ g_pdu_pool_is_empty = false;
+ login_reqh->flags |= ISCSI_LOGIN_TRANSIT;
+ login_reqh->flags |= ISCSI_LOGIN_CONTINUE;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_login_response(ISCSI_CLASS_INITIATOR_ERROR, ISCSI_LOGIN_INITIATOR_ERROR);
+
+ /* Case 5 - Both version-min and version-max must be set to 0x00. */
+ login_reqh->flags = 0;
+ login_reqh->version_min = ISCSI_VERSION + 1;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_login_response(ISCSI_CLASS_INITIATOR_ERROR, ISCSI_LOGIN_UNSUPPORTED_VERSION);
+
+	/* Case 6 - The T bit is correctly set to 1, but an invalid stage code is set in NSG. */
+ login_reqh->version_min = ISCSI_VERSION;
+ login_reqh->flags |= ISCSI_LOGIN_TRANSIT;
+ login_reqh->flags |= ISCSI_NSG_RESERVED_CODE;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_login_response(ISCSI_CLASS_INITIATOR_ERROR, ISCSI_LOGIN_INITIATOR_ERROR);
+
+ /* Case 7 - Login request is correct. Login response is initialized and set to
+ * the current connection.
+ */
+ login_reqh->flags = 0;
+
+ rc = iscsi_pdu_hdr_op_login(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(conn.login_rsp_pdu != NULL);
+
+ iscsi_put_pdu(conn.login_rsp_pdu);
+}
+
+static void
+pdu_hdr_op_text_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iscsi_bhs_text_req *text_reqh;
+ int rc;
+
+ text_reqh = (struct iscsi_bhs_text_req *)&pdu.bhs;
+
+ conn.sess = &sess;
+
+ /* Case 1 - Data segment length for text request must not be more than
+ * FirstBurstLength plus extra space to account for digests.
+ */
+ pdu.data_segment_len = iscsi_get_max_immediate_data_size() + 1;
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 2 - A text request with the C bit set to 1 must have the F bit set to 0. */
+ pdu.data_segment_len = iscsi_get_max_immediate_data_size();
+ text_reqh->flags |= ISCSI_FLAG_FINAL;
+ text_reqh->flags |= ISCSI_TEXT_CONTINUE;
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == -1);
+
+	/* Case 3 - ExpStatSN of the text request is expected to match StatSN of the current
+	 * connection. However, the StarPort iSCSI initiator does not follow this expectation.
+	 * In that case we overwrite StatSN with ExpStatSN and process the request as correct.
+	 */
+ text_reqh->flags = 0;
+ to_be32(&text_reqh->exp_stat_sn, 1234);
+ to_be32(&conn.StatSN, 4321);
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(conn.StatSN == 1234);
+
+	/* Case 4 - The text request is the first in a sequence of text requests and responses,
+	 * so its ITT is saved in the current session.
+	 */
+ sess.current_text_itt = 0xffffffffU;
+ to_be32(&text_reqh->itt, 5678);
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(sess.current_text_itt == 5678);
+
+	/* Case 5 - If a text request is sent as part of a sequence of text requests and
+	 * responses, its ITT must be the same for all requests in the sequence. Here it is not.
+	 */
+ sess.current_text_itt = 5679;
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+	/* Case 6 - Unlike case 5, its ITT matches the value saved in the session. */
+ text_reqh->flags = 0;
+ sess.current_text_itt = 5678;
+
+ rc = iscsi_pdu_hdr_op_text(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+check_logout_response(uint8_t response, uint32_t stat_sn, uint32_t exp_cmd_sn,
+ uint32_t max_cmd_sn)
+{
+ struct spdk_iscsi_pdu *rsp_pdu;
+ struct iscsi_bhs_logout_resp *logout_rsph;
+
+ rsp_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(rsp_pdu != NULL);
+ logout_rsph = (struct iscsi_bhs_logout_resp *)&rsp_pdu->bhs;
+ CU_ASSERT(logout_rsph->response == response);
+ CU_ASSERT(from_be32(&logout_rsph->stat_sn) == stat_sn);
+ CU_ASSERT(from_be32(&logout_rsph->exp_cmd_sn) == exp_cmd_sn);
+ CU_ASSERT(from_be32(&logout_rsph->max_cmd_sn) == max_cmd_sn);
+
+ TAILQ_REMOVE(&g_write_pdu_list, rsp_pdu, tailq);
+ iscsi_put_pdu(rsp_pdu);
+}
+
+static void
+pdu_hdr_op_logout_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iscsi_bhs_logout_req *logout_reqh;
+ int rc;
+
+ logout_reqh = (struct iscsi_bhs_logout_req *)&pdu.bhs;
+
+	/* Case 1 - On a discovery session, the target accepts a logout request only with
+	 * the reason "close the session".
+	 */
+ logout_reqh->reason = 1;
+ conn.sess = &sess;
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+
+ rc = iscsi_pdu_hdr_op_logout(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 2 - Session is not established yet but connection was closed successfully. */
+ conn.sess = NULL;
+ conn.StatSN = 1234;
+ to_be32(&logout_reqh->exp_stat_sn, 1234);
+ pdu.cmd_sn = 5678;
+
+ rc = iscsi_pdu_hdr_op_logout(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_logout_response(0, 1234, 5678, 5678);
+ CU_ASSERT(conn.StatSN == 1235);
+
+ /* Case 3 - Session type is normal but CID was not found. Hence connection or session
+ * was not closed.
+ */
+ sess.session_type = SESSION_TYPE_NORMAL;
+ sess.ExpCmdSN = 5679;
+ sess.connections = 1;
+ conn.sess = &sess;
+ conn.id = 1;
+
+ rc = iscsi_pdu_hdr_op_logout(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_logout_response(1, 1235, 5679, 1);
+ CU_ASSERT(conn.StatSN == 1236);
+ CU_ASSERT(sess.MaxCmdSN == 1);
+
+ /* Case 4 - Session type is normal and CID was found. Connection or session was closed
+ * successfully.
+ */
+ to_be16(&logout_reqh->cid, 1);
+
+ rc = iscsi_pdu_hdr_op_logout(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_logout_response(0, 1236, 5679, 2);
+ CU_ASSERT(conn.StatSN == 1237);
+ CU_ASSERT(sess.MaxCmdSN == 2);
+
+ /* Case 5 - PDU pool is empty. */
+ g_pdu_pool_is_empty = true;
+
+ rc = iscsi_pdu_hdr_op_logout(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ g_pdu_pool_is_empty = false;
+}
+
+static void
+check_scsi_task(struct spdk_iscsi_pdu *pdu, enum spdk_scsi_data_dir dir)
+{
+ struct spdk_iscsi_task *task;
+
+ task = pdu->task;
+ CU_ASSERT(task != NULL);
+ CU_ASSERT(task->pdu == pdu);
+ CU_ASSERT(task->scsi.dxfer_dir == dir);
+
+ iscsi_task_put(task);
+ pdu->task = NULL;
+}
+
+static void
+pdu_hdr_op_scsi_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct iscsi_bhs_scsi_req *scsi_reqh;
+ int rc;
+
+ scsi_reqh = (struct iscsi_bhs_scsi_req *)&pdu.bhs;
+
+ conn.sess = &sess;
+ conn.dev = &dev;
+
+	/* Case 1 - A SCSI command is acceptable only on a normal session. */
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 2 - Task pool is empty. */
+ g_task_pool_is_empty = true;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ g_task_pool_is_empty = false;
+
+ /* Case 3 - bidirectional operations (both R and W flags are set to 1) are not supported. */
+ sess.session_type = SESSION_TYPE_NORMAL;
+ scsi_reqh->read_bit = 1;
+ scsi_reqh->write_bit = 1;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+	/* Case 4 - The LUN has been hot-removed, so the function returns immediately. */
+ scsi_reqh->write_bit = 0;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pdu.task == NULL);
+
+ /* Case 5 - SCSI read command PDU is correct, and the configured iSCSI task is set to the PDU. */
+ dev.lun[0] = &lun;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_scsi_task(&pdu, SPDK_SCSI_DIR_FROM_DEV);
+
+ /* Case 6 - For SCSI write command PDU, its data segment length must not be more than
+ * FirstBurstLength plus extra space to account for digests.
+ */
+ scsi_reqh->read_bit = 0;
+ scsi_reqh->write_bit = 1;
+ pdu.data_segment_len = iscsi_get_max_immediate_data_size() + 1;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 7 - For SCSI write command PDU, its data segment length must not be more than
+ * Expected Data Transfer Length (EDTL).
+ */
+ pdu.data_segment_len = iscsi_get_max_immediate_data_size();
+ to_be32(&scsi_reqh->expected_data_xfer_len, pdu.data_segment_len - 1);
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+	/* Case 8 - If ImmediateData is not enabled for the session, a SCSI write command PDU
+	 * cannot carry a data segment.
+	 */
+ to_be32(&scsi_reqh->expected_data_xfer_len, pdu.data_segment_len);
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 9 - For SCSI write command PDU, its data segment length must not be more
+ * than FirstBurstLength.
+ */
+ sess.ImmediateData = true;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+ /* Case 10 - SCSI write command PDU is correct, and the configured iSCSI task is set to the PDU. */
+ sess.FirstBurstLength = pdu.data_segment_len;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_scsi_task(&pdu, SPDK_SCSI_DIR_TO_DEV);
+
+ /* Case 11 - R and W must not both be 0 when EDTL is not 0. */
+ scsi_reqh->write_bit = 0;
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_INVALID_PDU_FIELD);
+
+	/* Case 12 - R and W are both 0 and EDTL is also 0, and hence the SCSI command PDU is accepted. */
+ to_be32(&scsi_reqh->expected_data_xfer_len, 0);
+
+ rc = iscsi_pdu_hdr_op_scsi(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_scsi_task(&pdu, SPDK_SCSI_DIR_NONE);
+}
+
+static void
+check_iscsi_task_mgmt_response(uint8_t response, uint32_t task_tag, uint32_t stat_sn,
+ uint32_t exp_cmd_sn, uint32_t max_cmd_sn)
+{
+ struct spdk_iscsi_pdu *rsp_pdu;
+ struct iscsi_bhs_task_resp *rsph;
+
+ rsp_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(rsp_pdu != NULL);
+ rsph = (struct iscsi_bhs_task_resp *)&rsp_pdu->bhs;
+ CU_ASSERT(rsph->response == response);
+ CU_ASSERT(from_be32(&rsph->itt) == task_tag);
+ CU_ASSERT(from_be32(&rsph->exp_cmd_sn) == exp_cmd_sn);
+ CU_ASSERT(from_be32(&rsph->max_cmd_sn) == max_cmd_sn);
+
+ TAILQ_REMOVE(&g_write_pdu_list, rsp_pdu, tailq);
+ iscsi_put_pdu(rsp_pdu);
+}
+
+static void
+pdu_hdr_op_task_mgmt_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct iscsi_bhs_task_req *task_reqh;
+ int rc;
+
+	/* TBD: For now, this test covers only the error paths before an iSCSI task is created.
+	 * Testing iSCSI task creation in iscsi_pdu_hdr_op_task() in a unit test is not simple,
+	 * so it will be done separately later.
+	 */
+
+ task_reqh = (struct iscsi_bhs_task_req *)&pdu.bhs;
+
+ conn.sess = &sess;
+ conn.dev = &dev;
+
+	/* Case 1 - A Task Management Function request PDU is acceptable only on a normal session. */
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+	/* Case 2 - The LUN has been hot-removed, so a "LUN does not exist" response is sent. */
+ sess.session_type = SESSION_TYPE_NORMAL;
+ task_reqh->immediate = 0;
+ to_be32(&task_reqh->itt, 1234);
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_LUN_NOT_EXIST, 1234, 0, 0, 1);
+
+ /* Case 3 - Unassigned function is specified. "Function rejected" response is sent. */
+ dev.lun[0] = &lun;
+ task_reqh->flags = 0;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_REJECTED, 1234, 0, 0, 2);
+
+ /* Case 4 - CLEAR TASK SET is not supported. "Task management function not supported"
+ * response is sent.
+ */
+ task_reqh->flags = ISCSI_TASK_FUNC_CLEAR_TASK_SET;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_FUNC_NOT_SUPPORTED, 1234, 0, 0, 3);
+
+ /* Case 5 - CLEAR ACA is not supported. "Task management function not supported" is sent. */
+ task_reqh->flags = ISCSI_TASK_FUNC_CLEAR_ACA;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_FUNC_NOT_SUPPORTED, 1234, 0, 0, 4);
+
+	/* Case 6 - TARGET WARM RESET is not supported. "Task management function not supported"
+	 * is sent.
+	 */
+ task_reqh->flags = ISCSI_TASK_FUNC_TARGET_WARM_RESET;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_FUNC_NOT_SUPPORTED, 1234, 0, 0, 5);
+
+	/* Case 7 - TARGET COLD RESET is not supported. "Task management function not supported"
+	 * is sent.
+	 */
+ task_reqh->flags = ISCSI_TASK_FUNC_TARGET_COLD_RESET;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_FUNC_NOT_SUPPORTED, 1234, 0, 0, 6);
+
+ /* Case 8 - TASK REASSIGN is not supported. "Task management function not supported" is sent. */
+ task_reqh->flags = ISCSI_TASK_FUNC_TASK_REASSIGN;
+
+ rc = iscsi_pdu_hdr_op_task(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_task_mgmt_response(ISCSI_TASK_FUNC_RESP_FUNC_NOT_SUPPORTED, 1234, 0, 0, 7);
+}
+
+static void
+pdu_hdr_op_nopout_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct iscsi_bhs_nop_out *nopout_reqh;
+ int rc;
+
+ nopout_reqh = (struct iscsi_bhs_nop_out *)&pdu.bhs;
+
+ conn.sess = &sess;
+
+	/* Case 1 - A NOP-Out PDU is acceptable only on a normal session. */
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+
+ rc = iscsi_pdu_hdr_op_nopout(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 2 - The length of the reflected ping data is limited to MaxRecvDataSegmentLength. */
+ sess.session_type = SESSION_TYPE_NORMAL;
+ pdu.data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH + 1;
+
+ rc = iscsi_pdu_hdr_op_nopout(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+	/* Case 3 - If the Initiator Task Tag contains 0xffffffff, the I bit must be set
+	 * to 1 and the Target Transfer Tag should be copied from a NOP-In PDU. This case
+	 * satisfies the former condition but not the latter; that error is ignored
+	 * for now.
+	 */
+ pdu.data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ conn.id = 1234;
+ to_be32(&nopout_reqh->ttt, 1235);
+ to_be32(&nopout_reqh->itt, 0xffffffffU);
+ nopout_reqh->immediate = 1;
+
+ rc = iscsi_pdu_hdr_op_nopout(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+
+	/* Case 4 - This case does not satisfy the former condition above; that error is not ignored. */
+ nopout_reqh->immediate = 0;
+
+ rc = iscsi_pdu_hdr_op_nopout(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+}
+
+static void
+check_iscsi_r2t(struct spdk_iscsi_task *task, uint32_t len)
+{
+ struct spdk_iscsi_pdu *rsp_pdu;
+ struct iscsi_bhs_r2t *rsph;
+
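+	/* An R2T PDU should have been queued on g_write_pdu_list; verify its LUN,
+	 * buffer offset, and desired data transfer length, then release it.
+	 */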
+ rsp_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(rsp_pdu != NULL);
+ rsph = (struct iscsi_bhs_r2t *)&rsp_pdu->bhs;
+ CU_ASSERT(rsph->opcode == ISCSI_OP_R2T);
+ CU_ASSERT(from_be64(&rsph->lun) == spdk_scsi_lun_id_int_to_fmt(task->lun_id));
+ CU_ASSERT(from_be32(&rsph->buffer_offset) == task->next_r2t_offset);
+ CU_ASSERT(from_be32(&rsph->desired_xfer_len) == len);
+
+ TAILQ_REMOVE(&g_write_pdu_list, rsp_pdu, tailq);
+ iscsi_put_pdu(rsp_pdu);
+}
+
+static void
+pdu_hdr_op_data_test(void)
+{
+ struct spdk_iscsi_sess sess = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_pdu pdu = {};
+ struct spdk_iscsi_task primary = {};
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct iscsi_bhs_data_out *data_reqh;
+ int rc;
+
+ data_reqh = (struct iscsi_bhs_data_out *)&pdu.bhs;
+
+ conn.sess = &sess;
+ conn.dev = &dev;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+	/* Case 1 - A SCSI Data-Out PDU is acceptable only on a normal session. */
+ sess.session_type = SESSION_TYPE_DISCOVERY;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 2 - Data segment length must not be more than MaxRecvDataSegmentLength. */
+ sess.session_type = SESSION_TYPE_NORMAL;
+ pdu.data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH + 1;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+	/* Case 3 - No R2T task whose Target Transfer Tag matches is found. */
+ pdu.data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_INVALID_PDU_FIELD);
+
+	/* Case 4 - An R2T task whose Target Transfer Tag matches is found, but the data
+	 * segment length is more than the Desired Data Transfer Length of the R2T.
+	 */
+ primary.desired_data_transfer_length = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH - 1;
+ conn.pending_r2t = 1;
+ TAILQ_INSERT_TAIL(&conn.active_r2t_tasks, &primary, link);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+	/* Case 5 - The Initiator Task Tag doesn't match that of the R2T task. */
+ primary.desired_data_transfer_length = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ to_be32(&data_reqh->itt, 1);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_INVALID_PDU_FIELD);
+
+ /* Case 6 - DataSN doesn't match the Data-Out PDU number within the current
+ * output sequence.
+ */
+ to_be32(&data_reqh->itt, 0);
+ to_be32(&data_reqh->data_sn, 1);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ check_iscsi_reject(&pdu, ISCSI_REASON_PROTOCOL_ERROR);
+
+	/* Case 7 - Data-Out PDUs in an output sequence must have increasing buffer offsets
+	 * and must not overlap, but these conditions are not satisfied here.
+	 */
+ to_be32(&data_reqh->data_sn, 0);
+ to_be32(&data_reqh->buffer_offset, 4096);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ /* Case 8 - Data segment length must not exceed MaxBurstLength. */
+ to_be32(&data_reqh->buffer_offset, 0);
+ sess.MaxBurstLength = pdu.data_segment_len - 1;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+	/* Case 9 - The LUN has been hot-removed. */
+ sess.MaxBurstLength = pdu.data_segment_len * 4;
+ to_be32(&data_reqh->data_sn, primary.r2t_datasn);
+ to_be32(&data_reqh->buffer_offset, primary.next_expected_r2t_offset);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pdu.task == NULL);
+
+	/* Case 10 - The SCSI Data-Out PDU is correct and processed. The created task is
+	 * attached to the PDU, but its F bit is 0 and hence no R2T is sent.
+	 */
+ dev.lun[0] = &lun;
+ to_be32(&data_reqh->data_sn, primary.r2t_datasn);
+ to_be32(&data_reqh->buffer_offset, primary.next_expected_r2t_offset);
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pdu.task != NULL);
+ iscsi_task_put(pdu.task);
+ pdu.task = NULL;
+
+	/* Case 11 - The SCSI Data-Out PDU is correct and processed. The created task is
+	 * attached to the PDU, its F bit is 1, and hence an R2T is sent.
+	 */
+ data_reqh->flags |= ISCSI_FLAG_FINAL;
+ to_be32(&data_reqh->data_sn, primary.r2t_datasn);
+ to_be32(&data_reqh->buffer_offset, primary.next_expected_r2t_offset);
+ primary.scsi.transfer_len = pdu.data_segment_len * 5;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pdu.task != NULL);
+ check_iscsi_r2t(pdu.task, pdu.data_segment_len * 4);
+ iscsi_task_put(pdu.task);
+
+ /* Case 12 - Task pool is empty. */
+ to_be32(&data_reqh->data_sn, primary.r2t_datasn);
+ to_be32(&data_reqh->buffer_offset, primary.next_expected_r2t_offset);
+ g_task_pool_is_empty = true;
+
+ rc = iscsi_pdu_hdr_op_data(&conn, &pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ g_task_pool_is_empty = false;
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("iscsi_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, op_login_check_target_test);
+ CU_ADD_TEST(suite, op_login_session_normal_test);
+ CU_ADD_TEST(suite, maxburstlength_test);
+ CU_ADD_TEST(suite, underflow_for_read_transfer_test);
+ CU_ADD_TEST(suite, underflow_for_zero_read_transfer_test);
+ CU_ADD_TEST(suite, underflow_for_request_sense_test);
+ CU_ADD_TEST(suite, underflow_for_check_condition_test);
+ CU_ADD_TEST(suite, add_transfer_task_test);
+ CU_ADD_TEST(suite, get_transfer_task_test);
+ CU_ADD_TEST(suite, del_transfer_task_test);
+ CU_ADD_TEST(suite, clear_all_transfer_tasks_test);
+ CU_ADD_TEST(suite, build_iovs_test);
+ CU_ADD_TEST(suite, build_iovs_with_md_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_login_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_text_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_logout_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_scsi_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_task_mgmt_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_nopout_test);
+ CU_ADD_TEST(suite, pdu_hdr_op_data_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/param.c/.gitignore b/src/spdk/test/unit/lib/iscsi/param.c/.gitignore
new file mode 100644
index 000000000..269921462
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/param.c/.gitignore
@@ -0,0 +1 @@
+param_ut
diff --git a/src/spdk/test/unit/lib/iscsi/param.c/Makefile b/src/spdk/test/unit/lib/iscsi/param.c/Makefile
new file mode 100644
index 000000000..d1b567b54
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/param.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = param_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/param.c/param_ut.c b/src/spdk/test/unit/lib/iscsi/param.c/param_ut.c
new file mode 100644
index 000000000..ccf62643f
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/param.c/param_ut.c
@@ -0,0 +1,400 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/scsi.h"
+
+#include "spdk_cunit.h"
+
+#include "../common.c"
+#include "iscsi/param.c"
+
+#include "spdk_internal/mock.h"
+
+struct spdk_iscsi_globals g_iscsi;
+
+DEFINE_STUB(iscsi_find_tgt_node, struct spdk_iscsi_tgt_node *,
+ (const char *target_name), NULL);
+
+DEFINE_STUB(iscsi_tgt_node_access, bool,
+ (struct spdk_iscsi_conn *conn, struct spdk_iscsi_tgt_node *target,
+ const char *iqn, const char *addr),
+ false);
+
+DEFINE_STUB(iscsi_send_tgts, int,
+ (struct spdk_iscsi_conn *conn, const char *iiqn, const char *iaddr,
+ const char *tiqn, uint8_t *data, int alloc_len, int data_len),
+ 0);
+
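+/* Negotiate FirstBurstLength, MaxBurstLength, and InitialR2T against the given
+ * initiator-proposed values and verify that the negotiated session values stay
+ * within the target's limits.
+ */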
+static void
+burst_length_param_negotation(int FirstBurstLength, int MaxBurstLength,
+ int initialR2T)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct iscsi_param *params;
+ struct iscsi_param **params_p;
+ char data[8192];
+ int rc;
+ int total, len;
+
+ total = 0;
+ params = NULL;
+ params_p = &params;
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(data, 0, 8192);
+
+ sess.ExpCmdSN = 0;
+ sess.MaxCmdSN = 64;
+ sess.session_type = SESSION_TYPE_NORMAL;
+ sess.params = NULL;
+ sess.MaxBurstLength = 65536;
+ sess.InitialR2T = true;
+ sess.FirstBurstLength = SPDK_ISCSI_FIRST_BURST_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ /* set default params */
+ rc = iscsi_sess_params_init(&sess.params);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_param_set_int(sess.params, "FirstBurstLength",
+ sess.FirstBurstLength);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_param_set_int(sess.params, "MaxBurstLength",
+ sess.MaxBurstLength);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_param_set(sess.params, "InitialR2T",
+ sess.InitialR2T ? "Yes" : "No");
+ CU_ASSERT(rc == 0);
+
+ conn.full_feature = 1;
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 65536;
+
+ rc = iscsi_conn_params_init(&conn.params);
+ CU_ASSERT(rc == 0);
+
+ /* construct the data */
+ len = snprintf(data + total, 8192 - total, "%s=%d",
+ "FirstBurstLength", FirstBurstLength);
+ total += len + 1;
+
+ len = snprintf(data + total, 8192 - total, "%s=%d",
+ "MaxBurstLength", MaxBurstLength);
+ total += len + 1;
+
+ len = snprintf(data + total, 8192 - total, "%s=%d",
+ "InitialR2T", initialR2T);
+ total += len + 1;
+
+ /* add one extra NUL byte at the end to match real iSCSI params */
+ total++;
+
+ /* store incoming parameters */
+ rc = iscsi_parse_params(params_p, data, total, false, NULL);
+ CU_ASSERT(rc == 0);
+
+ /* negotiate parameters */
+ rc = iscsi_negotiate_params(&conn, params_p,
+ data, 8192, rc);
+ CU_ASSERT(rc > 0);
+
+ rc = iscsi_copy_param2var(&conn);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(conn.sess->FirstBurstLength <= SPDK_ISCSI_FIRST_BURST_LENGTH);
+ CU_ASSERT(conn.sess->FirstBurstLength <= conn.sess->MaxBurstLength);
+ CU_ASSERT(conn.sess->MaxBurstLength <= SPDK_ISCSI_MAX_BURST_LENGTH);
+ CU_ASSERT(conn.sess->MaxOutstandingR2T == 1);
+
+ iscsi_param_free(sess.params);
+ iscsi_param_free(conn.params);
+ iscsi_param_free(*params_p);
+}
+
+static void
+param_negotiation_test(void)
+{
+ burst_length_param_negotation(8192, 16384, 0);
+ burst_length_param_negotation(8192, 16384, 1);
+ burst_length_param_negotation(8192, 1024, 1);
+ burst_length_param_negotation(8192, 1024, 0);
+ burst_length_param_negotation(512, 1024, 1);
+ burst_length_param_negotation(512, 1024, 0);
+}
+
+static void
+list_negotiation_test(void)
+{
+ int add_param_value = 0;
+ struct iscsi_param param = {};
+ char *new_val;
+ char valid_list_buf[1024];
+ char in_val_buf[1024];
+
+#define TEST_LIST(valid_list, in_val, expected_result) \
+ do { \
+ snprintf(valid_list_buf, sizeof(valid_list_buf), "%s", valid_list); \
+ snprintf(in_val_buf, sizeof(in_val_buf), "%s", in_val); \
+ new_val = iscsi_negotiate_param_list(&add_param_value, &param, valid_list_buf, in_val_buf, NULL); \
+ if (expected_result) { \
+ SPDK_CU_ASSERT_FATAL(new_val != NULL); \
+ CU_ASSERT_STRING_EQUAL(new_val, expected_result); \
+ } \
+ } while (0)
+
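+	/* The negotiated value is expected to be the first entry in the initiator's
+	 * list (in_val) that also appears in the target's valid_list.
+	 */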
+ TEST_LIST("None", "None", "None");
+ TEST_LIST("CHAP,None", "None", "None");
+ TEST_LIST("CHAP,None", "CHAP", "CHAP");
+ TEST_LIST("KRB5,SRP,CHAP,None", "SRP,CHAP,None", "SRP");
+ TEST_LIST("KRB5,SRP,CHAP,None", "CHAP,SRP,None", "CHAP");
+ TEST_LIST("KRB5,SRP,CHAP,None", "SPKM1,SRP,CHAP,None", "SRP");
+ TEST_LIST("KRB5,SRP,None", "CHAP,None", "None");
+}
+
+#define PARSE(strconst, partial_enabled, partial_text) \
+ data = strconst; \
+ len = sizeof(strconst) - 1; \
+ rc = iscsi_parse_params(&params, data, len, partial_enabled, partial_text)
+
+#define EXPECT_VAL(key, expected_value) \
+ { \
+ const char *val = iscsi_param_get_val(params, key); \
+ CU_ASSERT(val != NULL); \
+ if (val != NULL) { \
+ CU_ASSERT(strcmp(val, expected_value) == 0); \
+ } \
+ }
+
+#define EXPECT_NULL(key) \
+ CU_ASSERT(iscsi_param_get_val(params, key) == NULL)
+
+static void
+parse_valid_test(void)
+{
+ struct iscsi_param *params = NULL;
+ int rc;
+ char *data;
+ int len;
+ char *partial_parameter = NULL;
+
+ /* simple test with a single key=value */
+ PARSE("Abc=def\0", false, NULL);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("Abc", "def");
+
+ /* multiple key=value pairs */
+ PARSE("Aaa=bbbbbb\0Xyz=test\0", false, NULL);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("Aaa", "bbbbbb");
+ EXPECT_VAL("Xyz", "test");
+
+ /* value with embedded '=' */
+ PARSE("A=b=c\0", false, NULL);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("A", "b=c");
+
+ /* CHAP_C=AAAA.... with value length 8192 */
+ len = strlen("CHAP_C=") + ISCSI_TEXT_MAX_VAL_LEN + 1/* null terminators */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ memcpy(data, "CHAP_C", 6);
+ data[6] = '=';
+ data[len - 1] = '\0';
+ rc = iscsi_parse_params(&params, data, len, false, NULL);
+ CU_ASSERT(rc == 0);
+ free(data);
+
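+	/* When partial_enabled is true, an incomplete trailing key=value pair is
+	 * returned through partial_parameter and is expected to be completed by the
+	 * beginning of the data passed to the next call.
+	 */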
+ /* partial parameter: value is partial */
+ PARSE("C=AAA\0D=B", true, &partial_parameter);
+ SPDK_CU_ASSERT_FATAL(partial_parameter != NULL);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "D=B");
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("C", "AAA");
+ EXPECT_NULL("D");
+ PARSE("XXXX\0E=UUUU\0", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("D", "BXXXX");
+ EXPECT_VAL("E", "UUUU");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ /* partial parameter: key is partial */
+ PARSE("IAMAFAK", true, &partial_parameter);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "IAMAFAK");
+ CU_ASSERT(rc == 0);
+ EXPECT_NULL("IAMAFAK");
+ PARSE("EDKEY=TTTT\0F=IIII", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("IAMAFAKEDKEY", "TTTT");
+ EXPECT_VAL("F", "IIII");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ /* Second partial parameter is the only parameter */
+ PARSE("OOOO", true, &partial_parameter);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "OOOO");
+ CU_ASSERT(rc == 0);
+ EXPECT_NULL("OOOO");
+ PARSE("LL=MMMM", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("OOOOLL", "MMMM");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ partial_parameter = NULL;
+ data = "PartialKey=";
+ len = 7;
+ rc = iscsi_parse_params(&params, data, len, true, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "Partial");
+ EXPECT_NULL("PartialKey");
+ PARSE("Key=Value", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("PartialKey", "Value");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ iscsi_param_free(params);
+}
+
+static void
+parse_invalid_test(void)
+{
+ struct iscsi_param *params = NULL;
+ int rc;
+ char *data;
+ int len;
+
+ /* key without '=' */
+ PARSE("Abc\0", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("Abc");
+
+ /* multiple key=value pairs, one missing '=' */
+ PARSE("Abc=def\0Xyz\0Www=test\0", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_VAL("Abc", "def");
+ EXPECT_NULL("Xyz");
+ EXPECT_NULL("Www");
+
+ /* empty key */
+ PARSE("=abcdef", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("");
+
+ /* CHAP_C=AAAA.... with value length 8192 + 1 */
+ len = strlen("CHAP_C=") + ISCSI_TEXT_MAX_VAL_LEN + 1 /* max value len + 1 */ +
+ 1 /* null terminators */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ memcpy(data, "CHAP_C", 6);
+ data[6] = '=';
+ data[len - 1] = '\0';
+ rc = iscsi_parse_params(&params, data, len, false, NULL);
+ free(data);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("CHAP_C");
+
+ /* Test simple value, length of value bigger than 255 */
+ len = strlen("A=") + ISCSI_TEXT_MAX_SIMPLE_VAL_LEN + 1 /* max simple value len + 1 */ +
+ 1 /* null terminators */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ data[1] = '=';
+ data[len - 1] = '\0';
+ rc = iscsi_parse_params(&params, data, len, false, NULL);
+ free(data);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("A");
+
+ /* key length bigger than 63 */
+ len = ISCSI_TEXT_MAX_KEY_LEN + 1 /* max key length + 1 */ + 1 /* = */ + 1 /* A */ +
+ 1 /* null terminators */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ data[64] = '=';
+ data[len - 1] = '\0';
+ rc = iscsi_parse_params(&params, data, len, false, NULL);
+ free(data);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("A");
+
+ /* duplicated key */
+ PARSE("B=BB", false, NULL);
+ CU_ASSERT(rc == 0);
+ PARSE("B=BBBB", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_VAL("B", "BB");
+
+ /* Test where data buffer has non-NULL characters past the end of
+ * the valid data region. This can happen with SPDK iSCSI target,
+ * since data buffers are reused and we do not zero the data buffers
+ * after they are freed since it would be too expensive. Added as
+ * part of fixing an intermittent Calsoft failure that triggered this
+ * bug.
+ */
+ data = "MaxRecvDataSegmentLength=81928";
+ len = strlen(data) - 1;
+ rc = iscsi_parse_params(&params, data, len, false, NULL);
+ EXPECT_VAL("MaxRecvDataSegmentLength", "8192");
+ CU_ASSERT(rc == 0);
+ iscsi_param_free(params);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("iscsi_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, param_negotiation_test);
+ CU_ADD_TEST(suite, list_negotiation_test);
+ CU_ADD_TEST(suite, parse_valid_test);
+ CU_ADD_TEST(suite, parse_invalid_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore b/src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore
new file mode 100644
index 000000000..106ffebc2
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore
@@ -0,0 +1 @@
+portal_grp_ut
diff --git a/src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile b/src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile
new file mode 100644
index 000000000..f3ca0646f
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf
+
+TEST_FILE = portal_grp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c b/src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c
new file mode 100644
index 000000000..a89a1567f
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c
@@ -0,0 +1,419 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/ut_multithread.c"
+#include "common/lib/test_sock.c"
+
+#include "../common.c"
+#include "iscsi/portal_grp.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk_internal/thread.h"
+
+DEFINE_STUB(iscsi_conn_construct, int,
+ (struct spdk_iscsi_portal *portal, struct spdk_sock *sock),
+ 0);
+
+struct spdk_iscsi_globals g_iscsi;
+
+static int
+test_setup(void)
+{
+ TAILQ_INIT(&g_iscsi.portal_head);
+ TAILQ_INIT(&g_iscsi.pg_head);
+ pthread_mutex_init(&g_iscsi.mutex, NULL);
+ return 0;
+}
+
+static void
+portal_create_ipv4_normal_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_create_ipv6_normal_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "[2001:ad6:1234::]";
+ const char *port = "3260";
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_create_ipv4_wildcard_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "*";
+ const char *port = "3260";
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_create_ipv6_wildcard_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "[*]";
+ const char *port = "3260";
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_create_twice_case(void)
+{
+ struct spdk_iscsi_portal *p1, *p2;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+
+ p1 = iscsi_portal_create(host, port);
+ CU_ASSERT(p1 != NULL);
+
+ p2 = iscsi_portal_create(host, port);
+ CU_ASSERT(p2 == NULL);
+
+ iscsi_portal_destroy(p1);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv4_normal_case(void)
+{
+ const char *string = "192.168.2.0:3260";
+ const char *host_str = "192.168.2.0";
+ const char *port_str = "3260";
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ rc = iscsi_parse_portal(string, &p);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+
+}
+
+static void
+parse_portal_ipv6_normal_case(void)
+{
+ const char *string = "[2001:ad6:1234::]:3260";
+ const char *host_str = "[2001:ad6:1234::]";
+ const char *port_str = "3260";
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ rc = iscsi_parse_portal(string, &p);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv4_skip_port_case(void)
+{
+ const char *string = "192.168.2.0";
+ const char *host_str = "192.168.2.0";
+ const char *port_str = "3260";
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ rc = iscsi_parse_portal(string, &p);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv6_skip_port_case(void)
+{
+ const char *string = "[2001:ad6:1234::]";
+ const char *host_str = "[2001:ad6:1234::]";
+ const char *port_str = "3260";
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ rc = iscsi_parse_portal(string, &p);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+
+ iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_grp_register_unregister_case(void)
+{
+ struct spdk_iscsi_portal *p;
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ int rc;
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+
+ pg1 = iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_grp_add_portal(pg1, p);
+
+ rc = iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+ pg2 = iscsi_portal_grp_unregister(1);
+ CU_ASSERT(pg2 != NULL);
+ CU_ASSERT(pg1 == pg2);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.pg_head));
+
+ iscsi_portal_grp_destroy(pg1);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_grp_register_twice_case(void)
+{
+ struct spdk_iscsi_portal *p;
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ int rc;
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+
+ pg1 = iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_grp_add_portal(pg1, p);
+
+ rc = iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc != 0);
+
+ pg2 = iscsi_portal_grp_unregister(1);
+ CU_ASSERT(pg2 != NULL);
+ CU_ASSERT(pg1 == pg2);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.pg_head));
+
+ iscsi_portal_grp_destroy(pg1);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+}
+
+static void
+portal_grp_add_delete_case(void)
+{
+ struct spdk_sock sock = {};
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ struct spdk_iscsi_portal *p;
+ int rc;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+
+ allocate_threads(1);
+ set_thread(0);
+
+	/* mirrors the internals of iscsi_create_portal_group */
+ pg1 = iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = iscsi_portal_create(host, port);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_grp_add_portal(pg1, p);
+
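+	/* Make the mocked spdk_sock_listen() return the local dummy socket so that
+	 * iscsi_portal_grp_open() succeeds without opening a real listening socket.
+	 */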
+ MOCK_SET(spdk_sock_listen, &sock);
+ rc = iscsi_portal_grp_open(pg1);
+ CU_ASSERT(rc == 0);
+ MOCK_CLEAR_P(spdk_sock_listen);
+
+ rc = iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+	/* mirrors the internals of delete_portal_group */
+ pg2 = iscsi_portal_grp_unregister(1);
+ CU_ASSERT(pg2 != NULL);
+ CU_ASSERT(pg1 == pg2);
+
+ iscsi_portal_grp_release(pg2);
+
+ poll_thread(0);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.pg_head));
+
+ free_threads();
+}
+
+static void
+portal_grp_add_delete_twice_case(void)
+{
+ struct spdk_sock sock = {};
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ struct spdk_iscsi_portal *p;
+ int rc;
+
+ const char *host = "192.168.2.0";
+ const char *port1 = "3260", *port2 = "3261";
+
+ allocate_threads(1);
+ set_thread(0);
+
+	/* mirrors the internals of iscsi_create_portal_group */
+ pg1 = iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = iscsi_portal_create(host, port1);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_grp_add_portal(pg1, p);
+
+ MOCK_SET(spdk_sock_listen, &sock);
+ rc = iscsi_portal_grp_open(pg1);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+	/* mirrors the internals of iscsi_create_portal_group */
+ pg2 = iscsi_portal_grp_create(2);
+ CU_ASSERT(pg2 != NULL);
+
+ p = iscsi_portal_create(host, port2);
+ CU_ASSERT(p != NULL);
+
+ iscsi_portal_grp_add_portal(pg2, p);
+
+ rc = iscsi_portal_grp_open(pg2);
+ CU_ASSERT(rc == 0);
+
+ rc = iscsi_portal_grp_register(pg2);
+ CU_ASSERT(rc == 0);
+
+	/* mirrors the internals of destroy_portal_group */
+ iscsi_portal_grp_close(pg1);
+ iscsi_portal_grp_close(pg2);
+
+ poll_thread(0);
+
+ iscsi_portal_grps_destroy();
+
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.portal_head));
+ CU_ASSERT(TAILQ_EMPTY(&g_iscsi.pg_head));
+
+ MOCK_CLEAR_P(spdk_sock_listen);
+
+ free_threads();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("portal_grp_suite", test_setup, NULL);
+
+ CU_ADD_TEST(suite, portal_create_ipv4_normal_case);
+ CU_ADD_TEST(suite, portal_create_ipv6_normal_case);
+ CU_ADD_TEST(suite, portal_create_ipv4_wildcard_case);
+ CU_ADD_TEST(suite, portal_create_ipv6_wildcard_case);
+ CU_ADD_TEST(suite, portal_create_twice_case);
+ CU_ADD_TEST(suite, parse_portal_ipv4_normal_case);
+ CU_ADD_TEST(suite, parse_portal_ipv6_normal_case);
+ CU_ADD_TEST(suite, parse_portal_ipv4_skip_port_case);
+ CU_ADD_TEST(suite, parse_portal_ipv6_skip_port_case);
+ CU_ADD_TEST(suite, portal_grp_register_unregister_case);
+ CU_ADD_TEST(suite, portal_grp_register_twice_case);
+ CU_ADD_TEST(suite, portal_grp_add_delete_case);
+ CU_ADD_TEST(suite, portal_grp_add_delete_twice_case);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore b/src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore
new file mode 100644
index 000000000..010d84b83
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore
@@ -0,0 +1 @@
+tgt_node_ut
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile b/src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile
new file mode 100644
index 000000000..90bd4f990
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = conf
+TEST_FILE = tgt_node_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf
new file mode 100644
index 000000000..6bf5aa664
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf
@@ -0,0 +1,95 @@
+[Global]
+
+# Test that parsing fails if there is no TargetName
+[Failure0]
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1 InitiatorGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if there is no Mapping
+[Failure1]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping does not define Portal or InitiatorGroup
+[Failure2]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping does not define InitiatorGroup
+[Failure3]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping switches PortalGroup/InitiatorGroup order
+[Failure4]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping InitiatorGroup1 PortalGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping uses invalid InitiatorGroup0
+[Failure5]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1 InitiatorGroup0
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping uses invalid PortalGroup0
+[Failure6]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup0 InitiatorGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if AuthMethod is invalid
+[Failure7]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1 InitiatorGroup1
+ AuthMethod SomeGarbage
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c
new file mode 100644
index 000000000..3f3bda39b
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c
@@ -0,0 +1,832 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/scsi.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_internal/mock.h"
+
+#include "../common.c"
+#include "iscsi/tgt_node.c"
+#include "scsi/scsi_internal.h"
+#include "unit/lib/json_mock.c"
+#include "common/lib/test_env.c"
+
+struct spdk_iscsi_globals g_iscsi;
+
+const char *config_file;
+
+DEFINE_STUB(spdk_scsi_dev_get_id,
+ int,
+ (const struct spdk_scsi_dev *dev),
+ 0);
+
+DEFINE_STUB(spdk_scsi_lun_get_bdev_name,
+ const char *,
+ (const struct spdk_scsi_lun *lun),
+ NULL);
+
+DEFINE_STUB(spdk_scsi_lun_get_id,
+ int,
+ (const struct spdk_scsi_lun *lun),
+ 0);
+
+DEFINE_STUB_V(spdk_iscsi_op_abort_task_set,
+ (struct spdk_iscsi_task *task,
+ uint8_t function));
+
+DEFINE_STUB(spdk_sock_is_ipv6, bool, (struct spdk_sock *sock), false);
+
+DEFINE_STUB(spdk_sock_is_ipv4, bool, (struct spdk_sock *sock), false);
+
+DEFINE_STUB(iscsi_portal_grp_find_by_tag,
+ struct spdk_iscsi_portal_grp *, (int tag), NULL);
+
+DEFINE_STUB(iscsi_init_grp_find_by_tag, struct spdk_iscsi_init_grp *,
+ (int tag), NULL);
+
+struct spdk_scsi_lun *
+spdk_scsi_dev_get_lun(struct spdk_scsi_dev *dev, int lun_id)
+{
+ if (lun_id < 0 || lun_id >= SPDK_SCSI_DEV_MAX_LUN) {
+ return NULL;
+ }
+
+ return dev->lun[lun_id];
+}
+
+int
+spdk_scsi_dev_add_lun(struct spdk_scsi_dev *dev, const char *bdev_name, int lun_id,
+ void (*hotremove_cb)(const struct spdk_scsi_lun *, void *),
+ void *hotremove_ctx)
+{
+ if (bdev_name == NULL) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static void
+add_lun_test_cases(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode = {};
+ int lun_id = 0;
+ char *bdev_name = NULL;
+ struct spdk_scsi_dev scsi_dev = {};
+ int rc;
+
+	/* case 1: fail while the target still has active connections */
+ tgtnode.num_active_conns = 1;
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+	/* case 2: fail on an invalid LUN ID (-2) */
+ tgtnode.num_active_conns = 0;
+ lun_id = -2;
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+	/* case 3: fail on an out-of-range LUN ID (SPDK_SCSI_DEV_MAX_LUN) */
+ lun_id = SPDK_SCSI_DEV_MAX_LUN;
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+	/* case 4: fail when the target has no SCSI device */
+ lun_id = -1;
+ tgtnode.dev = NULL;
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+	/* case 5: fail when the bdev name is NULL */
+ tgtnode.dev = &scsi_dev;
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+	/* case 6: succeed with valid parameters */
+ bdev_name = "LUN0";
+
+ rc = iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+config_file_fail_cases(void)
+{
+ struct spdk_conf *config;
+ struct spdk_conf_section *sp;
+ char section_name[64];
+ int section_index;
+ int rc;
+
+ config = spdk_conf_allocate();
+
+ rc = spdk_conf_read(config, config_file);
+ CU_ASSERT(rc == 0);
+
+ section_index = 0;
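+	/* Each [FailureN] section in the config file is expected to be rejected by iscsi_parse_tgt_node() */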
+ while (true) {
+ snprintf(section_name, sizeof(section_name), "Failure%d", section_index);
+ sp = spdk_conf_find_section(config, section_name);
+ if (sp == NULL) {
+ break;
+ }
+ rc = iscsi_parse_tgt_node(sp);
+ CU_ASSERT(rc < 0);
+ section_index++;
+ }
+
+ spdk_conf_free(config);
+}
+
+static void
+allow_any_allowed(void)
+{
+ bool result;
+ char *netmask;
+ char *addr1, *addr2;
+
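+	/* The special netmask "ANY" allows any initiator address, IPv4 or IPv6 */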
+ netmask = "ANY";
+ addr1 = "2001:ad6:1234:5678:9abc::";
+ addr2 = "192.168.2.1";
+
+ result = iscsi_netmask_allow_addr(netmask, addr1);
+ CU_ASSERT(result == true);
+
+ result = iscsi_netmask_allow_addr(netmask, addr2);
+ CU_ASSERT(result == true);
+}
+
+static void
+allow_ipv6_allowed(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "[2001:ad6:1234::]/48";
+ addr = "2001:ad6:1234:5678:9abc::";
+
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ result = iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ /* Netmask prefix bits == 128 (all bits must match) */
+ netmask = "[2001:ad6:1234:5678:9abc::1]/128";
+ addr = "2001:ad6:1234:5678:9abc::1";
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+}
+
+static void
+allow_ipv6_denied(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "[2001:ad6:1234::]/56";
+ addr = "2001:ad6:1234:5678:9abc::";
+
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ result = iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits == 128 (all bits must match) */
+ netmask = "[2001:ad6:1234:5678:9abc::1]/128";
+ addr = "2001:ad6:1234:5678:9abc::2";
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+allow_ipv6_invalid(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ /* Netmask prefix bits > 128 */
+ netmask = "[2001:ad6:1234::]/129";
+ addr = "2001:ad6:1234:5678:9abc::";
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits == 0 */
+ netmask = "[2001:ad6:1234::]/0";
+ addr = "2001:ad6:1234:5678:9abc::";
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits < 0 */
+ netmask = "[2001:ad6:1234::]/-1";
+ addr = "2001:ad6:1234:5678:9abc::";
+ result = iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+allow_ipv4_allowed(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "192.168.2.0/24";
+ addr = "192.168.2.1";
+
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ result = iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ /* Netmask prefix == 32 (all bits must match) */
+ netmask = "192.168.2.1/32";
+ addr = "192.168.2.1";
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+}
+
+static void
+allow_ipv4_denied(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "192.168.2.0";
+ addr = "192.168.2.1";
+
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ result = iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix == 32 (all bits must match) */
+ netmask = "192.168.2.1/32";
+ addr = "192.168.2.2";
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+allow_ipv4_invalid(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ /* Netmask prefix bits > 32 */
+ netmask = "192.168.2.0/33";
+ addr = "192.168.2.1";
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits == 0 */
+ netmask = "192.168.2.0/0";
+ addr = "192.168.2.1";
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits < 0 */
+ netmask = "192.168.2.0/-1";
+ addr = "192.168.2.1";
+ result = iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+node_access_allowed(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode = {};
+ struct spdk_iscsi_portal_grp pg = {};
+ struct spdk_iscsi_init_grp ig = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_portal portal = {};
+ struct spdk_iscsi_initiator_name iname = {};
+ struct spdk_iscsi_initiator_netmask imask = {};
+ struct spdk_scsi_dev scsi_dev = {};
+ struct spdk_iscsi_pg_map *pg_map;
+ char *iqn, *addr;
+ bool result;
+
+ /* portal group initialization */
+ pg.tag = 1;
+
+ /* initiator group initialization */
+ ig.tag = 1;
+
+ ig.ninitiators = 1;
+ snprintf(iname.name, sizeof(iname.name), "iqn.2017-10.spdk.io:0001");
+ TAILQ_INIT(&ig.initiator_head);
+ TAILQ_INSERT_TAIL(&ig.initiator_head, &iname, tailq);
+
+ ig.nnetmasks = 1;
+ snprintf(imask.mask, sizeof(imask.mask), "192.168.2.0/24");
+ TAILQ_INIT(&ig.netmask_head);
+ TAILQ_INSERT_TAIL(&ig.netmask_head, &imask, tailq);
+
+ /* target initialization */
+ snprintf(tgtnode.name, sizeof(tgtnode.name), "iqn.2017-10.spdk.io:0001");
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), "iqn.2017-10.spdk.io:0001");
+ tgtnode.dev = &scsi_dev;
+
+ pg_map = iscsi_tgt_node_add_pg_map(&tgtnode, &pg);
+ iscsi_pg_map_add_ig_map(pg_map, &ig);
+
+ /* portal initialization */
+ portal.group = &pg;
+ snprintf(portal.host, sizeof(portal.host), "192.168.2.0");
+ snprintf(portal.port, sizeof(portal.port), "3260");
+
+ /* input for UT */
+ conn.portal = &portal;
+
+ iqn = "iqn.2017-10.spdk.io:0001";
+ addr = "192.168.2.1";
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ iscsi_pg_map_delete_ig_map(pg_map, &ig);
+ iscsi_tgt_node_delete_pg_map(&tgtnode, &pg);
+}
+
+static void
+node_access_denied_by_empty_netmask(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode = {};
+ struct spdk_iscsi_portal_grp pg = {};
+ struct spdk_iscsi_init_grp ig = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_portal portal = {};
+ struct spdk_iscsi_initiator_name iname = {};
+ struct spdk_scsi_dev scsi_dev = {};
+ struct spdk_iscsi_pg_map *pg_map;
+ char *iqn, *addr;
+ bool result;
+
+ /* portal group initialization */
+ pg.tag = 1;
+
+ /* initiator group initialization */
+ ig.tag = 1;
+
+ ig.ninitiators = 1;
+ snprintf(iname.name, sizeof(iname.name), "iqn.2017-10.spdk.io:0001");
+ TAILQ_INIT(&ig.initiator_head);
+ TAILQ_INSERT_TAIL(&ig.initiator_head, &iname, tailq);
+
+ ig.nnetmasks = 0;
+ TAILQ_INIT(&ig.netmask_head);
+
+ /* target initialization */
+ snprintf(tgtnode.name, sizeof(tgtnode.name), "iqn.2017-10.spdk.io:0001");
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), "iqn.2017-10.spdk.io:0001");
+ tgtnode.dev = &scsi_dev;
+
+ pg_map = iscsi_tgt_node_add_pg_map(&tgtnode, &pg);
+ iscsi_pg_map_add_ig_map(pg_map, &ig);
+
+ /* portal initialization */
+ portal.group = &pg;
+ snprintf(portal.host, sizeof(portal.host), "192.168.2.0");
+ snprintf(portal.port, sizeof(portal.port), "3260");
+
+ /* input for UT */
+ conn.portal = &portal;
+
+ iqn = "iqn.2017-10.spdk.io:0001";
+ addr = "192.168.3.1";
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ iscsi_pg_map_delete_ig_map(pg_map, &ig);
+ iscsi_tgt_node_delete_pg_map(&tgtnode, &pg);
+}
+
+#define IQN1 "iqn.2017-11.spdk.io:0001"
+#define NO_IQN1 "!iqn.2017-11.spdk.io:0001"
+#define IQN2 "iqn.2017-11.spdk.io:0002"
+#define IP1 "192.168.2.0"
+#define IP2 "192.168.2.1"
+
+static void
+node_access_multi_initiator_groups_cases(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode = {};
+ struct spdk_iscsi_conn conn = {};
+ struct spdk_iscsi_portal_grp pg = {};
+ struct spdk_iscsi_portal portal = {};
+ struct spdk_iscsi_init_grp ig1 = {}, ig2 = {};
+ struct spdk_iscsi_initiator_name iname1 = {}, iname2 = {};
+ struct spdk_iscsi_initiator_netmask imask1 = {}, imask2 = {};
+ struct spdk_scsi_dev scsi_dev = {};
+ struct spdk_iscsi_pg_map *pg_map;
+ char *iqn, *addr;
+ bool result;
+
+ /* target initialization */
+ snprintf(tgtnode.name, sizeof(tgtnode.name), IQN1);
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), IQN1);
+ tgtnode.dev = &scsi_dev;
+
+ /* initiator group initialization */
+ ig1.tag = 1;
+ TAILQ_INIT(&ig1.initiator_head);
+ TAILQ_INIT(&ig1.netmask_head);
+
+ ig1.ninitiators = 1;
+ TAILQ_INSERT_TAIL(&ig1.initiator_head, &iname1, tailq);
+
+ ig1.nnetmasks = 1;
+ TAILQ_INSERT_TAIL(&ig1.netmask_head, &imask1, tailq);
+
+ ig2.tag = 2;
+ TAILQ_INIT(&ig2.initiator_head);
+ TAILQ_INIT(&ig2.netmask_head);
+
+ ig2.ninitiators = 1;
+ TAILQ_INSERT_TAIL(&ig2.initiator_head, &iname2, tailq);
+
+ ig2.nnetmasks = 1;
+ TAILQ_INSERT_TAIL(&ig2.netmask_head, &imask2, tailq);
+
+ /* portal group initialization */
+ pg.tag = 1;
+
+ pg_map = iscsi_tgt_node_add_pg_map(&tgtnode, &pg);
+ iscsi_pg_map_add_ig_map(pg_map, &ig1);
+ iscsi_pg_map_add_ig_map(pg_map, &ig2);
+
+ /* portal initialization */
+ portal.group = &pg;
+ snprintf(portal.host, sizeof(portal.host), IP1);
+ snprintf(portal.port, sizeof(portal.port), "3260");
+
+ /* connection initialization */
+ conn.portal = &portal;
+
+ iqn = IQN1;
+ addr = IP1;
+
+ /*
+ * case 1:
+ * +-------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +-------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +-------------------------------------------+---------+
+ * +-------------------------------------------+---------+
+ * | denied | - | - | - | denied |
+ * +-------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), NO_IQN1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 2:
+ * +-------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +-------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +-------------------------------------------+---------+
+ * +-------------------------------------------+---------+
+ * | allowed | allowed | - | - | allowed |
+ * +-------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN1);
+ snprintf(imask1.mask, sizeof(imask1.mask), IP1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ /*
+ * case 3:
+ * +-------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +-------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +-------------------------------------------+---------+
+ * +-------------------------------------------+---------+
+ * | allowed | denied | denied | - | denied |
+ * +-------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN1);
+ snprintf(imask1.mask, sizeof(imask1.mask), IP2);
+ snprintf(iname2.name, sizeof(iname2.name), NO_IQN1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 4:
+ * +-------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +-------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +-------------------------------------------+---------+
+ * +-------------------------------------------+---------+
+ * | allowed | denied | allowed | allowed | allowed |
+ * +-------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN1);
+ snprintf(imask1.mask, sizeof(imask1.mask), IP2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN1);
+ snprintf(imask2.mask, sizeof(imask2.mask), IP1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ /*
+ * case 5:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | allowed | denied | allowed | denied | denied |
+ * +---------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN1);
+ snprintf(imask1.mask, sizeof(imask1.mask), IP2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN1);
+ snprintf(imask2.mask, sizeof(imask2.mask), IP2);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 6:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | allowed | denied | not found | - | denied |
+ * +---------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN1);
+ snprintf(imask1.mask, sizeof(imask1.mask), IP2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN2);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 7:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | not found | - | denied | - | denied |
+ * +---------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN2);
+ snprintf(iname2.name, sizeof(iname2.name), NO_IQN1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 8:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | not found | - | allowed | allowed | allowed |
+ * +---------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN1);
+ snprintf(imask2.mask, sizeof(imask2.mask), IP1);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ /*
+ * case 9:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | not found | - | allowed | denied | denied |
+ * +---------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN1);
+ snprintf(imask2.mask, sizeof(imask2.mask), IP2);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 10:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | not found | - | not found | - | denied |
+ * +---------------------------------------------+---------+
+ */
+ snprintf(iname1.name, sizeof(iname1.name), IQN2);
+ snprintf(iname2.name, sizeof(iname2.name), IQN2);
+
+ result = iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ iscsi_pg_map_delete_ig_map(pg_map, &ig1);
+ iscsi_pg_map_delete_ig_map(pg_map, &ig2);
+ iscsi_tgt_node_delete_pg_map(&tgtnode, &pg);
+}
+
+static void
+allow_iscsi_name_multi_maps_case(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode = {};
+ struct spdk_iscsi_portal_grp pg1 = {}, pg2 = {};
+ struct spdk_iscsi_init_grp ig = {};
+ struct spdk_iscsi_initiator_name iname = {};
+ struct spdk_iscsi_pg_map *pg_map1, *pg_map2;
+ struct spdk_scsi_dev scsi_dev = {};
+ char *iqn;
+ bool result;
+
+ /* target initialization */
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), IQN1);
+ tgtnode.dev = &scsi_dev;
+
+ /* initiator group initialization */
+ TAILQ_INIT(&ig.initiator_head);
+
+ ig.ninitiators = 1;
+ TAILQ_INSERT_TAIL(&ig.initiator_head, &iname, tailq);
+
+ /* portal group initialization */
+ pg1.tag = 1;
+ pg2.tag = 1;
+
+ pg_map1 = iscsi_tgt_node_add_pg_map(&tgtnode, &pg1);
+ pg_map2 = iscsi_tgt_node_add_pg_map(&tgtnode, &pg2);
+ iscsi_pg_map_add_ig_map(pg_map1, &ig);
+ iscsi_pg_map_add_ig_map(pg_map2, &ig);
+
+ /* test for IG1 <-> PG1, PG2 case */
+ iqn = IQN1;
+
+ snprintf(iname.name, sizeof(iname.name), IQN1);
+
+ result = iscsi_tgt_node_allow_iscsi_name(&tgtnode, iqn);
+ CU_ASSERT(result == true);
+
+ snprintf(iname.name, sizeof(iname.name), IQN2);
+
+ result = iscsi_tgt_node_allow_iscsi_name(&tgtnode, iqn);
+ CU_ASSERT(result == false);
+
+ iscsi_pg_map_delete_ig_map(pg_map1, &ig);
+ iscsi_pg_map_delete_ig_map(pg_map2, &ig);
+ iscsi_tgt_node_delete_pg_map(&tgtnode, &pg1);
+ iscsi_tgt_node_delete_pg_map(&tgtnode, &pg2);
+}
+
+/*
+ * static bool
+ * iscsi_check_chap_params(bool disable_chap, bool require_chap,
+ * bool mutual_chap, int chap_group);
+ */
+static void
+chap_param_test_cases(void)
+{
+ /* Auto */
+ CU_ASSERT(iscsi_check_chap_params(false, false, false, 0) == true);
+
+ /* None */
+ CU_ASSERT(iscsi_check_chap_params(true, false, false, 0) == true);
+
+ /* CHAP */
+ CU_ASSERT(iscsi_check_chap_params(false, true, false, 0) == true);
+
+ /* CHAP Mutual */
+ CU_ASSERT(iscsi_check_chap_params(false, true, true, 0) == true);
+
+ /* Check mutual exclusiveness of disabled and required */
+ CU_ASSERT(iscsi_check_chap_params(true, true, false, 0) == false);
+
+ /* Mutual requires Required */
+ CU_ASSERT(iscsi_check_chap_params(false, false, true, 0) == false);
+
+ /* Remaining combinations */
+ CU_ASSERT(iscsi_check_chap_params(true, false, true, 0) == false);
+ CU_ASSERT(iscsi_check_chap_params(true, true, true, 0) == false);
+
+ /* Valid auth group ID */
+ CU_ASSERT(iscsi_check_chap_params(false, false, false, 1) == true);
+
+ /* Invalid auth group ID */
+ CU_ASSERT(iscsi_check_chap_params(false, false, false, -1) == false);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (argc < 2) {
+ fprintf(stderr, "usage: %s <config file>\n", argv[0]);
+ exit(1);
+ }
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ config_file = argv[1];
+
+ suite = CU_add_suite("iscsi_target_node_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, add_lun_test_cases);
+ CU_ADD_TEST(suite, config_file_fail_cases);
+ CU_ADD_TEST(suite, allow_any_allowed);
+ CU_ADD_TEST(suite, allow_ipv6_allowed);
+ CU_ADD_TEST(suite, allow_ipv6_denied);
+ CU_ADD_TEST(suite, allow_ipv6_invalid);
+ CU_ADD_TEST(suite, allow_ipv4_allowed);
+ CU_ADD_TEST(suite, allow_ipv4_denied);
+ CU_ADD_TEST(suite, allow_ipv4_invalid);
+ CU_ADD_TEST(suite, node_access_allowed);
+ CU_ADD_TEST(suite, node_access_denied_by_empty_netmask);
+ CU_ADD_TEST(suite, node_access_multi_initiator_groups_cases);
+ CU_ADD_TEST(suite, allow_iscsi_name_multi_maps_case);
+ CU_ADD_TEST(suite, chap_param_test_cases);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json/Makefile b/src/spdk/test/unit/lib/json/Makefile
new file mode 100644
index 000000000..db38f27dc
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = json_parse.c json_util.c json_write.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/json/json_parse.c/.gitignore b/src/spdk/test/unit/lib/json/json_parse.c/.gitignore
new file mode 100644
index 000000000..2b4445fd8
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_parse.c/.gitignore
@@ -0,0 +1 @@
+json_parse_ut
diff --git a/src/spdk/test/unit/lib/json/json_parse.c/Makefile b/src/spdk/test/unit/lib/json/json_parse.c/Makefile
new file mode 100644
index 000000000..3d4100240
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_parse.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = json_parse_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c b/src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c
new file mode 100644
index 000000000..7f704214b
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c
@@ -0,0 +1,931 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "json/json_parse.c"
+
+static uint8_t g_buf[1000];
+static void *g_end;
+static struct spdk_json_val g_vals[100];
+static int g_cur_val;
+
+/* Fill buf with raw data */
+#define BUF_SETUP(in) \
+ memset(g_buf, 0, sizeof(g_buf)); \
+ if (sizeof(in) > 1) { \
+ memcpy(g_buf, in, sizeof(in) - 1); \
+ } \
+ g_end = NULL
+
+/*
+ * Do two checks - first pass NULL for values to ensure the count is correct,
+ * then pass g_vals to get the actual values.
+ */
+#define PARSE_PASS_FLAGS(in, num_vals, trailing, flags) \
+ BUF_SETUP(in); \
+ CU_ASSERT(spdk_json_parse(g_buf, sizeof(in) - 1, NULL, 0, &g_end, flags) == num_vals); \
+ memset(g_vals, 0, sizeof(g_vals)); \
+ CU_ASSERT(spdk_json_parse(g_buf, sizeof(in) - 1, g_vals, sizeof(g_vals), &g_end, flags | SPDK_JSON_PARSE_FLAG_DECODE_IN_PLACE) == num_vals); \
+ CU_ASSERT(g_end == g_buf + sizeof(in) - sizeof(trailing)); \
+ CU_ASSERT(memcmp(g_end, trailing, sizeof(trailing) - 1) == 0); \
+ g_cur_val = 0
+
+#define PARSE_PASS(in, num_vals, trailing) \
+ PARSE_PASS_FLAGS(in, num_vals, trailing, 0)
+
+#define PARSE_FAIL_FLAGS(in, retval, flags) \
+ BUF_SETUP(in); \
+ CU_ASSERT(spdk_json_parse(g_buf, sizeof(in) - 1, NULL, 0, &g_end, flags) == retval)
+
+#define PARSE_FAIL(in, retval) \
+ PARSE_FAIL_FLAGS(in, retval, 0)
+
+#define VAL_STRING_MATCH(str, var_type) \
+ CU_ASSERT(g_vals[g_cur_val].type == var_type); \
+ CU_ASSERT(g_vals[g_cur_val].len == sizeof(str) - 1); \
+ if (g_vals[g_cur_val].len == sizeof(str) - 1 && sizeof(str) > 1) { \
+ CU_ASSERT(memcmp(g_vals[g_cur_val].start, str, g_vals[g_cur_val].len) == 0); \
+ } \
+ g_cur_val++
+
+#define VAL_STRING(str) VAL_STRING_MATCH(str, SPDK_JSON_VAL_STRING)
+#define VAL_NAME(str) VAL_STRING_MATCH(str, SPDK_JSON_VAL_NAME)
+#define VAL_NUMBER(num) VAL_STRING_MATCH(num, SPDK_JSON_VAL_NUMBER)
+
+#define VAL_LITERAL(str, val_type) \
+ CU_ASSERT(g_vals[g_cur_val].type == val_type); \
+ CU_ASSERT(g_vals[g_cur_val].len == strlen(str)); \
+ if (g_vals[g_cur_val].len == strlen(str)) { \
+ CU_ASSERT(memcmp(g_vals[g_cur_val].start, str, g_vals[g_cur_val].len) == 0); \
+ } \
+ g_cur_val++
+
+#define VAL_TRUE() VAL_LITERAL("true", SPDK_JSON_VAL_TRUE)
+#define VAL_FALSE() VAL_LITERAL("false", SPDK_JSON_VAL_FALSE)
+#define VAL_NULL() VAL_LITERAL("null", SPDK_JSON_VAL_NULL)
+
+#define VAL_ARRAY_BEGIN(count) \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_ARRAY_BEGIN); \
+ CU_ASSERT(g_vals[g_cur_val].len == count); \
+ g_cur_val++
+
+#define VAL_ARRAY_END() \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_ARRAY_END); \
+ g_cur_val++
+
+#define VAL_OBJECT_BEGIN(count) \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_OBJECT_BEGIN); \
+ CU_ASSERT(g_vals[g_cur_val].len == count); \
+ g_cur_val++
+
+#define VAL_OBJECT_END() \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_OBJECT_END); \
+ g_cur_val++
+
+/* Simplified macros for string-only testing */
+#define STR_PASS(in, out) \
+ PARSE_PASS("\"" in "\"", 1, ""); \
+ VAL_STRING(out)
+
+#define STR_FAIL(in, retval) \
+ PARSE_FAIL("\"" in "\"", retval)
+
+/* Simplified macros for number-only testing (no whitespace allowed) */
+#define NUM_PASS(in) \
+ PARSE_PASS(in, 1, ""); \
+ VAL_NUMBER(in)
+
+#define NUM_FAIL(in, retval) \
+ PARSE_FAIL(in, retval)
+
+static void
+test_parse_literal(void)
+{
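+	/* Literals may be surrounded by whitespace; any other trailing bytes are left unparsed */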
+ PARSE_PASS("true", 1, "");
+ VAL_TRUE();
+
+ PARSE_PASS(" true ", 1, "");
+ VAL_TRUE();
+
+ PARSE_PASS("false", 1, "");
+ VAL_FALSE();
+
+ PARSE_PASS("null", 1, "");
+ VAL_NULL();
+
+ PARSE_PASS("trueaaa", 1, "aaa");
+ VAL_TRUE();
+
+ PARSE_PASS("truefalse", 1, "false");
+ VAL_TRUE();
+
+ PARSE_PASS("true false", 1, "false");
+ VAL_TRUE();
+
+ PARSE_PASS("true,false", 1, ",false");
+ VAL_TRUE();
+
+ PARSE_PASS("true,", 1, ",");
+ VAL_TRUE();
+
+ PARSE_FAIL("True", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("abcdef", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_FAIL("t", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("tru", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("f", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("fals", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("n", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("nul", SPDK_JSON_PARSE_INCOMPLETE);
+
+ PARSE_FAIL("taaaaa", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("faaaaa", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("naaaaa", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_string_simple(void)
+{
+ PARSE_PASS("\"\"", 1, "");
+ VAL_STRING("");
+
+ PARSE_PASS("\"hello world\"", 1, "");
+ VAL_STRING("hello world");
+
+ PARSE_PASS(" \"hello world\" ", 1, "");
+ VAL_STRING("hello world");
+
+ /* Unterminated string */
+ PARSE_FAIL("\"hello world", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Trailing comma */
+ PARSE_PASS("\"hello world\",", 1, ",");
+ VAL_STRING("hello world");
+}
+
+static void
+test_parse_string_control_chars(void)
+{
+ /* U+0000 through U+001F must be escaped */
+ STR_FAIL("\x00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x01", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x02", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x03", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x04", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x05", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x06", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x07", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x08", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x09", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0A", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0B", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0C", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0D", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0E", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0F", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x10", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x11", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x12", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x13", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x14", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x15", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x16", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x17", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x18", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x19", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1A", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1B", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1C", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1D", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1E", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1F", SPDK_JSON_PARSE_INVALID);
+ STR_PASS(" ", " "); /* \x20 (first valid unescaped char) */
+
+ /* Test control chars in the middle of a string */
+ STR_FAIL("abc\ndef", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("abc\tdef", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_string_utf8(void)
+{
+ /* Valid one-, two-, three-, and four-byte sequences */
+ STR_PASS("\x41", "A");
+ STR_PASS("\xC3\xB6", "\xC3\xB6");
+ STR_PASS("\xE2\x88\x9A", "\xE2\x88\x9A");
+ STR_PASS("\xF0\xA0\x9C\x8E", "\xF0\xA0\x9C\x8E");
+
+ /* Examples from RFC 3629 */
+ STR_PASS("\x41\xE2\x89\xA2\xCE\x91\x2E", "\x41\xE2\x89\xA2\xCE\x91\x2E");
+ STR_PASS("\xED\x95\x9C\xEA\xB5\xAD\xEC\x96\xB4", "\xED\x95\x9C\xEA\xB5\xAD\xEC\x96\xB4");
+ STR_PASS("\xE6\x97\xA5\xE6\x9C\xAC\xE8\xAA\x9E", "\xE6\x97\xA5\xE6\x9C\xAC\xE8\xAA\x9E");
+ STR_PASS("\xEF\xBB\xBF\xF0\xA3\x8E\xB4", "\xEF\xBB\xBF\xF0\xA3\x8E\xB4");
+
+ /* Edge cases */
+ STR_PASS("\x7F", "\x7F");
+ STR_FAIL("\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xC1", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xC2", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xC2\x80", "\xC2\x80");
+ STR_PASS("\xC2\xBF", "\xC2\xBF");
+ STR_PASS("\xDF\x80", "\xDF\x80");
+ STR_PASS("\xDF\xBF", "\xDF\xBF");
+ STR_FAIL("\xDF", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\x1F", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\x1F\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\xA0", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xE0\xA0\x80", "\xE0\xA0\x80");
+ STR_PASS("\xE0\xA0\xBF", "\xE0\xA0\xBF");
+ STR_FAIL("\xE0\xA0\xC0", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xE0\xBF\x80", "\xE0\xBF\x80");
+ STR_PASS("\xE0\xBF\xBF", "\xE0\xBF\xBF");
+ STR_FAIL("\xE0\xC0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x7F\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x80\x7F", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xE1\x80\x80", "\xE1\x80\x80");
+ STR_PASS("\xE1\x80\xBF", "\xE1\x80\xBF");
+ STR_PASS("\xE1\xBF\x80", "\xE1\xBF\x80");
+ STR_PASS("\xE1\xBF\xBF", "\xE1\xBF\xBF");
+ STR_FAIL("\xE1\xC0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x80\xC0", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xEF\x80\x80", "\xEF\x80\x80");
+ STR_PASS("\xEF\xBF\xBF", "\xEF\xBF\xBF");
+ STR_FAIL("\xF0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x90", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x90\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x80\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x8F\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xF0\x90\x80\x80", "\xF0\x90\x80\x80");
+ STR_PASS("\xF0\x90\x80\xBF", "\xF0\x90\x80\xBF");
+ STR_PASS("\xF0\x90\xBF\x80", "\xF0\x90\xBF\x80");
+ STR_PASS("\xF0\xBF\x80\x80", "\xF0\xBF\x80\x80");
+ STR_FAIL("\xF0\xC0\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1\x80\x80\x7F", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xF1\x80\x80\x80", "\xF1\x80\x80\x80");
+ STR_PASS("\xF1\x80\x80\xBF", "\xF1\x80\x80\xBF");
+ STR_PASS("\xF1\x80\xBF\x80", "\xF1\x80\xBF\x80");
+ STR_PASS("\xF1\xBF\x80\x80", "\xF1\xBF\x80\x80");
+ STR_PASS("\xF3\x80\x80\x80", "\xF3\x80\x80\x80");
+ STR_FAIL("\xF3\xC0\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF3\x80\xC0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF3\x80\x80\xC0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF4", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF4\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF4\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xF4\x80\x80\x80", "\xF4\x80\x80\x80");
+ STR_PASS("\xF4\x8F\x80\x80", "\xF4\x8F\x80\x80");
+ STR_PASS("\xF4\x8F\xBF\xBF", "\xF4\x8F\xBF\xBF");
+ STR_FAIL("\xF4\x90\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80\x80\x80\x80", SPDK_JSON_PARSE_INVALID);
+
+ /* Overlong encodings */
+ STR_FAIL("\xC0\x80", SPDK_JSON_PARSE_INVALID);
+
+ /* Surrogate pairs */
+ STR_FAIL("\xED\xA0\x80", SPDK_JSON_PARSE_INVALID); /* U+D800 First high surrogate */
+ STR_FAIL("\xED\xAF\xBF", SPDK_JSON_PARSE_INVALID); /* U+DBFF Last high surrogate */
+ STR_FAIL("\xED\xB0\x80", SPDK_JSON_PARSE_INVALID); /* U+DC00 First low surrogate */
+ STR_FAIL("\xED\xBF\xBF", SPDK_JSON_PARSE_INVALID); /* U+DFFF Last low surrogate */
+ STR_FAIL("\xED\xA1\x8C\xED\xBE\xB4",
+ SPDK_JSON_PARSE_INVALID); /* U+233B4 (invalid surrogate pair encoding) */
+}
+
+static void
+test_parse_string_escapes_twochar(void)
+{
+ STR_PASS("\\\"", "\"");
+ STR_PASS("\\\\", "\\");
+ STR_PASS("\\/", "/");
+ STR_PASS("\\b", "\b");
+ STR_PASS("\\f", "\f");
+ STR_PASS("\\n", "\n");
+ STR_PASS("\\r", "\r");
+ STR_PASS("\\t", "\t");
+
+ STR_PASS("abc\\tdef", "abc\tdef");
+ STR_PASS("abc\\\"def", "abc\"def");
+
+ /* Backslash at end of string (will be treated as escaped quote) */
+ STR_FAIL("\\", SPDK_JSON_PARSE_INCOMPLETE);
+ STR_FAIL("abc\\", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Invalid C-like escapes */
+ STR_FAIL("\\a", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\v", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\'", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\?", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\x00", SPDK_JSON_PARSE_INVALID);
+
+ /* Other invalid escapes */
+ STR_FAIL("\\B", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\z", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_string_escapes_unicode(void)
+{
+ STR_PASS("\\u0000", "\0");
+ STR_PASS("\\u0001", "\1");
+ STR_PASS("\\u0041", "A");
+ STR_PASS("\\uAAAA", "\xEA\xAA\xAA");
+ STR_PASS("\\uaaaa", "\xEA\xAA\xAA");
+ STR_PASS("\\uAaAa", "\xEA\xAA\xAA");
+
+ STR_FAIL("\\u", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u000", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u000g", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\U", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\U0000", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_FAIL("\"\\u", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\u0", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\u00", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\u000", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Surrogate pair */
+ STR_PASS("\\uD834\\uDD1E", "\xF0\x9D\x84\x9E");
+
+ /* Low surrogate without high */
+ STR_FAIL("\\uDC00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uDC00\\uDC00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uDC00abcdef", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uDEAD", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("\"\\uD834", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\u", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\uD", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\uDD1", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* High surrogate without low */
+ STR_FAIL("\\uD800", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uD800abcdef", SPDK_JSON_PARSE_INVALID);
+
+ /* High surrogate followed by high surrogate */
+ STR_FAIL("\\uD800\\uD800", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_number(void)
+{
+ NUM_PASS("0");
+ NUM_PASS("1");
+ NUM_PASS("100");
+ NUM_PASS("-1");
+ NUM_PASS("-0");
+ NUM_PASS("3.0");
+ NUM_PASS("3.00");
+ NUM_PASS("3.001");
+ NUM_PASS("3.14159");
+ NUM_PASS("3.141592653589793238462643383279");
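+	/* The parser validates number syntax only and does not range-check values, so out-of-range exponents still pass */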
+ NUM_PASS("1e400");
+ NUM_PASS("1E400");
+ NUM_PASS("0e10");
+ NUM_PASS("0e0");
+ NUM_PASS("-0e0");
+ NUM_PASS("-0e+0");
+ NUM_PASS("-0e-0");
+ NUM_PASS("1e+400");
+ NUM_PASS("1e-400");
+ NUM_PASS("6.022e23");
+ NUM_PASS("-1.234e+56");
+ NUM_PASS("1.23e+56");
+ NUM_PASS("-1.23e-56");
+ NUM_PASS("1.23e-56");
+ NUM_PASS("1e04");
+
+ /* Trailing garbage */
+ PARSE_PASS("0A", 1, "A");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("0,", 1, ",");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("0true", 1, "true");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("00", 1, "0");
+ VAL_NUMBER("0");
+ PARSE_FAIL("[00", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("007", 1, "07");
+ VAL_NUMBER("0");
+ PARSE_FAIL("[007]", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("345.678.1", 1, ".1");
+ VAL_NUMBER("345.678");
+ PARSE_FAIL("[345.678.1]", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("3.2e-4+5", 1, "+5");
+ VAL_NUMBER("3.2e-4");
+ PARSE_FAIL("[3.2e-4+5]", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("3.4.5", 1, ".5");
+ VAL_NUMBER("3.4");
+ PARSE_FAIL("[3.4.5]", SPDK_JSON_PARSE_INVALID);
+
+ NUM_FAIL("345.", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("+1", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("--1", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("3.+4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.2e+-4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.2e-+4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3e+", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("3e-", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("3.e4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.2eX", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("-", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("NaN", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL(".123", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_array(void)
+{
+ char buffer[SPDK_JSON_MAX_NESTING_DEPTH + 2] = {0};
+
+ PARSE_PASS("[]", 2, "");
+ VAL_ARRAY_BEGIN(0);
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[true]", 3, "");
+ VAL_ARRAY_BEGIN(1);
+ VAL_TRUE();
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[true, false]", 4, "");
+ VAL_ARRAY_BEGIN(2);
+ VAL_TRUE();
+ VAL_FALSE();
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[\"hello\"]", 3, "");
+ VAL_ARRAY_BEGIN(1);
+ VAL_STRING("hello");
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[[]]", 4, "");
+ VAL_ARRAY_BEGIN(2);
+ VAL_ARRAY_BEGIN(0);
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[\"hello\", \"world\"]", 4, "");
+ VAL_ARRAY_BEGIN(2);
+ VAL_STRING("hello");
+ VAL_STRING("world");
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[],", 2, ",");
+ VAL_ARRAY_BEGIN(0);
+ VAL_ARRAY_END();
+
+ PARSE_FAIL("]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[true", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[\"hello", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[\"hello\"", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[true,]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[,]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[,true]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[true}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[true,,true]", SPDK_JSON_PARSE_INVALID);
+
+	/* Nested arrays at exactly the maximum allowed depth: unterminated, so the parse is incomplete rather than a depth error */
+ memset(buffer, '[', SPDK_JSON_MAX_NESTING_DEPTH);
+ buffer[SPDK_JSON_MAX_NESTING_DEPTH] = ' ';
+ PARSE_FAIL(buffer, SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Nested arrays exceeding the maximum allowed nesting depth for this implementation */
+ buffer[SPDK_JSON_MAX_NESTING_DEPTH] = '[';
+ PARSE_FAIL(buffer, SPDK_JSON_PARSE_MAX_DEPTH_EXCEEDED);
+}
+
+static void
+test_parse_object(void)
+{
+ PARSE_PASS("{}", 2, "");
+ VAL_OBJECT_BEGIN(0);
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": true}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("a");
+ VAL_TRUE();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"abc\": \"def\"}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("abc");
+ VAL_STRING("def");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": true, \"b\": false}", 6, "");
+ VAL_OBJECT_BEGIN(4);
+ VAL_NAME("a");
+ VAL_TRUE();
+ VAL_NAME("b");
+ VAL_FALSE();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": { \"b\": true } }", 7, "");
+ VAL_OBJECT_BEGIN(5);
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("b");
+ VAL_TRUE();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"{test\": 0}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("{test");
+ VAL_NUMBER("0");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"test}\": 1}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("test}");
+ VAL_NUMBER("1");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"\\\"\": 2}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("\"");
+ VAL_NUMBER("2");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\":true},", 4, ",");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("a");
+ VAL_TRUE();
+ VAL_OBJECT_END();
+
+ /* Object end without object begin (trailing garbage) */
+ PARSE_PASS("true}", 1, "}");
+ VAL_TRUE();
+
+ PARSE_PASS("0}", 1, "}");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("\"a\"}", 1, "}");
+ VAL_STRING("a");
+
+ PARSE_FAIL("}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\"", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true,", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,\"}", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true,\"b}", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true,\"b\"}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,\"b\":}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,\"b\",}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\",}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{,\"a\": true}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{a:true}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{'a':true}", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_nesting(void)
+{
+ PARSE_PASS("[[[[[[[[]]]]]]]]", 16, "");
+
+ PARSE_PASS("{\"a\": [0, 1, 2]}", 8, "");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": [0, 1, 2], \"b\": 3 }", 10, "");
+ VAL_OBJECT_BEGIN(8);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_NAME("b");
+ VAL_NUMBER("3");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("[0, 1, {\"a\": 3}, 4, 5]", 10, "");
+ VAL_ARRAY_BEGIN(8);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("a");
+ VAL_NUMBER("3");
+ VAL_OBJECT_END();
+ VAL_NUMBER("4");
+ VAL_NUMBER("5");
+ VAL_ARRAY_END();
+
+ PARSE_PASS("\t[ { \"a\": {\"b\": [ {\"c\": 1}, 2 ],\n\"d\": 3}, \"e\" : 4}, 5 ] ", 20, "");
+ VAL_ARRAY_BEGIN(18);
+ VAL_OBJECT_BEGIN(15);
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN(10);
+ VAL_NAME("b");
+ VAL_ARRAY_BEGIN(5);
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("c");
+ VAL_NUMBER("1");
+ VAL_OBJECT_END();
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_NAME("d");
+ VAL_NUMBER("3");
+ VAL_OBJECT_END();
+ VAL_NAME("e");
+ VAL_NUMBER("4");
+ VAL_OBJECT_END();
+ VAL_NUMBER("5");
+ VAL_ARRAY_END();
+
+ /* Examples from RFC 7159 */
+ PARSE_PASS(
+ "{\n"
+ " \"Image\": {\n"
+ " \"Width\": 800,\n"
+ " \"Height\": 600,\n"
+ " \"Title\": \"View from 15th Floor\",\n"
+ " \"Thumbnail\": {\n"
+ " \"Url\": \"http://www.example.com/image/481989943\",\n"
+ " \"Height\": 125,\n"
+ " \"Width\": 100\n"
+ " },\n"
+ " \"Animated\" : false,\n"
+ " \"IDs\": [116, 943, 234, 38793]\n"
+ " }\n"
+ "}\n",
+ 29, "");
+
+ VAL_OBJECT_BEGIN(27);
+ VAL_NAME("Image");
+ VAL_OBJECT_BEGIN(24);
+ VAL_NAME("Width");
+ VAL_NUMBER("800");
+ VAL_NAME("Height");
+ VAL_NUMBER("600");
+ VAL_NAME("Title");
+ VAL_STRING("View from 15th Floor");
+ VAL_NAME("Thumbnail");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("Url");
+ VAL_STRING("http://www.example.com/image/481989943");
+ VAL_NAME("Height");
+ VAL_NUMBER("125");
+ VAL_NAME("Width");
+ VAL_NUMBER("100");
+ VAL_OBJECT_END();
+ VAL_NAME("Animated");
+ VAL_FALSE();
+ VAL_NAME("IDs");
+ VAL_ARRAY_BEGIN(4);
+ VAL_NUMBER("116");
+ VAL_NUMBER("943");
+ VAL_NUMBER("234");
+ VAL_NUMBER("38793");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS(
+ "[\n"
+ " {\n"
+ " \"precision\": \"zip\",\n"
+ " \"Latitude\": 37.7668,\n"
+ " \"Longitude\": -122.3959,\n"
+ " \"Address\": \"\",\n"
+ " \"City\": \"SAN FRANCISCO\",\n"
+ " \"State\": \"CA\",\n"
+ " \"Zip\": \"94107\",\n"
+ " \"Country\": \"US\"\n"
+ " },\n"
+ " {\n"
+ " \"precision\": \"zip\",\n"
+ " \"Latitude\": 37.371991,\n"
+ " \"Longitude\": -122.026020,\n"
+ " \"Address\": \"\",\n"
+ " \"City\": \"SUNNYVALE\",\n"
+ " \"State\": \"CA\",\n"
+ " \"Zip\": \"94085\",\n"
+ " \"Country\": \"US\"\n"
+ " }\n"
+ "]",
+ 38, "");
+
+ VAL_ARRAY_BEGIN(36);
+ VAL_OBJECT_BEGIN(16);
+ VAL_NAME("precision");
+ VAL_STRING("zip");
+ VAL_NAME("Latitude");
+ VAL_NUMBER("37.7668");
+ VAL_NAME("Longitude");
+ VAL_NUMBER("-122.3959");
+ VAL_NAME("Address");
+ VAL_STRING("");
+ VAL_NAME("City");
+ VAL_STRING("SAN FRANCISCO");
+ VAL_NAME("State");
+ VAL_STRING("CA");
+ VAL_NAME("Zip");
+ VAL_STRING("94107");
+ VAL_NAME("Country");
+ VAL_STRING("US");
+ VAL_OBJECT_END();
+ VAL_OBJECT_BEGIN(16);
+ VAL_NAME("precision");
+ VAL_STRING("zip");
+ VAL_NAME("Latitude");
+ VAL_NUMBER("37.371991");
+ VAL_NAME("Longitude");
+ VAL_NUMBER("-122.026020");
+ VAL_NAME("Address");
+ VAL_STRING("");
+ VAL_NAME("City");
+ VAL_STRING("SUNNYVALE");
+ VAL_NAME("State");
+ VAL_STRING("CA");
+ VAL_NAME("Zip");
+ VAL_STRING("94085");
+ VAL_NAME("Country");
+ VAL_STRING("US");
+ VAL_OBJECT_END();
+ VAL_ARRAY_END();
+
+ /* Trailing garbage */
+ PARSE_PASS("{\"a\": [0, 1, 2]}]", 8, "]");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": [0, 1, 2]}}", 8, "}");
+ PARSE_PASS("{\"a\": [0, 1, 2]}]", 8, "]");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+
+ PARSE_FAIL("{\"a\": [0, 1, 2}]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\": [0, 1, 2]", SPDK_JSON_PARSE_INCOMPLETE);
+}
+
+static void
+test_parse_comment(void)
+{
+ /* Comments are not allowed by the JSON RFC */
+ PARSE_PASS("[0]", 3, "");
+ PARSE_FAIL("/* test */[0]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[/* test */0]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[0/* test */]", SPDK_JSON_PARSE_INVALID);
+
+ /*
+ * This is allowed since the parser stops once it reads a complete JSON object.
+ * The next parse call would fail (see tests above) when parsing the comment.
+ */
+ PARSE_PASS("[0]/* test */", 3, "/* test */");
+
+ /*
+ * Test with non-standard comments enabled.
+ */
+ PARSE_PASS_FLAGS("/* test */[0]", 3, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_ARRAY_BEGIN(1);
+ VAL_NUMBER("0");
+ VAL_ARRAY_END();
+
+ PARSE_PASS_FLAGS("[/* test */0]", 3, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_ARRAY_BEGIN(1);
+ VAL_NUMBER("0");
+ VAL_ARRAY_END();
+
+ PARSE_PASS_FLAGS("[0/* test */]", 3, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_ARRAY_BEGIN(1);
+ VAL_NUMBER("0");
+ VAL_ARRAY_END();
+
+ PARSE_FAIL_FLAGS("/* test */", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("[/* test */", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("[0/* test */", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+
+ /*
+ * Single-line comments
+ */
+ PARSE_PASS_FLAGS("// test\n0", 1, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_NUMBER("0");
+
+ PARSE_PASS_FLAGS("// test\r\n0", 1, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_NUMBER("0");
+
+ PARSE_PASS_FLAGS("// [0] test\n0", 1, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_NUMBER("0");
+
+ PARSE_FAIL_FLAGS("//", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("// test", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("//\n", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+
+ /* Invalid character following slash */
+ PARSE_FAIL_FLAGS("[0/x", SPDK_JSON_PARSE_INVALID, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+
+ /* Single slash at end of buffer */
+ PARSE_FAIL_FLAGS("[0/", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("json", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_parse_literal);
+ CU_ADD_TEST(suite, test_parse_string_simple);
+ CU_ADD_TEST(suite, test_parse_string_control_chars);
+ CU_ADD_TEST(suite, test_parse_string_utf8);
+ CU_ADD_TEST(suite, test_parse_string_escapes_twochar);
+ CU_ADD_TEST(suite, test_parse_string_escapes_unicode);
+ CU_ADD_TEST(suite, test_parse_number);
+ CU_ADD_TEST(suite, test_parse_array);
+ CU_ADD_TEST(suite, test_parse_object);
+ CU_ADD_TEST(suite, test_parse_nesting);
+ CU_ADD_TEST(suite, test_parse_comment);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json/json_util.c/.gitignore b/src/spdk/test/unit/lib/json/json_util.c/.gitignore
new file mode 100644
index 000000000..02f6d50c5
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_util.c/.gitignore
@@ -0,0 +1 @@
+json_util_ut
diff --git a/src/spdk/test/unit/lib/json/json_util.c/Makefile b/src/spdk/test/unit/lib/json/json_util.c/Makefile
new file mode 100644
index 000000000..c9a282083
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_util.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = json_util_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c b/src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c
new file mode 100644
index 000000000..2f883521f
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c
@@ -0,0 +1,954 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "json/json_util.c"
+
+/* For spdk_json_parse() */
+#include "json/json_parse.c"
+
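+/*
+ * Helper macros for the number-conversion tests: NUM_SETUP() copies the literal into
+ * buf and points a NUMBER-type spdk_json_val at it; the NUM_*_PASS()/NUM_*_FAIL()
+ * macros then assert the result of the corresponding spdk_json_number_to_*() call.
+ */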
+#define NUM_SETUP(x) \
+ snprintf(buf, sizeof(buf), "%s", x); \
+ v.type = SPDK_JSON_VAL_NUMBER; \
+ v.start = buf; \
+ v.len = sizeof(x) - 1
+
+#define NUM_UINT16_PASS(s, i) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint16(&v, &u16) == 0); \
+ CU_ASSERT(u16 == i)
+
+#define NUM_UINT16_FAIL(s) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint16(&v, &u16) != 0)
+
+#define NUM_INT32_PASS(s, i) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_int32(&v, &i32) == 0); \
+ CU_ASSERT(i32 == i)
+
+#define NUM_INT32_FAIL(s) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_int32(&v, &i32) != 0)
+
+#define NUM_UINT64_PASS(s, i) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint64(&v, &u64) == 0); \
+ CU_ASSERT(u64 == i)
+
+#define NUM_UINT64_FAIL(s) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint64(&v, &u64) != 0)
+
+static void
+test_strequal(void)
+{
+ struct spdk_json_val v;
+
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "test";
+ v.len = sizeof("test") - 1;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == true);
+ CU_ASSERT(spdk_json_strequal(&v, "TEST") == false);
+ CU_ASSERT(spdk_json_strequal(&v, "hello") == false);
+ CU_ASSERT(spdk_json_strequal(&v, "t") == false);
+
+ v.type = SPDK_JSON_VAL_NAME;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == true);
+
+ v.type = SPDK_JSON_VAL_NUMBER;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == false);
+
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "test\0hello";
+ v.len = sizeof("test\0hello") - 1;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == false);
+}
+
+static void
+test_num_to_uint16(void)
+{
+ struct spdk_json_val v;
+ char buf[100];
+ uint16_t u16 = 0;
+
+ NUM_SETUP("1234");
+ CU_ASSERT(spdk_json_number_to_uint16(&v, &u16) == 0);
+ CU_ASSERT(u16 == 1234);
+
+ NUM_UINT16_PASS("0", 0);
+ NUM_UINT16_PASS("1234", 1234);
+ NUM_UINT16_PASS("1234.00000", 1234);
+ NUM_UINT16_PASS("1.2e1", 12);
+ NUM_UINT16_PASS("12340e-1", 1234);
+
+ NUM_UINT16_FAIL("1.2");
+ NUM_UINT16_FAIL("-1234");
+ NUM_UINT16_FAIL("1.2E0");
+ NUM_UINT16_FAIL("1.234e1");
+ NUM_UINT16_FAIL("12341e-1");
+}
+
+static void
+test_num_to_int32(void)
+{
+ struct spdk_json_val v;
+ char buf[100];
+ int32_t i32 = 0;
+
+ NUM_SETUP("1234");
+ CU_ASSERT(spdk_json_number_to_int32(&v, &i32) == 0);
+ CU_ASSERT(i32 == 1234);
+
+
+ NUM_INT32_PASS("0", 0);
+ NUM_INT32_PASS("1234", 1234);
+ NUM_INT32_PASS("-1234", -1234);
+ NUM_INT32_PASS("1234.00000", 1234);
+ NUM_INT32_PASS("1.2e1", 12);
+ NUM_INT32_PASS("12340e-1", 1234);
+ NUM_INT32_PASS("-0", 0);
+
+ NUM_INT32_FAIL("1.2");
+ NUM_INT32_FAIL("1.2E0");
+ NUM_INT32_FAIL("1.234e1");
+ NUM_INT32_FAIL("12341e-1");
+}
+
+static void
+test_num_to_uint64(void)
+{
+ struct spdk_json_val v;
+ char buf[100];
+ uint64_t u64 = 0;
+
+ NUM_SETUP("1234");
+ CU_ASSERT(spdk_json_number_to_uint64(&v, &u64) == 0);
+ CU_ASSERT(u64 == 1234);
+
+
+ NUM_UINT64_PASS("0", 0);
+ NUM_UINT64_PASS("1234", 1234);
+ NUM_UINT64_PASS("1234.00000", 1234);
+ NUM_UINT64_PASS("1.2e1", 12);
+ NUM_UINT64_PASS("12340e-1", 1234);
+ NUM_UINT64_PASS("123456780e-1", 12345678);
+
+ NUM_UINT64_FAIL("1.2");
+ NUM_UINT64_FAIL("-1234");
+ NUM_UINT64_FAIL("1.2E0");
+ NUM_UINT64_FAIL("1.234e1");
+ NUM_UINT64_FAIL("12341e-1");
+ NUM_UINT64_FAIL("123456781e-1");
+}
+
+static void
+test_decode_object(void)
+{
+ struct my_object {
+ char *my_name;
+ uint32_t my_int;
+ bool my_bool;
+ };
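+	/*
+	 * Hand-built token stream equivalent to parsing
+	 * {"first":"HELLO","second":234,"third":true}.
+	 */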
+ struct spdk_json_val object[] = {
+ {"", 6, SPDK_JSON_VAL_OBJECT_BEGIN},
+ {"first", 5, SPDK_JSON_VAL_NAME},
+ {"HELLO", 5, SPDK_JSON_VAL_STRING},
+ {"second", 6, SPDK_JSON_VAL_NAME},
+ {"234", 3, SPDK_JSON_VAL_NUMBER},
+ {"third", 5, SPDK_JSON_VAL_NAME},
+ {"", 1, SPDK_JSON_VAL_TRUE},
+ {"", 0, SPDK_JSON_VAL_OBJECT_END},
+ };
+
+ struct spdk_json_object_decoder decoders[] = {
+ {"first", offsetof(struct my_object, my_name), spdk_json_decode_string, false},
+ {"second", offsetof(struct my_object, my_int), spdk_json_decode_uint32, false},
+ {"third", offsetof(struct my_object, my_bool), spdk_json_decode_bool, false},
+ {"fourth", offsetof(struct my_object, my_bool), spdk_json_decode_bool, true},
+ };
+ struct my_object output = {
+ .my_name = NULL,
+ .my_int = 0,
+ .my_bool = false,
+ };
+ uint32_t answer = 234;
+ char *answer_str = "HELLO";
+ bool answer_bool = true;
+
+ /* Passing Test: object containing simple types */
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 4, &output) == 0);
+ SPDK_CU_ASSERT_FATAL(output.my_name != NULL);
+ CU_ASSERT(memcmp(output.my_name, answer_str, 6) == 0);
+ CU_ASSERT(output.my_int == answer);
+ CU_ASSERT(output.my_bool == answer_bool);
+
+ /* Failing Test: member with no matching decoder */
+	/* i.e. pass only the first two decoders, so the "third" member has no matching decoder */
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 2, &output) != 0);
+
+ /* Failing Test: non-optional decoder with no corresponding member */
+
+ decoders[3].optional = false;
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 4, &output) != 0);
+
+ /* return to base state */
+ decoders[3].optional = true;
+
+ /* Failing Test: duplicated names for json values */
+ object[3].start = "first";
+ object[3].len = 5;
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 3, &output) != 0);
+
+ /* return to base state */
+ object[3].start = "second";
+ object[3].len = 6;
+
+ /* Failing Test: invalid value for decoder */
+ object[2].start = "HELO";
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 3, &output) != 0);
+
+ /* return to base state */
+ object[2].start = "HELLO";
+
+ /* Failing Test: not an object */
+ object[0].type = SPDK_JSON_VAL_ARRAY_BEGIN;
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 3, &output) != 0);
+
+ free(output.my_name);
+}
+
+static void
+test_decode_array(void)
+{
+ struct spdk_json_val values[4];
+ uint32_t my_int[2] = {0, 0};
+ char *my_string[2] = {NULL, NULL};
+ size_t out_size;
+
+ /* passing integer test */
+ values[0].type = SPDK_JSON_VAL_ARRAY_BEGIN;
+ values[0].len = 2;
+ values[1].type = SPDK_JSON_VAL_NUMBER;
+ values[1].len = 4;
+ values[1].start = "1234";
+ values[2].type = SPDK_JSON_VAL_NUMBER;
+ values[2].len = 4;
+ values[2].start = "5678";
+ values[3].type = SPDK_JSON_VAL_ARRAY_END;
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) == 0);
+ CU_ASSERT(my_int[0] == 1234);
+ CU_ASSERT(my_int[1] == 5678);
+ CU_ASSERT(out_size == 2);
+
+ /* array length exceeds max */
+ values[0].len = 3;
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* mixed types */
+ values[0].len = 2;
+ values[2].type = SPDK_JSON_VAL_STRING;
+ values[2].len = 5;
+ values[2].start = "HELLO";
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* no array start */
+ values[0].type = SPDK_JSON_VAL_NUMBER;
+ values[2].type = SPDK_JSON_VAL_NUMBER;
+ values[2].len = 4;
+ values[2].start = "5678";
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* mismatched array type and parser */
+ values[0].type = SPDK_JSON_VAL_ARRAY_BEGIN;
+ values[1].type = SPDK_JSON_VAL_STRING;
+ values[1].len = 5;
+ values[1].start = "HELLO";
+ values[2].type = SPDK_JSON_VAL_STRING;
+ values[2].len = 5;
+ values[2].start = "WORLD";
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* passing String example */
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_string, my_string, 2, &out_size,
+ sizeof(char *)) == 0);
+ SPDK_CU_ASSERT_FATAL(my_string[0] != NULL);
+ SPDK_CU_ASSERT_FATAL(my_string[1] != NULL);
+ CU_ASSERT(memcmp(my_string[0], "HELLO", 6) == 0);
+ CU_ASSERT(memcmp(my_string[1], "WORLD", 6) == 0);
+ CU_ASSERT(out_size == 2);
+
+ free(my_string[0]);
+ free(my_string[1]);
+}
+
+static void
+test_decode_bool(void)
+{
+ struct spdk_json_val v;
+ bool b;
+
+ /* valid bool (true) */
+ v.type = SPDK_JSON_VAL_TRUE;
+ b = false;
+ CU_ASSERT(spdk_json_decode_bool(&v, &b) == 0);
+ CU_ASSERT(b == true);
+
+ /* valid bool (false) */
+ v.type = SPDK_JSON_VAL_FALSE;
+ b = true;
+ CU_ASSERT(spdk_json_decode_bool(&v, &b) == 0);
+ CU_ASSERT(b == false);
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_NULL;
+ CU_ASSERT(spdk_json_decode_bool(&v, &b) != 0);
+}
+
+static void
+test_decode_int32(void)
+{
+ struct spdk_json_val v;
+ int32_t i;
+
+ /* correct type and valid value */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "33";
+ v.len = 2;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 33);
+
+ /* correct type and invalid value (float) */
+ v.start = "32.45";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "String";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_TRUE;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* edge case (integer max) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "2147483647";
+ v.len = 10;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 2147483647);
+
+ /* invalid value (overflow) */
+ v.start = "2147483648";
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* edge case (integer min) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "-2147483648";
+ v.len = 11;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == -2147483648);
+
+ /* invalid value (overflow) */
+ v.start = "-2147483649";
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "4e3";
+ v.len = 3;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 4000);
+
+ /* invalid negative exponent */
+ v.start = "-400e-4";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* invalid negative exponent */
+ v.start = "400e-4";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "-400e-2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == -4);
+
+ /* invalid exponent (overflow) */
+ v.start = "-2e32";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "2.13e2";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 213);
+
+ /* invalid exponent with decimal */
+ v.start = "2.134e2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+}
+
+static void
+test_decode_uint16(void)
+{
+ struct spdk_json_val v;
+	uint16_t i;
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "Strin";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* invalid value (float) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "123.4";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* edge case (0) */
+ v.start = "0";
+ v.len = 1;
+ i = 456;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 0);
+
+ /* invalid value (negative) */
+ v.start = "-1";
+ v.len = 2;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* edge case (maximum) */
+ v.start = "65535";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 65535);
+
+ /* invalid value (overflow) */
+ v.start = "65536";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "66E2";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 6600);
+
+ /* invalid exponent (overflow) */
+ v.start = "66E3";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* invalid exponent (decimal) */
+ v.start = "65.535E2";
+	v.len = 8;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "65.53E2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 6553);
+
+ /* invalid negative exponent */
+ v.start = "40e-2";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* invalid negative exponent */
+ v.start = "-40e-1";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "40e-1";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 4);
+}
+
+static void
+test_decode_uint32(void)
+{
+ struct spdk_json_val v;
+ uint32_t i;
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "String";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* invalid value (float) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "123.45";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* edge case (0) */
+ v.start = "0";
+ v.len = 1;
+ i = 456;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 0);
+
+ /* invalid value (negative) */
+ v.start = "-1";
+ v.len = 2;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* edge case (maximum) */
+ v.start = "4294967295";
+ v.len = 10;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4294967295);
+
+ /* invalid value (overflow) */
+ v.start = "4294967296";
+ v.len = 10;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "42E2";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4200);
+
+ /* invalid exponent (overflow) */
+ v.start = "42e32";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* invalid exponent (decimal) */
+ v.start = "42.323E2";
+ v.len = 8;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "42.32E2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4232);
+
+ /* invalid negative exponent */
+ v.start = "400e-4";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* invalid negative exponent */
+ v.start = "-400e-2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "400e-2";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4);
+
+ /* valid negative exponent */
+ v.start = "10e-1";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 1);
+}
+
+static void
+test_decode_uint64(void)
+{
+ struct spdk_json_val v;
+ uint64_t i;
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "String";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* invalid value (float) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "123.45";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* edge case (0) */
+ v.start = "0";
+ v.len = 1;
+ i = 456;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 0);
+
+ /* invalid value (negative) */
+ v.start = "-1";
+ v.len = 2;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* edge case (maximum) */
+ v.start = "18446744073709551615";
+ v.len = 20;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 18446744073709551615U);
+
+ /* invalid value (overflow) */
+ v.start = "18446744073709551616";
+ v.len = 20;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "42E2";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 4200);
+
+ /* invalid exponent (overflow) */
+ v.start = "42e64";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* invalid exponent (decimal) */
+ v.start = "42.323E2";
+ v.len = 8;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "42.32E2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 4232);
+
+ /* invalid negative exponent */
+ v.start = "400e-4";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* invalid negative exponent */
+ v.start = "-400e-2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "400e-2";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 4);
+}
+
+static void
+test_decode_string(void)
+{
+ struct spdk_json_val v;
+ char *value = NULL;
+
+ /* Passing Test: Standard */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "HELLO";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(memcmp(value, v.start, 6) == 0);
+
+ /* Edge Test: Empty String */
+ v.start = "";
+ v.len = 0;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(memcmp(value, v.start, 1) == 0);
+
+	/*
+	 * Failing Test: Null Terminator In String
+	 * It is valid for a JSON string to contain \u0000, and the parser will accept it.
+	 * However, the decoded value must fit in a NUL-terminated C string, so the decode
+	 * is rejected if '\0' appears before the end of the value.
+	 */
+ v.start = "HELO";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) != 0);
+
+ /* Failing Test: Wrong Type */
+ v.start = "45673";
+ v.type = SPDK_JSON_VAL_NUMBER;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) != 0);
+
+ /* Passing Test: Special Characters */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "HE\bLL\tO\\WORLD";
+ v.len = 13;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(memcmp(value, v.start, 14) == 0);
+
+ free(value);
+}
+
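+/* Shared JSON fixture used by test_find() and test_iterating() below. */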
+char ut_json_text[] =
+ "{"
+ " \"string\": \"Some string data\","
+ " \"object\": { "
+	"    \"another_string\": \"Yet another string data\","
+ " \"array name with space\": [1, [], {} ]"
+ " },"
+ " \"array\": [ \"Text\", 2, {} ]"
+ "}"
+ ;
+
+static void
+test_find(void)
+{
+ struct spdk_json_val *values, *key, *val, *key2, *val2;
+ ssize_t values_cnt;
+ ssize_t rc;
+
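+	/* Two-pass parse: the first call (values == NULL) only counts the values that are
+	 * needed; the second call fills the allocated array.
+	 */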
+ values_cnt = spdk_json_parse(ut_json_text, strlen(ut_json_text), NULL, 0, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt > 0);
+
+ values = calloc(values_cnt, sizeof(struct spdk_json_val));
+ SPDK_CU_ASSERT_FATAL(values != NULL);
+
+ rc = spdk_json_parse(ut_json_text, strlen(ut_json_text), values, values_cnt, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt == rc);
+
+ key = val = NULL;
+ rc = spdk_json_find(values, "string", &key, &val, SPDK_JSON_VAL_STRING);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(key != NULL && spdk_json_strequal(key, "string") == true);
+ CU_ASSERT(val != NULL && spdk_json_strequal(val, "Some string data") == true);
+
+ key = val = NULL;
+ rc = spdk_json_find(values, "object", &key, &val, SPDK_JSON_VAL_OBJECT_BEGIN);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(key != NULL && spdk_json_strequal(key, "object") == true);
+
+ /* Find key in "object" by passing SPDK_JSON_VAL_ANY to match any type */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "array name with space", &key2, &val2, SPDK_JSON_VAL_ANY);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(key2 != NULL && spdk_json_strequal(key2, "array name with space") == true);
+ CU_ASSERT(val2 != NULL && val2->type == SPDK_JSON_VAL_ARRAY_BEGIN);
+
+	/* Find the "array name with space" key in "object", passing SPDK_JSON_VAL_ARRAY_BEGIN to match only an array */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "array name with space", &key2, &val2, SPDK_JSON_VAL_ARRAY_BEGIN);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(key2 != NULL && spdk_json_strequal(key2, "array name with space") == true);
+ CU_ASSERT(val2 != NULL && val2->type == SPDK_JSON_VAL_ARRAY_BEGIN);
+
+ /* Negative test - key doesn't exist */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "this_key_does_not_exist", &key2, &val2, SPDK_JSON_VAL_ANY);
+ CU_ASSERT(rc == -ENOENT);
+
+ /* Negative test - key type doesn't match */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "another_string", &key2, &val2, SPDK_JSON_VAL_ARRAY_BEGIN);
+ CU_ASSERT(rc == -EDOM);
+
+ free(values);
+}
+
+static void
+test_iterating(void)
+{
+ struct spdk_json_val *values;
+ struct spdk_json_val *string_key;
+ struct spdk_json_val *object_key, *object_val;
+ struct spdk_json_val *array_key, *array_val;
+ struct spdk_json_val *another_string_key;
+ struct spdk_json_val *array_name_with_space_key, *array_name_with_space_val;
+ struct spdk_json_val *it;
+ ssize_t values_cnt;
+ ssize_t rc;
+
+ values_cnt = spdk_json_parse(ut_json_text, strlen(ut_json_text), NULL, 0, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt > 0);
+
+ values = calloc(values_cnt, sizeof(struct spdk_json_val));
+ SPDK_CU_ASSERT_FATAL(values != NULL);
+
+ rc = spdk_json_parse(ut_json_text, strlen(ut_json_text), values, values_cnt, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt == rc);
+
+	/* Iterate over the object keys. The JSON spec doesn't guarantee the order of keys
+	 * in an object, but the SPDK implementation implicitly preserves the order in which
+	 * they appear in the input.
+	 */
+ string_key = spdk_json_object_first(values);
+ CU_ASSERT(spdk_json_strequal(string_key, "string") == true);
+
+ object_key = spdk_json_next(string_key);
+ object_val = json_value(object_key);
+ CU_ASSERT(spdk_json_strequal(object_key, "object") == true);
+
+ array_key = spdk_json_next(object_key);
+ array_val = json_value(array_key);
+ CU_ASSERT(spdk_json_strequal(array_key, "array") == true);
+
+	/* spdk_json_next() returns NULL at the closing '}' of the root object */
+ CU_ASSERT(spdk_json_next(array_key) == NULL);
+
+ /* Iterate over subobjects */
+ another_string_key = spdk_json_object_first(object_val);
+ CU_ASSERT(spdk_json_strequal(another_string_key, "another_string") == true);
+
+ array_name_with_space_key = spdk_json_next(another_string_key);
+ array_name_with_space_val = json_value(array_name_with_space_key);
+ CU_ASSERT(spdk_json_strequal(array_name_with_space_key, "array name with space") == true);
+
+ CU_ASSERT(spdk_json_next(array_name_with_space_key) == NULL);
+
+ /* Iterate over array in subobject */
+ it = spdk_json_array_first(array_name_with_space_val);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_NUMBER);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_ARRAY_BEGIN);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_OBJECT_BEGIN);
+
+ it = spdk_json_next(it);
+ CU_ASSERT(it == NULL);
+
+ /* Iterate over array in root object */
+ it = spdk_json_array_first(array_val);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_STRING);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_NUMBER);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_OBJECT_BEGIN);
+
+ /* Array end */
+ it = spdk_json_next(it);
+ CU_ASSERT(it == NULL);
+
+ free(values);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("json", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_strequal);
+ CU_ADD_TEST(suite, test_num_to_uint16);
+ CU_ADD_TEST(suite, test_num_to_int32);
+ CU_ADD_TEST(suite, test_num_to_uint64);
+ CU_ADD_TEST(suite, test_decode_object);
+ CU_ADD_TEST(suite, test_decode_array);
+ CU_ADD_TEST(suite, test_decode_bool);
+ CU_ADD_TEST(suite, test_decode_uint16);
+ CU_ADD_TEST(suite, test_decode_int32);
+ CU_ADD_TEST(suite, test_decode_uint32);
+ CU_ADD_TEST(suite, test_decode_uint64);
+ CU_ADD_TEST(suite, test_decode_string);
+ CU_ADD_TEST(suite, test_find);
+ CU_ADD_TEST(suite, test_iterating);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json/json_write.c/.gitignore b/src/spdk/test/unit/lib/json/json_write.c/.gitignore
new file mode 100644
index 000000000..dd576b238
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_write.c/.gitignore
@@ -0,0 +1 @@
+json_write_ut
diff --git a/src/spdk/test/unit/lib/json/json_write.c/Makefile b/src/spdk/test/unit/lib/json/json_write.c/Makefile
new file mode 100644
index 000000000..9fe1fa916
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_write.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = json_write_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c b/src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c
new file mode 100644
index 000000000..d208f650c
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c
@@ -0,0 +1,736 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "json/json_write.c"
+#include "json/json_parse.c"
+
+#include "spdk/util.h"
+
+static uint8_t g_buf[1000];
+static uint8_t *g_write_pos;
+
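+/*
+ * Write callback passed to spdk_json_write_begin(): appends the emitted bytes to
+ * g_buf (failing once the buffer is full) so END() can byte-compare the output.
+ */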
+static int
+write_cb(void *cb_ctx, const void *data, size_t size)
+{
+ size_t buf_free = g_buf + sizeof(g_buf) - g_write_pos;
+
+ if (size > buf_free) {
+ return -1;
+ }
+
+ memcpy(g_write_pos, data, size);
+ g_write_pos += size;
+
+ return 0;
+}
+
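+/*
+ * Test pattern: BEGIN() resets g_buf and opens a write context; END(json) closes it and
+ * byte-compares the emitted output against the expected JSON literal; END_NOCMP() skips
+ * the comparison; END_FAIL() expects spdk_json_write_end() to fail.
+ */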
+#define BEGIN() \
+ memset(g_buf, 0, sizeof(g_buf)); \
+ g_write_pos = g_buf; \
+ w = spdk_json_write_begin(write_cb, NULL, 0); \
+ SPDK_CU_ASSERT_FATAL(w != NULL)
+
+#define END(json) \
+ CU_ASSERT(spdk_json_write_end(w) == 0); \
+ CU_ASSERT(g_write_pos - g_buf == sizeof(json) - 1); \
+ CU_ASSERT(memcmp(json, g_buf, sizeof(json) - 1) == 0)
+
+#define END_NOCMP() \
+ CU_ASSERT(spdk_json_write_end(w) == 0)
+
+#define END_FAIL() \
+ CU_ASSERT(spdk_json_write_end(w) < 0)
+
+#define VAL_STRING(str) \
+ CU_ASSERT(spdk_json_write_string_raw(w, str, sizeof(str) - 1) == 0)
+
+#define VAL_STRING_FAIL(str) \
+ CU_ASSERT(spdk_json_write_string_raw(w, str, sizeof(str) - 1) < 0)
+
+#define STR_PASS(in, out) \
+ BEGIN(); VAL_STRING(in); END("\"" out "\"")
+
+#define STR_FAIL(in) \
+ BEGIN(); VAL_STRING_FAIL(in); END_FAIL()
+
+#define VAL_STRING_UTF16LE(str) \
+ CU_ASSERT(spdk_json_write_string_utf16le_raw(w, (const uint16_t *)str, sizeof(str) / sizeof(uint16_t) - 1) == 0)
+
+#define VAL_STRING_UTF16LE_FAIL(str) \
+ CU_ASSERT(spdk_json_write_string_utf16le_raw(w, (const uint16_t *)str, sizeof(str) / sizeof(uint16_t) - 1) < 0)
+
+#define STR_UTF16LE_PASS(in, out) \
+ BEGIN(); VAL_STRING_UTF16LE(in); END("\"" out "\"")
+
+#define STR_UTF16LE_FAIL(in) \
+ BEGIN(); VAL_STRING_UTF16LE_FAIL(in); END_FAIL()
+
+#define VAL_NAME(name) \
+ CU_ASSERT(spdk_json_write_name_raw(w, name, sizeof(name) - 1) == 0)
+
+#define VAL_NULL() CU_ASSERT(spdk_json_write_null(w) == 0)
+#define VAL_TRUE() CU_ASSERT(spdk_json_write_bool(w, true) == 0)
+#define VAL_FALSE() CU_ASSERT(spdk_json_write_bool(w, false) == 0)
+
+#define VAL_INT32(i) CU_ASSERT(spdk_json_write_int32(w, i) == 0);
+#define VAL_UINT32(u) CU_ASSERT(spdk_json_write_uint32(w, u) == 0);
+
+#define VAL_INT64(i) CU_ASSERT(spdk_json_write_int64(w, i) == 0);
+#define VAL_UINT64(u) CU_ASSERT(spdk_json_write_uint64(w, u) == 0);
+
+#define VAL_ARRAY_BEGIN() CU_ASSERT(spdk_json_write_array_begin(w) == 0)
+#define VAL_ARRAY_END() CU_ASSERT(spdk_json_write_array_end(w) == 0)
+
+#define VAL_OBJECT_BEGIN() CU_ASSERT(spdk_json_write_object_begin(w) == 0)
+#define VAL_OBJECT_END() CU_ASSERT(spdk_json_write_object_end(w) == 0)
+
+#define VAL(v) CU_ASSERT(spdk_json_write_val(w, v) == 0)
+
+static void
+test_write_literal(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_NULL();
+ END("null");
+
+ BEGIN();
+ VAL_TRUE();
+ END("true");
+
+ BEGIN();
+ VAL_FALSE();
+ END("false");
+}
+
+static void
+test_write_string_simple(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ STR_PASS("hello world", "hello world");
+ STR_PASS(" ", " ");
+ STR_PASS("~", "~");
+}
+
+static void
+test_write_string_escapes(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ /* Two-character escapes */
+ STR_PASS("\b", "\\b");
+ STR_PASS("\f", "\\f");
+ STR_PASS("\n", "\\n");
+ STR_PASS("\r", "\\r");
+ STR_PASS("\t", "\\t");
+ STR_PASS("\"", "\\\"");
+ STR_PASS("\\", "\\\\");
+
+ /* JSON defines an escape for forward slash, but it is optional */
+ STR_PASS("/", "/");
+
+ STR_PASS("hello\nworld", "hello\\nworld");
+
+ STR_PASS("\x00", "\\u0000");
+ STR_PASS("\x01", "\\u0001");
+ STR_PASS("\x02", "\\u0002");
+
+ STR_PASS("\xC3\xB6", "\\u00F6");
+ STR_PASS("\xE2\x88\x9A", "\\u221A");
+ STR_PASS("\xEA\xAA\xAA", "\\uAAAA");
+
+ /* Surrogate pairs */
+ STR_PASS("\xF0\x9D\x84\x9E", "\\uD834\\uDD1E");
+ STR_PASS("\xF0\xA0\x9C\x8E", "\\uD841\\uDF0E");
+
+ /* Examples from RFC 3629 */
+ STR_PASS("\x41\xE2\x89\xA2\xCE\x91\x2E", "A\\u2262\\u0391.");
+ STR_PASS("\xED\x95\x9C\xEA\xB5\xAD\xEC\x96\xB4", "\\uD55C\\uAD6D\\uC5B4");
+ STR_PASS("\xE6\x97\xA5\xE6\x9C\xAC\xE8\xAA\x9E", "\\u65E5\\u672C\\u8A9E");
+ STR_PASS("\xEF\xBB\xBF\xF0\xA3\x8E\xB4", "\\uFEFF\\uD84C\\uDFB4");
+
+ /* UTF-8 edge cases */
+ STR_PASS("\x7F", "\\u007F");
+ STR_FAIL("\x80");
+ STR_FAIL("\xC1");
+ STR_FAIL("\xC2");
+ STR_PASS("\xC2\x80", "\\u0080");
+ STR_PASS("\xC2\xBF", "\\u00BF");
+ STR_PASS("\xDF\x80", "\\u07C0");
+ STR_PASS("\xDF\xBF", "\\u07FF");
+ STR_FAIL("\xDF");
+ STR_FAIL("\xE0\x80");
+ STR_FAIL("\xE0\x1F");
+ STR_FAIL("\xE0\x1F\x80");
+ STR_FAIL("\xE0");
+ STR_FAIL("\xE0\xA0");
+ STR_PASS("\xE0\xA0\x80", "\\u0800");
+ STR_PASS("\xE0\xA0\xBF", "\\u083F");
+ STR_FAIL("\xE0\xA0\xC0");
+ STR_PASS("\xE0\xBF\x80", "\\u0FC0");
+ STR_PASS("\xE0\xBF\xBF", "\\u0FFF");
+ STR_FAIL("\xE0\xC0\x80");
+ STR_FAIL("\xE1");
+ STR_FAIL("\xE1\x80");
+ STR_FAIL("\xE1\x7F\x80");
+ STR_FAIL("\xE1\x80\x7F");
+ STR_PASS("\xE1\x80\x80", "\\u1000");
+ STR_PASS("\xE1\x80\xBF", "\\u103F");
+ STR_PASS("\xE1\xBF\x80", "\\u1FC0");
+ STR_PASS("\xE1\xBF\xBF", "\\u1FFF");
+ STR_FAIL("\xE1\xC0\x80");
+ STR_FAIL("\xE1\x80\xC0");
+ STR_PASS("\xEF\x80\x80", "\\uF000");
+ STR_PASS("\xEF\xBF\xBF", "\\uFFFF");
+ STR_FAIL("\xF0");
+ STR_FAIL("\xF0\x90");
+ STR_FAIL("\xF0\x90\x80");
+ STR_FAIL("\xF0\x80\x80\x80");
+ STR_FAIL("\xF0\x8F\x80\x80");
+ STR_PASS("\xF0\x90\x80\x80", "\\uD800\\uDC00");
+ STR_PASS("\xF0\x90\x80\xBF", "\\uD800\\uDC3F");
+ STR_PASS("\xF0\x90\xBF\x80", "\\uD803\\uDFC0");
+ STR_PASS("\xF0\xBF\x80\x80", "\\uD8BC\\uDC00");
+ STR_FAIL("\xF0\xC0\x80\x80");
+ STR_FAIL("\xF1");
+ STR_FAIL("\xF1\x80");
+ STR_FAIL("\xF1\x80\x80");
+ STR_FAIL("\xF1\x80\x80\x7F");
+ STR_PASS("\xF1\x80\x80\x80", "\\uD8C0\\uDC00");
+ STR_PASS("\xF1\x80\x80\xBF", "\\uD8C0\\uDC3F");
+ STR_PASS("\xF1\x80\xBF\x80", "\\uD8C3\\uDFC0");
+ STR_PASS("\xF1\xBF\x80\x80", "\\uD9BC\\uDC00");
+ STR_PASS("\xF3\x80\x80\x80", "\\uDAC0\\uDC00");
+ STR_FAIL("\xF3\xC0\x80\x80");
+ STR_FAIL("\xF3\x80\xC0\x80");
+ STR_FAIL("\xF3\x80\x80\xC0");
+ STR_FAIL("\xF4");
+ STR_FAIL("\xF4\x80");
+ STR_FAIL("\xF4\x80\x80");
+ STR_PASS("\xF4\x80\x80\x80", "\\uDBC0\\uDC00");
+ STR_PASS("\xF4\x8F\x80\x80", "\\uDBFC\\uDC00");
+ STR_PASS("\xF4\x8F\xBF\xBF", "\\uDBFF\\uDFFF");
+ STR_FAIL("\xF4\x90\x80\x80");
+ STR_FAIL("\xF5");
+ STR_FAIL("\xF5\x80");
+ STR_FAIL("\xF5\x80\x80");
+ STR_FAIL("\xF5\x80\x80\x80");
+ STR_FAIL("\xF5\x80\x80\x80\x80");
+
+ /* Overlong encodings */
+ STR_FAIL("\xC0\x80");
+
+ /* Surrogate pairs */
+ STR_FAIL("\xED\xA0\x80"); /* U+D800 First high surrogate */
+ STR_FAIL("\xED\xAF\xBF"); /* U+DBFF Last high surrogate */
+ STR_FAIL("\xED\xB0\x80"); /* U+DC00 First low surrogate */
+ STR_FAIL("\xED\xBF\xBF"); /* U+DFFF Last low surrogate */
+ STR_FAIL("\xED\xA1\x8C\xED\xBE\xB4"); /* U+233B4 (invalid surrogate pair encoding) */
+}
+
+static void
+test_write_string_utf16le(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ /* All characters in BMP */
+ STR_UTF16LE_PASS(((uint8_t[]) {
+ 'H', 0, 'e', 0, 'l', 0, 'l', 0, 'o', 0, 0x15, 0xFE, 0, 0
+ }), "Hello\\uFE15");
+
+ /* Surrogate pair */
+ STR_UTF16LE_PASS(((uint8_t[]) {
+ 'H', 0, 'i', 0, 0x34, 0xD8, 0x1E, 0xDD, '!', 0, 0, 0
+ }), "Hi\\uD834\\uDD1E!");
+
+ /* Valid high surrogate, but no low surrogate */
+ STR_UTF16LE_FAIL(((uint8_t[]) {
+ 0x00, 0xD8, 0, 0 /* U+D800 */
+ }));
+
+ /* Invalid leading low surrogate */
+ STR_UTF16LE_FAIL(((uint8_t[]) {
+ 0x00, 0xDC, 0x00, 0xDC, 0, 0 /* U+DC00 U+DC00 */
+ }));
+
+ /* Valid high surrogate followed by another high surrogate (invalid) */
+ STR_UTF16LE_FAIL(((uint8_t[]) {
+ 0x00, 0xD8, 0x00, 0xD8, 0, 0 /* U+D800 U+D800 */
+ }));
+}
+
+static void
+test_write_number_int32(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_INT32(0);
+ END("0");
+
+ BEGIN();
+ VAL_INT32(1);
+ END("1");
+
+ BEGIN();
+ VAL_INT32(123);
+ END("123");
+
+ BEGIN();
+ VAL_INT32(-123);
+ END("-123");
+
+ BEGIN();
+ VAL_INT32(2147483647);
+ END("2147483647");
+
+ BEGIN();
+ VAL_INT32(-2147483648);
+ END("-2147483648");
+}
+
+static void
+test_write_number_uint32(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_UINT32(0);
+ END("0");
+
+ BEGIN();
+ VAL_UINT32(1);
+ END("1");
+
+ BEGIN();
+ VAL_UINT32(123);
+ END("123");
+
+ BEGIN();
+ VAL_UINT32(2147483647);
+ END("2147483647");
+
+ BEGIN();
+ VAL_UINT32(4294967295);
+ END("4294967295");
+}
+
+static void
+test_write_number_int64(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_INT64(0);
+ END("0");
+
+ BEGIN();
+ VAL_INT64(1);
+ END("1");
+
+ BEGIN();
+ VAL_INT64(123);
+ END("123");
+
+ BEGIN();
+ VAL_INT64(-123);
+ END("-123");
+
+ BEGIN();
+ VAL_INT64(INT64_MAX);
+ END("9223372036854775807");
+
+ BEGIN();
+ VAL_INT64(INT64_MIN);
+ END("-9223372036854775808");
+}
+
+static void
+test_write_number_uint64(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_UINT64(0);
+ END("0");
+
+ BEGIN();
+ VAL_UINT64(1);
+ END("1");
+
+ BEGIN();
+ VAL_UINT64(123);
+ END("123");
+
+ BEGIN();
+ VAL_UINT64(INT64_MAX);
+ END("9223372036854775807");
+
+ BEGIN();
+ VAL_UINT64(UINT64_MAX);
+ END("18446744073709551615");
+}
+
+static void
+test_write_array(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ END("[]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_END();
+ END("[0]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_INT32(1);
+ VAL_ARRAY_END();
+ END("[0,1]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_INT32(1);
+ VAL_INT32(2);
+ VAL_ARRAY_END();
+ END("[0,1,2]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_STRING("a");
+ VAL_ARRAY_END();
+ END("[\"a\"]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_STRING("a");
+ VAL_STRING("b");
+ VAL_ARRAY_END();
+ END("[\"a\",\"b\"]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_STRING("a");
+ VAL_STRING("b");
+ VAL_STRING("c");
+ VAL_ARRAY_END();
+ END("[\"a\",\"b\",\"c\"]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_TRUE();
+ VAL_ARRAY_END();
+ END("[true]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_TRUE();
+ VAL_FALSE();
+ VAL_ARRAY_END();
+ END("[true,false]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_TRUE();
+ VAL_FALSE();
+ VAL_TRUE();
+ VAL_ARRAY_END();
+ END("[true,false,true]");
+}
+
+static void
+test_write_object(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_OBJECT_END();
+ END("{}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_OBJECT_END();
+ END("{\"a\":0}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_NAME("b");
+ VAL_INT32(1);
+ VAL_OBJECT_END();
+ END("{\"a\":0,\"b\":1}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_NAME("b");
+ VAL_INT32(1);
+ VAL_NAME("c");
+ VAL_INT32(2);
+ VAL_OBJECT_END();
+ END("{\"a\":0,\"b\":1,\"c\":2}");
+}
+
+static void
+test_write_nesting(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ END("[[]]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ END("[[[]]]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ END("[0,[]]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_INT32(0);
+ VAL_ARRAY_END();
+ END("[[],0]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(1);
+ VAL_ARRAY_END();
+ VAL_INT32(2);
+ VAL_ARRAY_END();
+ END("[0,[1],2]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_INT32(1);
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(2);
+ VAL_INT32(3);
+ VAL_ARRAY_END();
+ VAL_INT32(4);
+ VAL_INT32(5);
+ VAL_ARRAY_END();
+ END("[0,1,[2,3],4,5]");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+ END("{\"a\":{}}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("b");
+ VAL_INT32(0);
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+ END("{\"a\":{\"b\":0}}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+ END("{\"a\":[0]}");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_OBJECT_END();
+ VAL_ARRAY_END();
+ END("[{\"a\":0}]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("b");
+ VAL_ARRAY_BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("c");
+ VAL_INT32(1);
+ VAL_OBJECT_END();
+ VAL_INT32(2);
+ VAL_ARRAY_END();
+ VAL_NAME("d");
+ VAL_INT32(3);
+ VAL_OBJECT_END();
+ VAL_NAME("e");
+ VAL_INT32(4);
+ VAL_OBJECT_END();
+ VAL_INT32(5);
+ VAL_ARRAY_END();
+ END("[{\"a\":{\"b\":[{\"c\":1},2],\"d\":3},\"e\":4},5]");
+
+ /* Examples from RFC 7159 */
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("Image");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("Width");
+ VAL_INT32(800);
+ VAL_NAME("Height");
+ VAL_INT32(600);
+ VAL_NAME("Title");
+ VAL_STRING("View from 15th Floor");
+ VAL_NAME("Thumbnail");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("Url");
+ VAL_STRING("http://www.example.com/image/481989943");
+ VAL_NAME("Height");
+ VAL_INT32(125);
+ VAL_NAME("Width");
+ VAL_INT32(100);
+ VAL_OBJECT_END();
+ VAL_NAME("Animated");
+ VAL_FALSE();
+ VAL_NAME("IDs");
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(116);
+ VAL_INT32(943);
+ VAL_INT32(234);
+ VAL_INT32(38793);
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+ END(
+ "{\"Image\":"
+ "{"
+ "\"Width\":800,"
+ "\"Height\":600,"
+ "\"Title\":\"View from 15th Floor\","
+ "\"Thumbnail\":{"
+ "\"Url\":\"http://www.example.com/image/481989943\","
+ "\"Height\":125,"
+ "\"Width\":100"
+ "},"
+ "\"Animated\":false,"
+ "\"IDs\":[116,943,234,38793]"
+ "}"
+ "}");
+}
+
+/* Round-trip parse and write test */
+static void
+test_write_val(void)
+{
+ struct spdk_json_write_ctx *w;
+ struct spdk_json_val values[100];
+ char src[] = "{\"a\":[1,2,3],\"b\":{\"c\":\"d\"},\"e\":true,\"f\":false,\"g\":null}";
+
+ CU_ASSERT(spdk_json_parse(src, strlen(src), values, SPDK_COUNTOF(values), NULL,
+ SPDK_JSON_PARSE_FLAG_DECODE_IN_PLACE) == 19);
+
+ BEGIN();
+ VAL(values);
+ END("{\"a\":[1,2,3],\"b\":{\"c\":\"d\"},\"e\":true,\"f\":false,\"g\":null}");
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("json", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_write_literal);
+ CU_ADD_TEST(suite, test_write_string_simple);
+ CU_ADD_TEST(suite, test_write_string_escapes);
+ CU_ADD_TEST(suite, test_write_string_utf16le);
+ CU_ADD_TEST(suite, test_write_number_int32);
+ CU_ADD_TEST(suite, test_write_number_uint32);
+ CU_ADD_TEST(suite, test_write_number_int64);
+ CU_ADD_TEST(suite, test_write_number_uint64);
+ CU_ADD_TEST(suite, test_write_array);
+ CU_ADD_TEST(suite, test_write_object);
+ CU_ADD_TEST(suite, test_write_nesting);
+ CU_ADD_TEST(suite, test_write_val);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json_mock.c b/src/spdk/test/unit/lib/json_mock.c
new file mode 100644
index 000000000..b9cee171e
--- /dev/null
+++ b/src/spdk/test/unit/lib/json_mock.c
@@ -0,0 +1,81 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/json.h"
+#include "spdk_internal/mock.h"
+
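+/*
+ * No-op stubs for the spdk_json_write_*() API: each DEFINE_STUB() emits a function that
+ * returns the listed default value, so unit tests for modules that emit JSON can link
+ * without pulling in the real writer implementation.
+ */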
+DEFINE_STUB(spdk_json_write_begin, struct spdk_json_write_ctx *, (spdk_json_write_cb write_cb,
+ void *cb_ctx, uint32_t flags), NULL);
+
+DEFINE_STUB(spdk_json_write_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
+DEFINE_STUB(spdk_json_write_int32, int, (struct spdk_json_write_ctx *w, int32_t val), 0);
+DEFINE_STUB(spdk_json_write_uint32, int, (struct spdk_json_write_ctx *w, uint32_t val), 0);
+DEFINE_STUB(spdk_json_write_int64, int, (struct spdk_json_write_ctx *w, int64_t val), 0);
+DEFINE_STUB(spdk_json_write_uint64, int, (struct spdk_json_write_ctx *w, uint64_t val), 0);
+DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0);
+DEFINE_STUB(spdk_json_write_string_raw, int, (struct spdk_json_write_ctx *w, const char *val,
+ size_t len), 0);
+
+DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
+DEFINE_STUB(spdk_json_write_name_raw, int, (struct spdk_json_write_ctx *w, const char *name,
+ size_t len), 0);
+
+/* Utility functions */
+DEFINE_STUB(spdk_json_write_named_null, int, (struct spdk_json_write_ctx *w, const char *name), 0);
+DEFINE_STUB(spdk_json_write_named_bool, int, (struct spdk_json_write_ctx *w, const char *name,
+ bool val), 0);
+DEFINE_STUB(spdk_json_write_named_int32, int, (struct spdk_json_write_ctx *w, const char *name,
+ int32_t val), 0);
+DEFINE_STUB(spdk_json_write_named_uint32, int, (struct spdk_json_write_ctx *w, const char *name,
+ uint32_t val), 0);
+DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name,
+ uint64_t val), 0);
+DEFINE_STUB(spdk_json_write_named_int64, int, (struct spdk_json_write_ctx *w, const char *name,
+ int64_t val), 0);
+DEFINE_STUB(spdk_json_write_named_string, int, (struct spdk_json_write_ctx *w, const char *name,
+ const char *val), 0);
+DEFINE_STUB(spdk_json_write_named_string_fmt, int, (struct spdk_json_write_ctx *w, const char *name,
+ const char *fmt, ...), 0);
+DEFINE_STUB(spdk_json_write_named_string_fmt_v, int, (struct spdk_json_write_ctx *w,
+ const char *name, const char *fmt, va_list args), 0);
+
+DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
+DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
diff --git a/src/spdk/test/unit/lib/jsonrpc/Makefile b/src/spdk/test/unit/lib/jsonrpc/Makefile
new file mode 100644
index 000000000..0fc0a2e96
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = jsonrpc_server.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore
new file mode 100644
index 000000000..8852a96d2
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore
@@ -0,0 +1 @@
+jsonrpc_server_ut
diff --git a/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile
new file mode 100644
index 000000000..6c02115f7
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = jsonrpc_server_ut.c
+SPDK_LIB_LIST = json
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c
new file mode 100644
index 000000000..8c3ffa208
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c
@@ -0,0 +1,410 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "jsonrpc/jsonrpc_server.c"
+
+static struct spdk_jsonrpc_request *g_request;
+static int g_parse_error;
+const struct spdk_json_val *g_method;
+const struct spdk_json_val *g_params;
+
+const struct spdk_json_val *g_cur_param;
+
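+/*
+ * Helper macros for driving jsonrpc_parse_request() directly: PARSE_PASS()
+ * asserts that the parser consumes the whole input except for the given
+ * trailing bytes, and PARSE_FAIL() asserts that parsing reports an error.
+ */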
+#define PARSE_PASS(in, trailing) \
+ CU_ASSERT(g_cur_param == NULL); \
+ g_cur_param = NULL; \
+ CU_ASSERT(jsonrpc_parse_request(conn, in, sizeof(in) - 1) == sizeof(in) - sizeof(trailing))
+
+#define REQ_BEGIN(expected_error) \
+	if (expected_error != 0) { \
+ CU_ASSERT(g_parse_error == expected_error); \
+ CU_ASSERT(g_params == NULL); \
+ }
+
+#define PARSE_FAIL(in) \
+ CU_ASSERT(jsonrpc_parse_request(conn, in, sizeof(in) - 1) < 0);
+
+#define REQ_BEGIN_VALID() \
+ REQ_BEGIN(0); \
+ SPDK_CU_ASSERT_FATAL(g_params != NULL);
+
+#define REQ_BEGIN_INVALID(expected_error) \
+ REQ_BEGIN(expected_error); \
+ REQ_METHOD_MISSING(); \
+ REQ_ID_MISSING(); \
+ REQ_PARAMS_MISSING()
+
+
+#define REQ_METHOD(name) \
+ CU_ASSERT(g_method && spdk_json_strequal(g_method, name) == true)
+
+#define REQ_METHOD_MISSING() \
+ CU_ASSERT(g_method == NULL)
+
+#define REQ_ID_NUM(num) \
+ CU_ASSERT(g_request->id && g_request->id->type == SPDK_JSON_VAL_NUMBER); \
+ CU_ASSERT(g_request->id && memcmp(g_request->id->start, num, sizeof(num) - 1) == 0)
+
+
+#define REQ_ID_STRING(str) \
+	CU_ASSERT(g_request->id && g_request->id->type == SPDK_JSON_VAL_STRING); \
+	CU_ASSERT(g_request->id && memcmp(g_request->id->start, str, sizeof(str) - 1) == 0)
+
+#define REQ_ID_NULL() \
+ CU_ASSERT(g_request->id && g_request->id->type == SPDK_JSON_VAL_NULL)
+
+#define REQ_ID_MISSING() \
+ CU_ASSERT(g_request->id == NULL)
+
+#define REQ_PARAMS_MISSING() \
+ CU_ASSERT(g_params == NULL)
+
+#define REQ_PARAMS_BEGIN() \
+ SPDK_CU_ASSERT_FATAL(g_params != NULL); \
+ CU_ASSERT(g_cur_param == NULL); \
+ g_cur_param = g_params
+
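+/* The PARAM_* macros walk g_cur_param through the parsed params token stream. */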
+#define PARAM_ARRAY_BEGIN() \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_ARRAY_BEGIN); \
+ g_cur_param++
+
+#define PARAM_ARRAY_END() \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_ARRAY_END); \
+ g_cur_param++
+
+#define PARAM_OBJECT_BEGIN() \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_OBJECT_BEGIN); \
+ g_cur_param++
+
+#define PARAM_OBJECT_END() \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_OBJECT_END); \
+ g_cur_param++
+
+#define PARAM_NUM(num) \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_NUMBER); \
+ CU_ASSERT(g_cur_param->len == sizeof(num) - 1); \
+ CU_ASSERT(memcmp(g_cur_param->start, num, sizeof(num) - 1) == 0); \
+ g_cur_param++
+
+#define PARAM_NAME(str) \
+ CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_NAME); \
+ CU_ASSERT(g_cur_param->len == sizeof(str) - 1); \
+ CU_ASSERT(g_cur_param && memcmp(g_cur_param->start, str, sizeof(str) - 1) == 0); \
+ g_cur_param++
+
+#define PARAM_STRING(str) \
+	CU_ASSERT(g_cur_param->type == SPDK_JSON_VAL_STRING); \
+	CU_ASSERT(g_cur_param->len == sizeof(str) - 1); \
+	CU_ASSERT(memcmp(g_cur_param->start, str, sizeof(str) - 1) == 0); \
+	g_cur_param++
+
+#define FREE_REQUEST() \
+	ut_jsonrpc_free_request(g_request, g_parse_error); \
+	g_request = NULL; \
+	g_parse_error = 0; \
+	g_method = NULL; \
+	g_cur_param = g_params = NULL
+
+static void
+ut_jsonrpc_free_request(struct spdk_jsonrpc_request *request, int err)
+{
+ struct spdk_json_write_ctx *w;
+
+ if (!request) {
+ return;
+ }
+
+	/* We need to emulate a response here so that the response write context gets freed. */
+ if (err == 0) {
+ w = spdk_jsonrpc_begin_result(request);
+ spdk_json_write_string(w, "UT PASS response");
+ spdk_jsonrpc_end_result(request, w);
+ } else {
+ spdk_jsonrpc_send_error_response_fmt(request, err, "UT error response");
+ }
+
+ jsonrpc_free_request(request);
+}
+
+static void
+ut_handle(struct spdk_jsonrpc_request *request, int error, const struct spdk_json_val *method,
+ const struct spdk_json_val *params)
+{
+ CU_ASSERT(g_request == NULL);
+ g_request = request;
+ g_parse_error = error;
+ g_method = method;
+ g_params = params;
+}
+
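+/*
+ * The jsonrpc_server_* functions below stand in for the real server callbacks
+ * (jsonrpc_server.c is included directly above); they record the parsed
+ * request, method, params and error code in the globals for the test macros.
+ */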
+void
+jsonrpc_server_handle_error(struct spdk_jsonrpc_request *request, int error)
+{
+ ut_handle(request, error, NULL, NULL);
+}
+
+void
+jsonrpc_server_handle_request(struct spdk_jsonrpc_request *request,
+ const struct spdk_json_val *method, const struct spdk_json_val *params)
+{
+ ut_handle(request, 0, method, params);
+}
+
+void
+jsonrpc_server_send_response(struct spdk_jsonrpc_request *request)
+{
+}
+
+static void
+test_parse_request(void)
+{
+ struct spdk_jsonrpc_server *server;
+ struct spdk_jsonrpc_server_conn *conn;
+
+ server = calloc(1, sizeof(*server));
+ SPDK_CU_ASSERT_FATAL(server != NULL);
+
+ conn = calloc(1, sizeof(*conn));
+ SPDK_CU_ASSERT_FATAL(conn != NULL);
+
+ conn->server = server;
+
+	/* rpc call with an empty request object. */
+ PARSE_PASS("{ }", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ /* rpc call with method that is not a string. */
+ PARSE_PASS("{\"jsonrpc\":\"2.0\", \"method\": null }", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ /* rpc call with invalid JSON RPC version. */
+ PARSE_PASS("{\"jsonrpc\":\"42\", \"method\": \"subtract\"}", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ /* rpc call with embedded zeros. */
+ PARSE_FAIL("{\"jsonrpc\":\"2.0\",\"method\":\"foo\",\"params\":{\"bar\": \"\0\0baz\"}}");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_PARSE_ERROR);
+ FREE_REQUEST();
+
+ /* rpc call with positional parameters */
+ PARSE_PASS("{\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":1}", "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("subtract");
+ REQ_ID_NUM("1");
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("42");
+ PARAM_NUM("23");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+ /* rpc call with named parameters */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": {\"subtrahend\": 23, \"minuend\": 42}, \"id\": 3}",
+ "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("subtract");
+ REQ_ID_NUM("3");
+ REQ_PARAMS_BEGIN();
+ PARAM_OBJECT_BEGIN();
+ PARAM_NAME("subtrahend");
+ PARAM_NUM("23");
+ PARAM_NAME("minuend");
+ PARAM_NUM("42");
+ PARAM_OBJECT_END();
+ FREE_REQUEST();
+
+ /* notification */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": \"update\", \"params\": [1,2,3,4,5]}", "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("update");
+ REQ_ID_MISSING();
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("1");
+ PARAM_NUM("2");
+ PARAM_NUM("3");
+ PARAM_NUM("4");
+ PARAM_NUM("5");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+	/* notification with an explicit NULL ID. This is discouraged by the JSON-RPC spec but allowed. */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": \"update\", \"params\": [1,2,3,4,5], \"id\": null}",
+ "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("update");
+ REQ_ID_NULL();
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("1");
+ PARAM_NUM("2");
+ PARAM_NUM("3");
+ PARAM_NUM("4");
+ PARAM_NUM("5");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+ /* invalid JSON */
+ PARSE_FAIL("{\"jsonrpc\": \"2.0\", \"method\": \"foobar, \"params\": \"bar\", \"baz]");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_PARSE_ERROR);
+ FREE_REQUEST();
+
+ /* invalid request (method must be a string; params must be array or object) */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": 1, \"params\": \"bar\"}", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ /* batch, invalid JSON */
+ PARSE_FAIL(
+ "["
+ "{\"jsonrpc\": \"2.0\", \"method\": \"sum\", \"params\": [1,2,4], \"id\": \"1\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\""
+ "]");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_PARSE_ERROR);
+ FREE_REQUEST();
+
+ /* empty array */
+ PARSE_PASS("[]", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ /* batch - not supported */
+ PARSE_PASS(
+ "["
+ "{\"jsonrpc\": \"2.0\", \"method\": \"sum\", \"params\": [1,2,4], \"id\": \"1\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"notify_hello\", \"params\": [7]},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42,23], \"id\": \"2\"},"
+ "{\"foo\": \"boo\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"foo.get\", \"params\": {\"name\": \"myself\"}, \"id\": \"5\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"get_data\", \"id\": \"9\"}"
+ "]", "");
+
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ FREE_REQUEST();
+
+ CU_ASSERT(conn->outstanding_requests == 0);
+ free(conn);
+ free(server);
+}
+
+static void
+test_parse_request_streaming(void)
+{
+ struct spdk_jsonrpc_server *server;
+ struct spdk_jsonrpc_server_conn *conn;
+ const char *json_req;
+ size_t len, i;
+
+ server = calloc(1, sizeof(*server));
+ SPDK_CU_ASSERT_FATAL(server != NULL);
+
+ conn = calloc(1, sizeof(*conn));
+ SPDK_CU_ASSERT_FATAL(conn != NULL);
+
+ conn->server = server;
+
+
+ /*
+	 * Two valid requests back to back in the same buffer.
+	 * The parser should consume the first one and leave the second one as trailing data.
+ */
+ PARSE_PASS(
+ "{\"jsonrpc\":\"2.0\",\"method\":\"a\",\"params\":[1],\"id\":1}"
+ "{\"jsonrpc\":\"2.0\",\"method\":\"b\",\"params\":[2],\"id\":2}",
+ "{\"jsonrpc\":\"2.0\",\"method\":\"b\",\"params\":[2],\"id\":2}");
+
+ REQ_BEGIN_VALID();
+ REQ_METHOD("a");
+ REQ_ID_NUM("1");
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("1");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+ /* Partial (but not invalid) requests - parse should not consume anything. */
+ json_req = " {\"jsonrpc\":\"2.0\",\"method\":\"b\",\"params\":[2],\"id\":2}";
+ len = strlen(json_req);
+
+ /* Try every partial length up to the full request length */
+ for (i = 0; i < len; i++) {
+ int rc = jsonrpc_parse_request(conn, json_req, i);
+ /* Partial request - no data consumed */
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_request == NULL);
+
+		/* In case of failure, don't flood the console with useless CU assert failures. */
+ FREE_REQUEST();
+ }
+
+ /* Verify that full request can be parsed successfully */
+ CU_ASSERT(jsonrpc_parse_request(conn, json_req, len) == (ssize_t)len);
+ FREE_REQUEST();
+
+ CU_ASSERT(conn->outstanding_requests == 0);
+ free(conn);
+ free(server);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("jsonrpc", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_parse_request);
+ CU_ADD_TEST(suite, test_parse_request_streaming);
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+	/* This is for ASAN. For reasons we have not tracked down, a pointer left in a
+	 * global variable is not reported as a leak. */
+ g_request = NULL;
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/log/Makefile b/src/spdk/test/unit/lib/log/Makefile
new file mode 100644
index 000000000..79411a459
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = log.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/log/log.c/.gitignore b/src/spdk/test/unit/lib/log/log.c/.gitignore
new file mode 100644
index 000000000..60261c07b
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/log.c/.gitignore
@@ -0,0 +1 @@
+log_ut
diff --git a/src/spdk/test/unit/lib/log/log.c/Makefile b/src/spdk/test/unit/lib/log/log.c/Makefile
new file mode 100644
index 000000000..e3ba9340c
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/log.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = log_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/log/log.c/log_ut.c b/src/spdk/test/unit/lib/log/log.c/log_ut.c
new file mode 100644
index 000000000..87a578b84
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/log.c/log_ut.c
@@ -0,0 +1,106 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk/log.h"
+
+#include "log/log.c"
+#include "log/log_flags.c"
+
+static void
+log_test(void)
+{
+ spdk_log_set_level(SPDK_LOG_ERROR);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_ERROR);
+ spdk_log_set_level(SPDK_LOG_WARN);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_WARN);
+ spdk_log_set_level(SPDK_LOG_NOTICE);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_NOTICE);
+ spdk_log_set_level(SPDK_LOG_INFO);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_INFO);
+ spdk_log_set_level(SPDK_LOG_DEBUG);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_DEBUG);
+
+ spdk_log_set_print_level(SPDK_LOG_ERROR);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_ERROR);
+ spdk_log_set_print_level(SPDK_LOG_WARN);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_WARN);
+ spdk_log_set_print_level(SPDK_LOG_NOTICE);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_NOTICE);
+ spdk_log_set_print_level(SPDK_LOG_INFO);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_INFO);
+ spdk_log_set_print_level(SPDK_LOG_DEBUG);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_DEBUG);
+
+#ifdef DEBUG
+ CU_ASSERT(spdk_log_get_flag("log") == false);
+
+ spdk_log_set_flag("log");
+ CU_ASSERT(spdk_log_get_flag("log") == true);
+
+ spdk_log_clear_flag("log");
+ CU_ASSERT(spdk_log_get_flag("log") == false);
+#endif
+
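+	/* Exercise the log output paths; nothing is asserted on the emitted text,
+	 * these calls only need to complete without crashing. */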
+ spdk_log_open(NULL);
+ spdk_log_set_flag("log");
+ SPDK_WARNLOG("log warning unit test\n");
+ SPDK_DEBUGLOG(SPDK_LOG_LOG, "log test\n");
+ SPDK_LOGDUMP(SPDK_LOG_LOG, "log dump test:", "log dump", 8);
+ spdk_log_dump(stderr, "spdk dump test:", "spdk dump", 9);
+ /* Test spdk_log_dump with more than 16 chars and less than 32 chars */
+ spdk_log_dump(stderr, "spdk dump test:", "spdk dump 16 more chars", 23);
+
+ spdk_log_close();
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("log", NULL, NULL);
+
+ CU_ADD_TEST(suite, log_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/lvol/Makefile b/src/spdk/test/unit/lib/lvol/Makefile
new file mode 100644
index 000000000..c9276de47
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = lvol.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/lvol/lvol.c/.gitignore b/src/spdk/test/unit/lib/lvol/lvol.c/.gitignore
new file mode 100644
index 000000000..57e92bfe1
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/lvol.c/.gitignore
@@ -0,0 +1 @@
+lvol_ut
diff --git a/src/spdk/test/unit/lib/lvol/lvol.c/Makefile b/src/spdk/test/unit/lib/lvol/lvol.c/Makefile
new file mode 100644
index 000000000..aa9acde11
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/lvol.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = lvol_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c b/src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c
new file mode 100644
index 000000000..72f7b6e81
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c
@@ -0,0 +1,2096 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk/blob.h"
+#include "spdk/thread.h"
+#include "spdk/util.h"
+
+#include "common/lib/ut_multithread.c"
+
+#include "lvol/lvol.c"
+
+#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
+#define DEV_BUFFER_BLOCKLEN (4096)
+#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
+#define BS_CLUSTER_SIZE (1024 * 1024)
+#define BS_FREE_CLUSTERS (DEV_BUFFER_SIZE / BS_CLUSTER_SIZE)
+#define BS_PAGE_SIZE (4096)
+
+#define SPDK_BLOB_OPTS_CLUSTER_SZ (1024 * 1024)
+#define SPDK_BLOB_OPTS_NUM_MD_PAGES UINT32_MAX
+#define SPDK_BLOB_OPTS_MAX_MD_OPS 32
+#define SPDK_BLOB_OPTS_MAX_CHANNEL_OPS 512
+
+#define SPDK_BLOB_THIN_PROV (1ULL << 0)
+
+const char *uuid = "828d9766-ae50-11e7-bd8d-001e67edf350";
+
+struct spdk_blob {
+ spdk_blob_id id;
+ uint32_t ref;
+ struct spdk_blob_store *bs;
+ int close_status;
+ int open_status;
+ int load_status;
+ TAILQ_ENTRY(spdk_blob) link;
+ char uuid[SPDK_UUID_STRING_LEN];
+ char name[SPDK_LVS_NAME_MAX];
+ bool thin_provisioned;
+};
+
+int g_lvserrno;
+int g_close_super_status;
+int g_resize_rc;
+int g_inflate_rc;
+int g_remove_rc;
+bool g_lvs_rename_blob_open_error = false;
+struct spdk_lvol_store *g_lvol_store;
+struct spdk_lvol *g_lvol;
+spdk_blob_id g_blobid = 1;
+struct spdk_io_channel *g_io_channel;
+
+struct spdk_blob_store {
+ struct spdk_bs_opts bs_opts;
+ spdk_blob_id super_blobid;
+ TAILQ_HEAD(, spdk_blob) blobs;
+ int get_super_status;
+};
+
+struct lvol_ut_bs_dev {
+ struct spdk_bs_dev bs_dev;
+ int init_status;
+ int load_status;
+ struct spdk_blob_store *bs;
+};
+
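+/*
+ * Minimal stand-ins for the blobstore API used by lvol.c: they complete their
+ * callbacks synchronously and take status codes from the globals above so that
+ * individual tests can force specific error paths.
+ */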
+void spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
+ spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, g_inflate_rc);
+}
+
+void spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
+ spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, g_inflate_rc);
+}
+
+void
+spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *b,
+ spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *next;
+ int _errno = 0;
+
+ next = TAILQ_NEXT(b, link);
+ if (next == NULL) {
+ _errno = -ENOENT;
+ } else if (next->load_status != 0) {
+ _errno = next->load_status;
+ }
+
+ cb_fn(cb_arg, next, _errno);
+}
+
+void
+spdk_bs_iter_first(struct spdk_blob_store *bs,
+ spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *first;
+ int _errno = 0;
+
+ first = TAILQ_FIRST(&bs->blobs);
+ if (first == NULL) {
+ _errno = -ENOENT;
+ } else if (first->load_status != 0) {
+ _errno = first->load_status;
+ }
+
+ cb_fn(cb_arg, first, _errno);
+}
+
+uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
+{
+ return 0;
+}
+
+void
+spdk_bs_get_super(struct spdk_blob_store *bs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ if (bs->get_super_status != 0) {
+ cb_fn(cb_arg, 0, bs->get_super_status);
+ } else {
+ cb_fn(cb_arg, bs->super_blobid, 0);
+ }
+}
+
+void
+spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_bs_op_complete cb_fn, void *cb_arg)
+{
+ bs->super_blobid = blobid;
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts,
+ spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct lvol_ut_bs_dev *ut_dev = SPDK_CONTAINEROF(dev, struct lvol_ut_bs_dev, bs_dev);
+ struct spdk_blob_store *bs = NULL;
+
+ if (ut_dev->load_status == 0) {
+ bs = ut_dev->bs;
+ }
+
+ cb_fn(cb_arg, bs, ut_dev->load_status);
+}
+
+struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
+{
+ if (g_io_channel == NULL) {
+ g_io_channel = calloc(1, sizeof(struct spdk_io_channel));
+ SPDK_CU_ASSERT_FATAL(g_io_channel != NULL);
+ }
+ g_io_channel->ref++;
+ return g_io_channel;
+}
+
+void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
+{
+ g_io_channel->ref--;
+ if (g_io_channel->ref == 0) {
+ free(g_io_channel);
+ g_io_channel = NULL;
+ }
+ return;
+}
+
+int
+spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
+ uint16_t value_len)
+{
+ if (!strcmp(name, "uuid")) {
+ CU_ASSERT(value_len == SPDK_UUID_STRING_LEN);
+ memcpy(blob->uuid, value, SPDK_UUID_STRING_LEN);
+ } else if (!strcmp(name, "name")) {
+ CU_ASSERT(value_len <= SPDK_LVS_NAME_MAX);
+ memcpy(blob->name, value, value_len);
+ }
+
+ return 0;
+}
+
+int
+spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
+ const void **value, size_t *value_len)
+{
+ if (!strcmp(name, "uuid") && strnlen(blob->uuid, SPDK_UUID_STRING_LEN) != 0) {
+ CU_ASSERT(strnlen(blob->uuid, SPDK_UUID_STRING_LEN) == (SPDK_UUID_STRING_LEN - 1));
+ *value = blob->uuid;
+ *value_len = SPDK_UUID_STRING_LEN;
+ return 0;
+ } else if (!strcmp(name, "name") && strnlen(blob->name, SPDK_LVS_NAME_MAX) != 0) {
+ *value = blob->name;
+ *value_len = strnlen(blob->name, SPDK_LVS_NAME_MAX) + 1;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+bool spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
+{
+ return blob->thin_provisioned;
+}
+
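+/* DEFINE_STUB comes from the SPDK unit test helpers and generates a trivial
+ * implementation that simply returns the given constant. */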
+DEFINE_STUB(spdk_blob_get_clones, int, (struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_blob_id *ids, size_t *count), 0);
+DEFINE_STUB(spdk_bs_get_page_size, uint64_t, (struct spdk_blob_store *bs), BS_PAGE_SIZE);
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
+static void
+init_dev(struct lvol_ut_bs_dev *dev)
+{
+ memset(dev, 0, sizeof(*dev));
+ dev->bs_dev.blockcnt = DEV_BUFFER_BLOCKCNT;
+ dev->bs_dev.blocklen = DEV_BUFFER_BLOCKLEN;
+}
+
+static void
+free_dev(struct lvol_ut_bs_dev *dev)
+{
+ struct spdk_blob_store *bs = dev->bs;
+ struct spdk_blob *blob, *tmp;
+
+ if (bs == NULL) {
+ return;
+ }
+
+ TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, tmp) {
+ TAILQ_REMOVE(&bs->blobs, blob, link);
+ free(blob);
+ }
+
+ free(bs);
+ dev->bs = NULL;
+}
+
+void
+spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
+ spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct lvol_ut_bs_dev *ut_dev = SPDK_CONTAINEROF(dev, struct lvol_ut_bs_dev, bs_dev);
+ struct spdk_blob_store *bs;
+
+ bs = calloc(1, sizeof(*bs));
+ SPDK_CU_ASSERT_FATAL(bs != NULL);
+
+ TAILQ_INIT(&bs->blobs);
+
+ ut_dev->bs = bs;
+
+ memcpy(&bs->bs_opts, o, sizeof(struct spdk_bs_opts));
+
+ cb_fn(cb_arg, bs, 0);
+}
+
+void
+spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
+ void *cb_arg)
+{
+ free(bs);
+
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *blob;
+
+ TAILQ_FOREACH(blob, &bs->blobs, link) {
+ if (blob->id == blobid) {
+ TAILQ_REMOVE(&bs->blobs, blob, link);
+ free(blob);
+ break;
+ }
+ }
+
+ cb_fn(cb_arg, g_remove_rc);
+}
+
+spdk_blob_id
+spdk_blob_get_id(struct spdk_blob *blob)
+{
+ return blob->id;
+}
+
+void
+spdk_bs_opts_init(struct spdk_bs_opts *opts)
+{
+ opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
+ opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
+ opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
+ opts->max_channel_ops = SPDK_BLOB_OPTS_MAX_CHANNEL_OPS;
+ memset(&opts->bstype, 0, sizeof(opts->bstype));
+}
+
+DEFINE_STUB(spdk_bs_get_cluster_size, uint64_t, (struct spdk_blob_store *bs), BS_CLUSTER_SIZE);
+
+void spdk_blob_close(struct spdk_blob *b, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ b->ref--;
+
+ cb_fn(cb_arg, b->close_status);
+}
+
+void
+spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ if (g_resize_rc != 0) {
+ return cb_fn(cb_arg, g_resize_rc);
+ } else if (sz > DEV_BUFFER_SIZE / BS_CLUSTER_SIZE) {
+ return cb_fn(cb_arg, -ENOMEM);
+ }
+ cb_fn(cb_arg, 0);
+}
+
+DEFINE_STUB(spdk_blob_set_read_only, int, (struct spdk_blob *blob), 0);
+
+void
+spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_open_blob(bs, blobid, cb_fn, cb_arg);
+}
+
+void
+spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *blob;
+
+ if (!g_lvs_rename_blob_open_error) {
+ TAILQ_FOREACH(blob, &bs->blobs, link) {
+ if (blob->id == blobid) {
+ blob->ref++;
+ cb_fn(cb_arg, blob, blob->open_status);
+ return;
+ }
+ }
+ }
+
+ cb_fn(cb_arg, NULL, -ENOENT);
+}
+
+DEFINE_STUB(spdk_bs_free_cluster_count, uint64_t, (struct spdk_blob_store *bs), BS_FREE_CLUSTERS);
+
+void
+spdk_blob_opts_init(struct spdk_blob_opts *opts)
+{
+ opts->num_clusters = 0;
+ opts->thin_provision = false;
+ opts->xattrs.count = 0;
+ opts->xattrs.names = NULL;
+ opts->xattrs.ctx = NULL;
+ opts->xattrs.get_value = NULL;
+}
+
+void
+spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts)
+{
+ opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
+}
+
+void
+spdk_bs_create_blob(struct spdk_blob_store *bs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_create_blob_ext(bs, NULL, cb_fn, cb_arg);
+}
+
+void
+spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *b;
+
+ if (opts && opts->num_clusters > DEV_BUFFER_SIZE / BS_CLUSTER_SIZE) {
+ cb_fn(cb_arg, 0, -1);
+ return;
+ }
+
+ b = calloc(1, sizeof(*b));
+ SPDK_CU_ASSERT_FATAL(b != NULL);
+
+ b->id = g_blobid++;
+ if (opts != NULL && opts->thin_provision) {
+ b->thin_provisioned = true;
+ }
+ b->bs = bs;
+
+ TAILQ_INSERT_TAIL(&bs->blobs, b, link);
+ cb_fn(cb_arg, b->id, 0);
+}
+
+void
+spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ const struct spdk_blob_xattr_opts *snapshot_xattrs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_create_blob_ext(bs, NULL, cb_fn, cb_arg);
+}
+
+void
+spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ const struct spdk_blob_xattr_opts *clone_xattrs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_create_blob_ext(bs, NULL, cb_fn, cb_arg);
+}
+
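+/* Completion callbacks used by the tests below; they just latch the results into globals. */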
+static void
+lvol_store_op_with_handle_complete(void *cb_arg, struct spdk_lvol_store *lvol_store, int lvserrno)
+{
+ g_lvol_store = lvol_store;
+ g_lvserrno = lvserrno;
+}
+
+static void
+lvol_op_with_handle_complete(void *cb_arg, struct spdk_lvol *lvol, int lvserrno)
+{
+ g_lvol = lvol;
+ g_lvserrno = lvserrno;
+}
+
+static void
+op_complete(void *cb_arg, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+}
+
+static void
+lvs_init_unload_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Lvol store has an open lvol, this unload should fail. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == -EBUSY);
+ CU_ASSERT(g_lvserrno == -EBUSY);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ free_dev(&dev);
+}
+
+static void
+lvs_init_destroy_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Lvol store contains one lvol, this destroy should fail. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == -EBUSY);
+ CU_ASSERT(g_lvserrno == -EBUSY);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+}
+
+static void
+lvs_init_opts_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ g_lvserrno = -1;
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+ opts.cluster_sz = 8192;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(dev.bs->bs_opts.cluster_sz == opts.cluster_sz);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvs_unload_lvs_is_null_fail(void)
+{
+ int rc = 0;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(NULL, op_complete, NULL);
+ CU_ASSERT(rc == -ENODEV);
+ CU_ASSERT(g_lvserrno == -1);
+}
+
+static void
+lvs_names(void)
+{
+ struct lvol_ut_bs_dev dev_x, dev_y, dev_x2;
+ struct spdk_lvs_opts opts_none, opts_x, opts_y, opts_full;
+ struct spdk_lvol_store *lvs_x, *lvs_y, *lvs_x2;
+ int rc = 0;
+
+ init_dev(&dev_x);
+ init_dev(&dev_y);
+ init_dev(&dev_x2);
+
+ spdk_lvs_opts_init(&opts_none);
+ spdk_lvs_opts_init(&opts_x);
+ opts_x.name[0] = 'x';
+ spdk_lvs_opts_init(&opts_y);
+ opts_y.name[0] = 'y';
+ spdk_lvs_opts_init(&opts_full);
+ memset(opts_full.name, 'a', sizeof(opts_full.name));
+
+ /* Test that opts with no name fails spdk_lvs_init(). */
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_none, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Test that opts with no null terminator for name fails spdk_lvs_init(). */
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_full, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Test that we can create an lvolstore with name 'x'. */
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x = g_lvol_store;
+
+ /* Test that we can create an lvolstore with name 'y'. */
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_y.bs_dev, &opts_y, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_y = g_lvol_store;
+
+ /* Test that we cannot create another lvolstore with name 'x'. */
+ rc = spdk_lvs_init(&dev_x2.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Now destroy lvolstore 'x' and then confirm we can create a new lvolstore with name 'x'. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_x, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x = g_lvol_store;
+
+ /*
+ * Unload lvolstore 'x'. Then we should be able to create another lvolstore with name 'x'.
+ */
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(lvs_x, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_x2.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x2 = g_lvol_store;
+
+ /* Confirm that we cannot load the first lvolstore 'x'. */
+ g_lvserrno = 0;
+ spdk_lvs_load(&dev_x.bs_dev, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ /* Destroy the second lvolstore 'x'. Then we should be able to load the first lvolstore 'x'. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_x2, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+ spdk_lvs_load(&dev_x.bs_dev, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x = g_lvol_store;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_x, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_y, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+}
+
+static void
+lvol_create_destroy_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_create_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ rc = spdk_lvs_init(NULL, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ g_lvol = NULL;
+ rc = spdk_lvol_create(NULL, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol == NULL);
+
+ g_lvol = NULL;
+ rc = spdk_lvol_create(g_lvol_store, "lvol", DEV_BUFFER_SIZE + 1, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol == NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_destroy_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_remove_rc = -1;
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_store->lvols));
+ g_remove_rc = 0;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_close_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_close_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_resize(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_resize_rc = 0;
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Resize to same size */
+ spdk_lvol_resize(g_lvol, 10, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to smaller size */
+ spdk_lvol_resize(g_lvol, 5, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to bigger size */
+ spdk_lvol_resize(g_lvol, 15, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to size = 0 */
+ spdk_lvol_resize(g_lvol, 0, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to bigger size than available */
+ g_lvserrno = 0;
+ spdk_lvol_resize(g_lvol, 0xFFFFFFFF, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ /* Fail resize */
+ g_resize_rc = -1;
+ g_lvserrno = 0;
+ spdk_lvol_resize(g_lvol, 10, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ g_resize_rc = 0;
+
+ g_resize_rc = 0;
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+lvol_set_read_only(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+ struct spdk_lvol *lvol, *clone;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ /* Set lvol as read only */
+ spdk_lvol_set_read_only(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Create lvol clone from read only lvol */
+ spdk_lvol_create_clone(lvol, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "clone");
+ clone = g_lvol;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_close(clone, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
+static void
+null_cb(void *ctx, struct spdk_blob_store *bs, int bserrno)
+{
+ SPDK_CU_ASSERT_FATAL(bs != NULL);
+}
+
+static void
+lvs_load(void)
+{
+ int rc = -1;
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_with_handle_req *req;
+ struct spdk_bs_opts bs_opts = {};
+ struct spdk_blob *super_blob;
+
+ req = calloc(1, sizeof(*req));
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ init_dev(&dev);
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "LVOLSTORE");
+ spdk_bs_init(&dev.bs_dev, &bs_opts, null_cb, NULL);
+ SPDK_CU_ASSERT_FATAL(dev.bs != NULL);
+
+ /* Fail on bs load */
+ dev.load_status = -1;
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on getting super blob */
+ dev.load_status = 0;
+ dev.bs->get_super_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -ENODEV);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on opening super blob */
+ g_lvserrno = 0;
+ super_blob = calloc(1, sizeof(*super_blob));
+ super_blob->id = 0x100;
+ super_blob->open_status = -1;
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, super_blob, link);
+ dev.bs->super_blobid = 0x100;
+ dev.bs->get_super_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -ENODEV);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on getting uuid */
+ g_lvserrno = 0;
+ super_blob->open_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -EINVAL);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on getting name */
+ g_lvserrno = 0;
+ spdk_blob_set_xattr(super_blob, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -EINVAL);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on closing super blob */
+ g_lvserrno = 0;
+ spdk_blob_set_xattr(super_blob, "name", "lvs", strnlen("lvs", SPDK_LVS_NAME_MAX) + 1);
+ super_blob->close_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -ENODEV);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Load successfully */
+ g_lvserrno = 0;
+ super_blob->close_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ free(req);
+ free_dev(&dev);
+}
+
+static void
+lvols_load(void)
+{
+ int rc = -1;
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_with_handle_req *req;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob *super_blob, *blob1, *blob2, *blob3;
+
+ req = calloc(1, sizeof(*req));
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ init_dev(&dev);
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "LVOLSTORE");
+ spdk_bs_init(&dev.bs_dev, &bs_opts, null_cb, NULL);
+ super_blob = calloc(1, sizeof(*super_blob));
+ SPDK_CU_ASSERT_FATAL(super_blob != NULL);
+ super_blob->id = 0x100;
+ spdk_blob_set_xattr(super_blob, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(super_blob, "name", "lvs", strnlen("lvs", SPDK_LVS_NAME_MAX) + 1);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, super_blob, link);
+ dev.bs->super_blobid = 0x100;
+
+ /*
+ * Create 3 blobs, write different char values to the last char in the UUID
+ * to make sure they are unique.
+ */
+ blob1 = calloc(1, sizeof(*blob1));
+ SPDK_CU_ASSERT_FATAL(blob1 != NULL);
+ blob1->id = 0x1;
+ spdk_blob_set_xattr(blob1, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob1, "name", "lvol1", strnlen("lvol1", SPDK_LVOL_NAME_MAX) + 1);
+ blob1->uuid[SPDK_UUID_STRING_LEN - 2] = '1';
+
+ blob2 = calloc(1, sizeof(*blob2));
+ SPDK_CU_ASSERT_FATAL(blob2 != NULL);
+ blob2->id = 0x2;
+ spdk_blob_set_xattr(blob2, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob2, "name", "lvol2", strnlen("lvol2", SPDK_LVOL_NAME_MAX) + 1);
+ blob2->uuid[SPDK_UUID_STRING_LEN - 2] = '2';
+
+ blob3 = calloc(1, sizeof(*blob3));
+ SPDK_CU_ASSERT_FATAL(blob3 != NULL);
+ blob3->id = 0x2;
+ spdk_blob_set_xattr(blob3, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob3, "name", "lvol3", strnlen("lvol3", SPDK_LVOL_NAME_MAX) + 1);
+ blob3->uuid[SPDK_UUID_STRING_LEN - 2] = '3';
+
+ /* Load lvs with 0 blobs */
+ g_lvserrno = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store != NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob1, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob2, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob3, link);
+
+ /* Load lvs again with 3 blobs, but fail on 1st one */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ blob1->load_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Load lvs again with 3 blobs, but fail on 3rd one */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ blob1->load_status = 0;
+ blob2->load_status = 0;
+ blob3->load_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Load lvs again with 3 blobs, with success */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ blob1->load_status = 0;
+ blob2->load_status = 0;
+ blob3->load_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_store->lvols));
+
+ g_lvserrno = -1;
+ /* rc = */ spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ /*
+ * Disable these two asserts for now. lvolstore should allow unload as long
+	 * as the lvols were not opened - but this is coming in a future patch.
+ */
+ /* CU_ASSERT(rc == 0); */
+ /* CU_ASSERT(g_lvserrno == 0); */
+
+ free(req);
+ free_dev(&dev);
+}
+
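+/*
+ * Load an lvol store with three lvols and exercise spdk_lvol_open(): all opens failing,
+ * then all succeeding, followed by closing every lvol and destroying the lvol store.
+ */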
+static void
+lvol_open(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_with_handle_req *req;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob *super_blob, *blob1, *blob2, *blob3;
+ struct spdk_lvol *lvol, *tmp;
+
+ req = calloc(1, sizeof(*req));
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ init_dev(&dev);
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "LVOLSTORE");
+ spdk_bs_init(&dev.bs_dev, &bs_opts, null_cb, NULL);
+ super_blob = calloc(1, sizeof(*super_blob));
+ SPDK_CU_ASSERT_FATAL(super_blob != NULL);
+ super_blob->id = 0x100;
+ spdk_blob_set_xattr(super_blob, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(super_blob, "name", "lvs", strnlen("lvs", SPDK_LVS_NAME_MAX) + 1);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, super_blob, link);
+ dev.bs->super_blobid = 0x100;
+
+ /*
+ * Create 3 blobs, write different char values to the last char in the UUID
+ * to make sure they are unique.
+ */
+ blob1 = calloc(1, sizeof(*blob1));
+ SPDK_CU_ASSERT_FATAL(blob1 != NULL);
+ blob1->id = 0x1;
+ spdk_blob_set_xattr(blob1, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob1, "name", "lvol1", strnlen("lvol1", SPDK_LVOL_NAME_MAX) + 1);
+ blob1->uuid[SPDK_UUID_STRING_LEN - 2] = '1';
+
+ blob2 = calloc(1, sizeof(*blob2));
+ SPDK_CU_ASSERT_FATAL(blob2 != NULL);
+ blob2->id = 0x2;
+ spdk_blob_set_xattr(blob2, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob2, "name", "lvol2", strnlen("lvol2", SPDK_LVOL_NAME_MAX) + 1);
+ blob2->uuid[SPDK_UUID_STRING_LEN - 2] = '2';
+
+ blob3 = calloc(1, sizeof(*blob3));
+ SPDK_CU_ASSERT_FATAL(blob3 != NULL);
+	blob3->id = 0x3;
+ spdk_blob_set_xattr(blob3, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob3, "name", "lvol3", strnlen("lvol3", SPDK_LVOL_NAME_MAX) + 1);
+ blob3->uuid[SPDK_UUID_STRING_LEN - 2] = '3';
+
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob1, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob2, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob3, link);
+
+ /* Load lvs with 3 blobs */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_lvol_stores));
+
+ blob1->open_status = -1;
+ blob2->open_status = -1;
+ blob3->open_status = -1;
+
+ /* Fail opening all lvols */
+ TAILQ_FOREACH_SAFE(lvol, &g_lvol_store->lvols, link, tmp) {
+ spdk_lvol_open(lvol, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ }
+
+ blob1->open_status = 0;
+ blob2->open_status = 0;
+ blob3->open_status = 0;
+
+ /* Open all lvols */
+ TAILQ_FOREACH_SAFE(lvol, &g_lvol_store->lvols, link, tmp) {
+ spdk_lvol_open(lvol, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ }
+
+ /* Close all lvols */
+ TAILQ_FOREACH_SAFE(lvol, &g_lvol_store->lvols, link, tmp) {
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ }
+
+ g_lvserrno = -1;
+ spdk_lvs_destroy(g_lvol_store, op_complete, NULL);
+
+ free(req);
+ free(blob1);
+ free(blob2);
+ free(blob3);
+}
+
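+/* Create an lvol and take a snapshot of it, then close both and unload the lvol store. */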
+static void
+lvol_snapshot(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
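+/*
+ * Snapshot error paths: a NULL lvol, an empty or NULL snapshot name, and a duplicate
+ * snapshot name must all fail, while a valid request still succeeds.
+ */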
+static void
+lvol_snapshot_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol, *snap;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(NULL, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+ spdk_lvol_create_snapshot(lvol, "", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+ spdk_lvol_create_snapshot(lvol, NULL, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ snap = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(snap, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
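+/* Create an lvol, snapshot it, and create a clone from the snapshot. */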
+static void
+lvol_clone(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *snap;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ snap = g_lvol;
+
+ spdk_lvol_create_clone(snap, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "clone");
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(snap, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
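+/*
+ * Clone error paths: a NULL snapshot, an empty or NULL clone name, and a duplicate
+ * clone name must all fail, while a valid request still succeeds.
+ */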
+static void
+lvol_clone_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *snap;
+ struct spdk_lvol *clone;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ snap = g_lvol;
+
+ spdk_lvol_create_clone(NULL, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_create_clone(snap, "", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_create_clone(snap, NULL, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_create_clone(snap, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "clone");
+
+ clone = g_lvol;
+
+ spdk_lvol_create_clone(snap, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(clone, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(snap, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
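+/*
+ * Lvol name validation: NULL, empty, too long, and duplicate names are rejected;
+ * a name can be reused after the original lvol is destroyed; a name already on the
+ * pending_lvols list also counts as taken.
+ */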
+static void
+lvol_names(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol, *lvol2;
+ char fullname[SPDK_LVOL_NAME_MAX];
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs = g_lvol_store;
+
+ rc = spdk_lvol_create(lvs, NULL, 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ rc = spdk_lvol_create(lvs, "", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ memset(fullname, 'x', sizeof(fullname));
+ rc = spdk_lvol_create(lvs, fullname, 1, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == -EEXIST);
+
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol2", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol2 = g_lvol;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ spdk_lvol_destroy(lvol, op_complete, NULL);
+
+ g_lvserrno = -1;
+ g_lvol = NULL;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ spdk_lvol_destroy(lvol, op_complete, NULL);
+
+ spdk_lvol_close(lvol2, op_complete, NULL);
+ spdk_lvol_destroy(lvol2, op_complete, NULL);
+
+	/* Simulate creating two lvols with the same name simultaneously. */
+ lvol = calloc(1, sizeof(*lvol));
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+ snprintf(lvol->name, sizeof(lvol->name), "tmp_name");
+ TAILQ_INSERT_TAIL(&lvs->pending_lvols, lvol, link);
+ rc = spdk_lvol_create(lvs, "tmp_name", 1, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Remove name from temporary list and try again. */
+ TAILQ_REMOVE(&lvs->pending_lvols, lvol, link);
+ free(lvol);
+
+ rc = spdk_lvol_create(lvs, "tmp_name", 1, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ spdk_lvol_destroy(lvol, op_complete, NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+}
+
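+/* Rename an lvol to a free name and verify that renaming to a name used by another lvol fails. */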
+static void
+lvol_rename(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol, *lvol2;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs = g_lvol_store;
+
+	/* Trying to create a new lvol */
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+	/* Trying to create a second lvol with an existing lvol name */
+ g_lvserrno = -1;
+ g_lvol = NULL;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == -EEXIST);
+ CU_ASSERT(g_lvserrno == -1);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+	/* Trying to create a second lvol with a non-existing name */
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol2", 1, false, LVOL_CLEAR_WITH_DEFAULT, lvol_op_with_handle_complete,
+ NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol2 = g_lvol;
+
+	/* Trying to rename lvol to a name that does not exist yet */
+ spdk_lvol_rename(lvol, "lvol_new", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "lvol_new");
+
+	/* Trying to rename lvol to another lvol's name */
+ spdk_lvol_rename(lvol2, "lvol_new", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_NOT_EQUAL(lvol2->name, "lvol_new");
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ spdk_lvol_destroy(lvol, op_complete, NULL);
+
+ spdk_lvol_close(lvol2, op_complete, NULL);
+ spdk_lvol_destroy(lvol2, op_complete, NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+}
+
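+/*
+ * Rename an lvol store: to a new name, to its current name, to a name used by another
+ * lvol store, to a name another rename is already claiming, and with a simulated blob
+ * open error.
+ */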
+static void
+lvs_rename(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol_store *lvs, *lvs2;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs = g_lvol_store;
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "unimportant_lvs_name");
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs2 = g_lvol_store;
+
+	/* Trying to rename lvs to a new name */
+ spdk_lvs_rename(lvs, "new_lvs_name", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+
+	/* Trying to rename lvs to the name it already has */
+ spdk_lvs_rename(lvs, "new_lvs_name", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+
+	/* Trying to rename lvs to an already existing name */
+ spdk_lvs_rename(lvs2, "new_lvs_name", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_EQUAL(lvs2->name, "unimportant_lvs_name");
+
+	/* Trying to rename lvs while another rename to the same name is in progress */
+ /* Simulate renaming process in progress */
+ snprintf(lvs2->new_name, sizeof(lvs2->new_name), "another_new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(lvs2->new_name, "another_new_lvs_name");
+ /* Start second process */
+ spdk_lvs_rename(lvs, "another_new_lvs_name", op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+	/* Revert lvs2 new_name to its proper value */
+ snprintf(lvs2->new_name, sizeof(lvs2->new_name), "unimportant_lvs_name");
+ CU_ASSERT_STRING_EQUAL(lvs2->new_name, "unimportant_lvs_name");
+
+	/* Simulate an error during lvs rename */
+ g_lvs_rename_blob_open_error = true;
+ spdk_lvs_rename(lvs, "complete_new_lvs_name", op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(lvs->new_name, "new_lvs_name");
+ g_lvs_rename_blob_open_error = false;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs2, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+}
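+
+/*
+ * Verify lvol reference counting: create, open, and close adjust ref_count, destroying
+ * an open lvol fails, and closing an already closed lvol fails.
+ */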
+static void
+lvol_refcnt(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol *lvol;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvol->ref_count == 1);
+
+ lvol = g_lvol;
+ spdk_lvol_open(g_lvol, lvol_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(lvol->ref_count == 2);
+
+	/* Trying to destroy an lvol while it is open should fail */
+ spdk_lvol_destroy(lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(lvol->ref_count == 1);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(lvol->ref_count == 0);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Try to close already closed lvol */
+ spdk_lvol_close(lvol, op_complete, NULL);
+ CU_ASSERT(lvol->ref_count == 0);
+ CU_ASSERT(g_lvserrno != 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
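+/* Create one thick-provisioned and one thin-provisioned lvol and check the blob's thin_provisioned flag. */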
+static void
+lvol_create_thin_provisioned(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ CU_ASSERT(g_lvol->blob->thin_provisioned == false);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ CU_ASSERT(g_lvol->blob->thin_provisioned == true);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+}
+
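+/* Inflate an lvol, covering both a failing and a successful blobstore inflate operation. */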
+static void
+lvol_inflate(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ g_inflate_rc = -1;
+ spdk_lvol_inflate(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ g_inflate_rc = 0;
+ spdk_lvol_inflate(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+	/* Make sure that all references to the io_channel were closed after
+	 * the inflate call
+ */
+ CU_ASSERT(g_io_channel == NULL);
+}
+
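+/* Decouple an lvol from its parent, covering both the failing and the successful path. */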
+static void
+lvol_decouple_parent(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, LVOL_CLEAR_WITH_DEFAULT,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ g_inflate_rc = -1;
+ spdk_lvol_decouple_parent(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ g_inflate_rc = 0;
+ spdk_lvol_decouple_parent(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_close(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+	/* Make sure that all references to the io_channel were closed after
+	 * the decouple parent call
+ */
+ CU_ASSERT(g_io_channel == NULL);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("lvol", NULL, NULL);
+
+ CU_ADD_TEST(suite, lvs_init_unload_success);
+ CU_ADD_TEST(suite, lvs_init_destroy_success);
+ CU_ADD_TEST(suite, lvs_init_opts_success);
+ CU_ADD_TEST(suite, lvs_unload_lvs_is_null_fail);
+ CU_ADD_TEST(suite, lvs_names);
+ CU_ADD_TEST(suite, lvol_create_destroy_success);
+ CU_ADD_TEST(suite, lvol_create_fail);
+ CU_ADD_TEST(suite, lvol_destroy_fail);
+ CU_ADD_TEST(suite, lvol_close_fail);
+ CU_ADD_TEST(suite, lvol_close_success);
+ CU_ADD_TEST(suite, lvol_resize);
+ CU_ADD_TEST(suite, lvol_set_read_only);
+ CU_ADD_TEST(suite, lvs_load);
+ CU_ADD_TEST(suite, lvols_load);
+ CU_ADD_TEST(suite, lvol_open);
+ CU_ADD_TEST(suite, lvol_snapshot);
+ CU_ADD_TEST(suite, lvol_snapshot_fail);
+ CU_ADD_TEST(suite, lvol_clone);
+ CU_ADD_TEST(suite, lvol_clone_fail);
+ CU_ADD_TEST(suite, lvol_refcnt);
+ CU_ADD_TEST(suite, lvol_names);
+ CU_ADD_TEST(suite, lvol_create_thin_provisioned);
+ CU_ADD_TEST(suite, lvol_rename);
+ CU_ADD_TEST(suite, lvs_rename);
+ CU_ADD_TEST(suite, lvol_inflate);
+ CU_ADD_TEST(suite, lvol_decouple_parent);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/notify/Makefile b/src/spdk/test/unit/lib/notify/Makefile
new file mode 100644
index 000000000..9b29a3e07
--- /dev/null
+++ b/src/spdk/test/unit/lib/notify/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = notify.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/notify/notify.c/.gitignore b/src/spdk/test/unit/lib/notify/notify.c/.gitignore
new file mode 100644
index 000000000..f20d6130e
--- /dev/null
+++ b/src/spdk/test/unit/lib/notify/notify.c/.gitignore
@@ -0,0 +1 @@
+notify_ut
diff --git a/src/spdk/test/unit/lib/notify/notify.c/Makefile b/src/spdk/test/unit/lib/notify/notify.c/Makefile
new file mode 100644
index 000000000..c6490b778
--- /dev/null
+++ b/src/spdk/test/unit/lib/notify/notify.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+TEST_FILE = notify_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/notify/notify.c/notify_ut.c b/src/spdk/test/unit/lib/notify/notify.c/notify_ut.c
new file mode 100644
index 000000000..9a1095fb3
--- /dev/null
+++ b/src/spdk/test/unit/lib/notify/notify.c/notify_ut.c
@@ -0,0 +1,111 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+#include "notify/notify.c"
+
+static int
+event_cb(uint64_t idx, const struct spdk_notify_event *event, void *ctx)
+{
+ const struct spdk_notify_event **_event = ctx;
+
+ *_event = event;
+ return 0;
+}
+
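+/*
+ * Register two notification types, send one event of each, and verify that
+ * spdk_notify_foreach_event() returns them by index and stops past the last event.
+ */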
+static void
+notify(void)
+{
+ struct spdk_notify_type *n1, *n2;
+ const struct spdk_notify_event *event;
+ const char *name;
+ uint64_t cnt;
+
+ n1 = spdk_notify_type_register("one");
+ n2 = spdk_notify_type_register("two");
+
+ name = spdk_notify_type_get_name(n1);
+ CU_ASSERT(strcmp(name, "one") == 0);
+
+ name = spdk_notify_type_get_name(n2);
+ CU_ASSERT(strcmp(name, "two") == 0);
+
+ spdk_notify_send("one", "one_context");
+ spdk_notify_send("two", "two_context");
+
+ event = NULL;
+ cnt = spdk_notify_foreach_event(0, 1, event_cb, &event);
+ SPDK_CU_ASSERT_FATAL(cnt == 1);
+ SPDK_CU_ASSERT_FATAL(event != NULL);
+ CU_ASSERT(strcmp(event->type, "one") == 0);
+ CU_ASSERT(strcmp(event->ctx, "one_context") == 0);
+
+ event = NULL;
+ cnt = spdk_notify_foreach_event(1, 1, event_cb, &event);
+ SPDK_CU_ASSERT_FATAL(cnt == 1);
+ SPDK_CU_ASSERT_FATAL(event != NULL);
+ CU_ASSERT(strcmp(event->type, "two") == 0);
+ CU_ASSERT(strcmp(event->ctx, "two_context") == 0);
+
+ /* This event should not exist yet */
+ event = NULL;
+ cnt = spdk_notify_foreach_event(2, 1, event_cb, &event);
+ CU_ASSERT(cnt == 0);
+ CU_ASSERT(event == NULL);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("app_suite", NULL, NULL);
+ CU_ADD_TEST(suite, notify);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/Makefile b/src/spdk/test/unit/lib/nvme/Makefile
new file mode 100644
index 000000000..5f74579d2
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/Makefile
@@ -0,0 +1,47 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = nvme.c nvme_ctrlr.c nvme_ctrlr_cmd.c nvme_ctrlr_ocssd_cmd.c nvme_ns.c nvme_ns_cmd.c nvme_ns_ocssd_cmd.c nvme_pcie.c nvme_poll_group.c nvme_qpair.c \
+	nvme_quirks.c nvme_tcp.c nvme_uevent.c
+
+DIRS-$(CONFIG_RDMA) += nvme_rdma.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore
new file mode 100644
index 000000000..90c0c1678
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore
@@ -0,0 +1 @@
+nvme_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme.c/Makefile
new file mode 100644
index 000000000..4202cf54c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c b/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c
new file mode 100644
index 000000000..cf51a14bd
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c
@@ -0,0 +1,1376 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "nvme/nvme.c"
+
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+
+DEFINE_STUB_V(nvme_ctrlr_proc_get_ref, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB_V(nvme_ctrlr_proc_put_ref, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB_V(nvme_ctrlr_fail, (struct spdk_nvme_ctrlr *ctrlr, bool hotremove));
+DEFINE_STUB(spdk_nvme_transport_available_by_name, bool,
+ (const char *transport_name), true);
+/* return anything non-NULL; this won't be dereferenced anywhere in this test */
+DEFINE_STUB(nvme_ctrlr_get_current_process, struct spdk_nvme_ctrlr_process *,
+ (struct spdk_nvme_ctrlr *ctrlr), (struct spdk_nvme_ctrlr_process *)(uintptr_t)0x1);
+DEFINE_STUB(nvme_ctrlr_process_init, int,
+ (struct spdk_nvme_ctrlr *ctrlr), 0);
+DEFINE_STUB(nvme_ctrlr_get_ref_count, int,
+ (struct spdk_nvme_ctrlr *ctrlr), 0);
+DEFINE_STUB(dummy_probe_cb, bool,
+ (void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts), false);
+DEFINE_STUB(nvme_transport_ctrlr_construct, struct spdk_nvme_ctrlr *,
+ (const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle), NULL);
+DEFINE_STUB_V(nvme_io_msg_ctrlr_detach, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB(spdk_nvme_transport_available, bool,
+ (enum spdk_nvme_transport_type trtype), true);
+DEFINE_STUB(nvme_uevent_connect, int, (void), 1);
+
+static bool ut_destruct_called = false;
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ ut_destruct_called = true;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, opts_size);
+ opts->opts_size = opts_size;
+}
+
+static void
+memset_trid(struct spdk_nvme_transport_id *trid1, struct spdk_nvme_transport_id *trid2)
+{
+ memset(trid1, 0, sizeof(struct spdk_nvme_transport_id));
+ memset(trid2, 0, sizeof(struct spdk_nvme_transport_id));
+}
+
+static bool ut_check_trtype = false;
+static bool ut_test_probe_internal = false;
+
+static int
+ut_nvme_pcie_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct spdk_nvme_qpair qpair = {};
+ int rc;
+
+ if (probe_ctx->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
+ return -1;
+ }
+
+ ctrlr = calloc(1, sizeof(*ctrlr));
+ CU_ASSERT(ctrlr != NULL);
+ ctrlr->adminq = &qpair;
+
+ /* happy path with first controller */
+ MOCK_SET(nvme_transport_ctrlr_construct, ctrlr);
+ rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ /* failed with the second controller */
+ MOCK_SET(nvme_transport_ctrlr_construct, NULL);
+ rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
+ CU_ASSERT(rc != 0);
+ MOCK_CLEAR_P(nvme_transport_ctrlr_construct);
+
+ return -1;
+}
+
+int
+nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ free(ctrlr);
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect)
+{
+ struct spdk_nvme_ctrlr *ctrlr = NULL;
+
+ if (ut_check_trtype == true) {
+ CU_ASSERT(probe_ctx->trid.trtype == SPDK_NVME_TRANSPORT_PCIE);
+ }
+
+ if (ut_test_probe_internal) {
+ return ut_nvme_pcie_ctrlr_scan(probe_ctx, direct_connect);
+ }
+
+ if (direct_connect == true && probe_ctx->probe_cb) {
+ nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
+ ctrlr = nvme_get_ctrlr_by_trid(&probe_ctx->trid);
+ nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
+ probe_ctx->probe_cb(probe_ctx->cb_ctx, &probe_ctx->trid, &ctrlr->opts);
+ }
+ return 0;
+}
+
+static bool ut_attach_cb_called = false;
+static void
+dummy_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ ut_attach_cb_called = true;
+}
+
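+/*
+ * Exercise spdk_nvme_probe(): driver init failure, unavailable transport, the secondary
+ * process path that invokes attach_cb for already attached controllers, and the primary
+ * process path.
+ */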
+static void
+test_spdk_nvme_probe(void)
+{
+ int rc = 0;
+ const struct spdk_nvme_transport_id *trid = NULL;
+ void *cb_ctx = NULL;
+ spdk_nvme_probe_cb probe_cb = NULL;
+ spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
+ spdk_nvme_remove_cb remove_cb = NULL;
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+ struct nvme_driver dummy;
+ g_spdk_nvme_driver = &dummy;
+
+ /* driver init fails */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == -1);
+
+ /*
+ * For secondary processes, the attach_cb should automatically get
+ * called for any controllers already initialized by the primary
+ * process.
+ */
+ MOCK_SET(spdk_nvme_transport_available_by_name, false);
+ MOCK_SET(spdk_process_is_primary, true);
+ dummy.initialized = true;
+ g_spdk_nvme_driver = &dummy;
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == -1);
+
+	/* driver init passes, transport available, secondary calls attach_cb */
+ MOCK_SET(spdk_nvme_transport_available_by_name, true);
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
+ dummy.initialized = true;
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&dummy.lock, &attr) == 0);
+ TAILQ_INIT(&dummy.shared_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&dummy.shared_attached_ctrlrs, &ctrlr, tailq);
+ ut_attach_cb_called = false;
+	/* setup nvme_transport_ctrlr_scan() stub to also check the trtype */
+ ut_check_trtype = true;
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+
+ /* driver init passes, transport available, we are primary */
+ MOCK_SET(spdk_process_is_primary, true);
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ /* reset to pre-test values */
+ MOCK_CLEAR(spdk_memzone_lookup);
+ ut_check_trtype = false;
+
+ pthread_mutex_destroy(&dummy.lock);
+ pthread_mutexattr_destroy(&attr);
+}
+
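+/*
+ * Exercise spdk_nvme_connect(): NULL trid, controller not found, connecting to an already
+ * attached controller with and without user-supplied opts of various sizes, in both
+ * secondary and primary processes, plus a driver init failure.
+ */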
+static void
+test_spdk_nvme_connect(void)
+{
+ struct spdk_nvme_ctrlr *ret_ctrlr = NULL;
+ struct spdk_nvme_transport_id trid = {};
+ struct spdk_nvme_ctrlr_opts opts = {};
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+ struct nvme_driver dummy;
+
+ /* initialize the variable to prepare the test */
+ dummy.initialized = true;
+ TAILQ_INIT(&dummy.shared_attached_ctrlrs);
+ g_spdk_nvme_driver = &dummy;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&g_spdk_nvme_driver->lock, &attr) == 0);
+
+ /* set NULL trid pointer to test immediate return */
+ ret_ctrlr = spdk_nvme_connect(NULL, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+
+ /* driver init passes, transport available, secondary process connects ctrlr */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
+ MOCK_SET(spdk_nvme_transport_available_by_name, true);
+ memset(&trid, 0, sizeof(trid));
+ trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+
+ /* driver init passes, setup one ctrlr on the attached_list */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:01:00.0");
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+ /* get the ctrlr from the attached list */
+ snprintf(trid.traddr, sizeof(trid.traddr), "0000:01:00.0");
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ /* get the ctrlr from the attached list with default ctrlr opts */
+ ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ /* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
+ opts.num_io_queues = 1;
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, sizeof(opts));
+
+ /* opts_size is 0 */
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, 0);
+
+ /* opts_size is less than sizeof(*opts) if opts != NULL */
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, 4);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, 4);
+ /* remove the attached ctrlr on the attached_list */
+ CU_ASSERT(spdk_nvme_detach(&ctrlr) == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+
+ /* driver init passes, transport available, primary process connects ctrlr */
+ MOCK_SET(spdk_process_is_primary, true);
+ /* setup one ctrlr on the attached_list */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:02:00.0");
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+ /* get the ctrlr from the attached list */
+ snprintf(trid.traddr, sizeof(trid.traddr), "0000:02:00.0");
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ /* get the ctrlr from the attached list with default ctrlr opts */
+ ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ /* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
+ opts.num_io_queues = 2;
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 2);
+ /* remove the attached ctrlr on the attached_list */
+ CU_ASSERT(spdk_nvme_detach(ret_ctrlr) == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+
+ /* test driver init failure return */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+}
+
+static struct spdk_nvme_probe_ctx *
+test_nvme_init_get_probe_ctx(void)
+{
+ struct spdk_nvme_probe_ctx *probe_ctx;
+
+ probe_ctx = calloc(1, sizeof(*probe_ctx));
+ SPDK_CU_ASSERT_FATAL(probe_ctx != NULL);
+ TAILQ_INIT(&probe_ctx->init_ctrlrs);
+
+ return probe_ctx;
+}
+
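+/*
+ * Drive nvme_init_controllers() through a failed controller init, a successful PCIe
+ * controller init that lands on the shared list, and a non-PCIe controller that lands
+ * on the per-process list.
+ */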
+static void
+test_nvme_init_controllers(void)
+{
+ int rc = 0;
+ struct nvme_driver test_driver;
+ void *cb_ctx = NULL;
+ spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
+ struct spdk_nvme_probe_ctx *probe_ctx;
+ struct spdk_nvme_ctrlr *ctrlr;
+ pthread_mutexattr_t attr;
+
+ g_spdk_nvme_driver = &test_driver;
+ ctrlr = calloc(1, sizeof(*ctrlr));
+ SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
+ ctrlr->trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&test_driver.lock, &attr) == 0);
+ TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
+
+ /*
+ * Try to initialize, but nvme_ctrlr_process_init will fail.
+ * Verify correct behavior when it does.
+ */
+ MOCK_SET(nvme_ctrlr_process_init, 1);
+ MOCK_SET(spdk_process_is_primary, 1);
+ g_spdk_nvme_driver->initialized = false;
+ ut_destruct_called = false;
+ probe_ctx = test_nvme_init_get_probe_ctx();
+ TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
+ probe_ctx->cb_ctx = cb_ctx;
+ probe_ctx->attach_cb = attach_cb;
+ probe_ctx->trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ rc = nvme_init_controllers(probe_ctx);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_spdk_nvme_driver->initialized == true);
+ CU_ASSERT(ut_destruct_called == true);
+
+ /*
+ * Controller init OK, need to move the controller state machine
+	 * forward by setting the ctrlr state so that it can be moved to
+	 * the shared_attached_ctrlrs list.
+ */
+ probe_ctx = test_nvme_init_get_probe_ctx();
+ TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
+ ctrlr->state = NVME_CTRLR_STATE_READY;
+ MOCK_SET(nvme_ctrlr_process_init, 0);
+ rc = nvme_init_controllers(probe_ctx);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+ CU_ASSERT(TAILQ_FIRST(&g_spdk_nvme_driver->shared_attached_ctrlrs) == ctrlr);
+ TAILQ_REMOVE(&g_spdk_nvme_driver->shared_attached_ctrlrs, ctrlr, tailq);
+
+ /*
+ * Non-PCIe controllers should be added to the per-process list, not the shared list.
+ */
+ memset(ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ ctrlr->trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ probe_ctx = test_nvme_init_get_probe_ctx();
+ TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
+ ctrlr->state = NVME_CTRLR_STATE_READY;
+ MOCK_SET(nvme_ctrlr_process_init, 0);
+ rc = nvme_init_controllers(probe_ctx);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+ CU_ASSERT(TAILQ_FIRST(&g_nvme_attached_ctrlrs) == ctrlr);
+ TAILQ_REMOVE(&g_nvme_attached_ctrlrs, ctrlr, tailq);
+ free(ctrlr);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+
+ g_spdk_nvme_driver = NULL;
+ pthread_mutexattr_destroy(&attr);
+ pthread_mutex_destroy(&test_driver.lock);
+}
+
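+/*
+ * Exercise nvme_driver_init() across primary/secondary processes with the memzone
+ * already reserved, reservation or lookup failing, initialization timing out, and a
+ * mutex init failure.
+ */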
+static void
+test_nvme_driver_init(void)
+{
+ int rc;
+ struct nvme_driver dummy;
+ g_spdk_nvme_driver = &dummy;
+
+ /* adjust this so testing doesn't take so long */
+ g_nvme_driver_timeout_ms = 100;
+
+ /* process is primary and mem already reserved */
+ MOCK_SET(spdk_process_is_primary, true);
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Process is primary and mem not yet reserved but the call
+ * to spdk_memzone_reserve() returns NULL.
+ */
+ g_spdk_nvme_driver = NULL;
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, NULL);
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is not primary, no mem already reserved */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is not primary, mem is already reserved & init'd */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, (void *)&dummy);
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ /* process is not primary, mem is reserved but not initialized */
+ /* and times out */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ dummy.initialized = false;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is primary, got mem but mutex won't init */
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ MOCK_SET(pthread_mutexattr_init, -1);
+ g_spdk_nvme_driver = NULL;
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+	/* for FreeBSD we can't effectively mock this path */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+
+ /* process is primary, got mem, mutex OK */
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_CLEAR(pthread_mutexattr_init);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(g_spdk_nvme_driver->initialized == false);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ MOCK_CLEAR(spdk_memzone_reserve);
+ MOCK_CLEAR(spdk_memzone_lookup);
+}
+
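+/*
+ * Detach controllers with mocked ref counts: ref count 0 destructs and removes the
+ * controller from its list, ref count 1 leaves it attached; non-PCIe controllers are
+ * removed from the per-process list.
+ */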
+static void
+test_spdk_nvme_detach(void)
+{
+ int rc = 1;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_ctrlr *ret_ctrlr;
+ struct nvme_driver test_driver;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+
+ g_spdk_nvme_driver = &test_driver;
+ TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
+ CU_ASSERT(pthread_mutex_init(&test_driver.lock, NULL) == 0);
+
+ /*
+	 * Controllers are ref counted, so mock the function that returns
+	 * the ref count so that detach actually calls the destruct
+	 * function, which we've mocked simply to verify that it gets
+	 * called (we aren't testing what the real destruct function does
+	 * here).
+ */
+ MOCK_SET(nvme_ctrlr_get_ref_count, 0);
+ rc = spdk_nvme_detach(&ctrlr);
+ ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
+ CU_ASSERT(ret_ctrlr == NULL);
+ CU_ASSERT(ut_destruct_called == true);
+ CU_ASSERT(rc == 0);
+
+	/*
+	 * Mock the ref count to 1 to confirm that the destruct
+	 * function is not called and that the attached ctrlr list is
+	 * not empty.
+	 */
+ MOCK_SET(nvme_ctrlr_get_ref_count, 1);
+ TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
+ ut_destruct_called = false;
+ rc = spdk_nvme_detach(&ctrlr);
+ ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
+ CU_ASSERT(ret_ctrlr != NULL);
+ CU_ASSERT(ut_destruct_called == false);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Non-PCIe controllers should be on the per-process attached_ctrlrs list, not the
+ * shared_attached_ctrlrs list. Test an RDMA controller and ensure it is removed
+ * from the correct list.
+ */
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ TAILQ_INIT(&g_nvme_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
+ MOCK_SET(nvme_ctrlr_get_ref_count, 0);
+ rc = spdk_nvme_detach(&ctrlr);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+ CU_ASSERT(ut_destruct_called == true);
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ pthread_mutex_destroy(&test_driver.lock);
+}
+
+static void
+test_nvme_completion_poll_cb(void)
+{
+ struct nvme_completion_poll_status *status;
+ struct spdk_nvme_cpl cpl;
+
+ status = calloc(1, sizeof(*status));
+ SPDK_CU_ASSERT_FATAL(status != NULL);
+
+ memset(&cpl, 0xff, sizeof(cpl));
+
+ nvme_completion_poll_cb(status, &cpl);
+ CU_ASSERT(status->done == true);
+ CU_ASSERT(memcmp(&cpl, &status->cpl,
+ sizeof(struct spdk_nvme_cpl)) == 0);
+
+ free(status);
+}
+
+/* stub callback used by test_nvme_user_copy_cmd_complete() */
+static struct spdk_nvme_cpl ut_spdk_nvme_cpl = {0};
+static void
+dummy_cb(void *user_cb_arg, struct spdk_nvme_cpl *cpl)
+{
+ ut_spdk_nvme_cpl = *cpl;
+}
+
+static void
+test_nvme_user_copy_cmd_complete(void)
+{
+ struct nvme_request req;
+ int test_data = 0xdeadbeef;
+ int buff_size = sizeof(int);
+ void *buff;
+ static struct spdk_nvme_cpl cpl;
+
+ memset(&req, 0, sizeof(req));
+ memset(&cpl, 0x5a, sizeof(cpl));
+
+ /* test without a user buffer provided */
+ req.user_cb_fn = (void *)dummy_cb;
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /* test with a user buffer provided */
+ req.user_buffer = malloc(buff_size);
+ SPDK_CU_ASSERT_FATAL(req.user_buffer != NULL);
+ memset(req.user_buffer, 0, buff_size);
+ req.payload_size = buff_size;
+ buff = spdk_zmalloc(buff_size, 0x100, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ SPDK_CU_ASSERT_FATAL(buff != NULL);
+ req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
+ memcpy(buff, &test_data, buff_size);
+ req.cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ req.pid = getpid();
+
+ /* zero out the test value set in the callback */
+ memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
+
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) == 0);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /*
+ * Now test the same path as above but this time choose an opc
+ * that results in a different data transfer type.
+ */
+ memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
+ memset(req.user_buffer, 0, buff_size);
+ buff = spdk_zmalloc(buff_size, 0x100, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ SPDK_CU_ASSERT_FATAL(buff != NULL);
+ req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
+ memcpy(buff, &test_data, buff_size);
+ req.cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) != 0);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /* clean up */
+ free(req.user_buffer);
+}
+
+static void
+test_nvme_allocate_request_null(void)
+{
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
+ void *cb_arg = (void *)0x5678;
+ struct nvme_request *req = NULL;
+ struct nvme_request dummy_req;
+
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /*
+ * Put a dummy on the queue so we can make a request
+ * and confirm that what comes back is what we expect.
+ */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ req = nvme_allocate_request_null(&qpair, cb_fn, cb_arg);
+
+ /*
+	 * Compare the req with the parameters that we passed in
+ * as well as what the function is supposed to update.
+ */
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->cb_fn == cb_fn);
+ CU_ASSERT(req->cb_arg == cb_arg);
+ CU_ASSERT(req->pid == getpid());
+ CU_ASSERT(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
+ CU_ASSERT(req->payload.md == NULL);
+ CU_ASSERT(req->payload.contig_or_cb_arg == NULL);
+}
+
+static void
+test_nvme_allocate_request(void)
+{
+ struct spdk_nvme_qpair qpair;
+ struct nvme_payload payload;
+ uint32_t payload_struct_size = sizeof(payload);
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
+ void *cb_arg = (void *)0x6789;
+ struct nvme_request *req = NULL;
+ struct nvme_request dummy_req;
+
+ /* Fill the whole payload struct with a known pattern */
+ memset(&payload, 0x5a, payload_struct_size);
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /* Test trying to allocate a request when no requests are available */
+ req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
+ cb_fn, cb_arg);
+ CU_ASSERT(req == NULL);
+
+ /* put a dummy on the queue, and then allocate one */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+ req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
+ cb_fn, cb_arg);
+
+ /* all the req elements should now match the passed in parameters */
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->cb_fn == cb_fn);
+ CU_ASSERT(req->cb_arg == cb_arg);
+ CU_ASSERT(memcmp(&req->payload, &payload, payload_struct_size) == 0);
+ CU_ASSERT(req->payload_size == payload_struct_size);
+ CU_ASSERT(req->pid == getpid());
+}
+
+static void
+test_nvme_free_request(void)
+{
+ struct nvme_request match_req;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *req;
+
+ /* put a req on the Q, take it off and compare */
+ memset(&match_req.cmd, 0x5a, sizeof(struct spdk_nvme_cmd));
+ match_req.qpair = &qpair;
+	/* the code under test asserts this condition */
+ match_req.num_children = 0;
+ STAILQ_INIT(&qpair.free_req);
+
+ nvme_free_request(&match_req);
+ req = STAILQ_FIRST(&match_req.qpair->free_req);
+ CU_ASSERT(req == &match_req);
+}
+
+static void
+test_nvme_allocate_request_user_copy(void)
+{
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x12345;
+ void *cb_arg = (void *)0x12345;
+ bool host_to_controller = true;
+ struct nvme_request *req;
+ struct nvme_request dummy_req;
+ int test_data = 0xdeadbeef;
+ void *buffer = NULL;
+ uint32_t payload_size = sizeof(int);
+
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /* no buffer or valid payload size, early NULL return */
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ CU_ASSERT(req == NULL);
+
+ /* good buffer and valid payload size */
+ buffer = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ memcpy(buffer, &test_data, payload_size);
+
+ /* put a dummy on the queue */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ MOCK_CLEAR(spdk_malloc);
+ MOCK_CLEAR(spdk_zmalloc);
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->user_cb_fn == cb_fn);
+ CU_ASSERT(req->user_cb_arg == cb_arg);
+ CU_ASSERT(req->user_buffer == buffer);
+ CU_ASSERT(req->cb_arg == req);
+ CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) == 0);
+ spdk_free(req->payload.contig_or_cb_arg);
+
+ /* same thing but additional path coverage, no copy */
+ host_to_controller = false;
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->user_cb_fn == cb_fn);
+ CU_ASSERT(req->user_cb_arg == cb_arg);
+ CU_ASSERT(req->user_buffer == buffer);
+ CU_ASSERT(req->cb_arg == req);
+ CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) != 0);
+ spdk_free(req->payload.contig_or_cb_arg);
+
+ /* good buffer and valid payload size but make spdk_zmalloc fail */
+ /* set the mock pointer to NULL for spdk_zmalloc */
+ MOCK_SET(spdk_zmalloc, NULL);
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ CU_ASSERT(req == NULL);
+ free(buffer);
+ MOCK_CLEAR(spdk_zmalloc);
+}
+
+static void
+test_nvme_ctrlr_probe(void)
+{
+ int rc = 0;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair qpair = {};
+ const struct spdk_nvme_transport_id trid = {};
+ struct spdk_nvme_probe_ctx probe_ctx = {};
+ void *devhandle = NULL;
+ void *cb_ctx = NULL;
+ struct spdk_nvme_ctrlr *dummy = NULL;
+
+ ctrlr.adminq = &qpair;
+
+ TAILQ_INIT(&probe_ctx.init_ctrlrs);
+ nvme_driver_init();
+
+ /* test when probe_cb returns false */
+
+ MOCK_SET(dummy_probe_cb, false);
+ nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
+ rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
+ CU_ASSERT(rc == 1);
+
+	/* probe_cb returns true but we can't construct a ctrlr */
+ MOCK_SET(dummy_probe_cb, true);
+ MOCK_SET(nvme_transport_ctrlr_construct, NULL);
+ nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
+ rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
+ CU_ASSERT(rc == -1);
+
+ /* happy path */
+ MOCK_SET(dummy_probe_cb, true);
+ MOCK_SET(nvme_transport_ctrlr_construct, &ctrlr);
+ nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
+ rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
+ CU_ASSERT(rc == 0);
+ dummy = TAILQ_FIRST(&probe_ctx.init_ctrlrs);
+ SPDK_CU_ASSERT_FATAL(dummy != NULL);
+ CU_ASSERT(dummy == ut_nvme_transport_ctrlr_construct);
+ TAILQ_REMOVE(&probe_ctx.init_ctrlrs, dummy, tailq);
+ MOCK_CLEAR_P(nvme_transport_ctrlr_construct);
+
+ free(g_spdk_nvme_driver);
+}
+
+static void
+test_nvme_robust_mutex_init_shared(void)
+{
+ pthread_mutex_t mtx;
+ int rc = 0;
+
+ /* test where both pthread calls succeed */
+ MOCK_SET(pthread_mutexattr_init, 0);
+ MOCK_SET(pthread_mutex_init, 0);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ CU_ASSERT(rc == 0);
+
+	/* test where we can't init the attrs but mutex init works */
+ MOCK_SET(pthread_mutexattr_init, -1);
+ MOCK_SET(pthread_mutex_init, 0);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ /* for FreeBSD the only possible return value is 0 */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+
+	/* test where we can init the attrs but the mutex init fails */
+ MOCK_SET(pthread_mutexattr_init, 0);
+ MOCK_SET(pthread_mutex_init, -1);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ /* for FreeBSD the only possible return value is 0 */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+}
+
+static void
+test_opc_data_transfer(void)
+{
+ enum spdk_nvme_data_transfer xfer;
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_FLUSH);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_NONE);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_READ);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+}
+
+static void
+test_trid_parse_and_compare(void)
+{
+ struct spdk_nvme_transport_id trid1, trid2;
+ int ret;
+
+	/* exercise spdk_nvme_transport_id_parse() with invalid and valid inputs */
+ ret = spdk_nvme_transport_id_parse(NULL, "trtype:PCIe traddr:0000:04:00.0");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, NULL);
+ CU_ASSERT(ret == -EINVAL);
+ ret = spdk_nvme_transport_id_parse(NULL, NULL);
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0-:");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, " \t\n:");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1,
+ "trtype:rdma\n"
+ "adrfam:ipv4\n"
+ "traddr:192.168.100.8\n"
+ "trsvcid:4420\n"
+ "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0);
+ CU_ASSERT(trid1.trtype == SPDK_NVME_TRANSPORT_RDMA);
+ CU_ASSERT(trid1.adrfam == SPDK_NVMF_ADRFAM_IPV4);
+ CU_ASSERT(strcmp(trid1.traddr, "192.168.100.8") == 0);
+ CU_ASSERT(strcmp(trid1.trsvcid, "4420") == 0);
+ CU_ASSERT(strcmp(trid1.subnqn, "nqn.2014-08.org.nvmexpress.discovery") == 0);
+
+ memset(&trid2, 0, sizeof(trid2));
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(trid2.trtype == SPDK_NVME_TRANSPORT_PCIE);
+ CU_ASSERT(strcmp(trid2.traddr, "0000:04:00.0") == 0);
+
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) != 0);
+
+	/* set up trid1 and trid2, then test spdk_nvme_transport_id_compare() */
+ memset_trid(&trid1, &trid2);
+ trid1.adrfam = SPDK_NVMF_ADRFAM_IPV6;
+ trid2.adrfam = SPDK_NVMF_ADRFAM_IPV4;
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret > 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.traddr, sizeof(trid1.traddr), "192.168.100.8");
+ snprintf(trid2.traddr, sizeof(trid2.traddr), "192.168.100.9");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.trsvcid, sizeof(trid1.trsvcid), "4420");
+ snprintf(trid2.trsvcid, sizeof(trid2.trsvcid), "4421");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2017-08.org.nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret == 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.Nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret > 0);
+
+ memset_trid(&trid1, &trid2);
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret == 0);
+
+ /* Compare PCI addresses via spdk_pci_addr_compare (rather than as strings) */
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) == 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) > 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype=PCIe traddr=0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype=PCIe traddr=05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);
+
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1,
+ "trtype:tcp\n"
+ "adrfam:ipv4\n"
+ "traddr:192.168.100.8\n"
+ "trsvcid:4420\n"
+ "priority:2\n"
+ "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0);
+ CU_ASSERT(trid1.priority == 2);
+}
+
+static void
+test_spdk_nvme_transport_id_parse_trtype(void)
+{
+
+ enum spdk_nvme_transport_type *trtype;
+ enum spdk_nvme_transport_type sct;
+ char *str;
+
+ trtype = NULL;
+ str = "unit_test";
+
+	/* test the return value when trtype is NULL but str is not NULL */
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));
+
+	/* test the return value when str is NULL but trtype is not NULL */
+ trtype = &sct;
+ str = NULL;
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));
+
+	/* test the return value when str and trtype are not NULL but str is not
+	 * a recognized transport name; such strings map to SPDK_NVME_TRANSPORT_CUSTOM */
+ str = "unit_test";
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == 0);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_CUSTOM);
+
+	/* "PCIe" is matched case-insensitively (via strcasecmp) */
+ str = "PCIe";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);
+
+ str = "pciE";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);
+
+	/* "RDMA" is matched case-insensitively (via strcasecmp) */
+ str = "RDMA";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);
+
+ str = "rdma";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);
+
+	/* "FC" is matched case-insensitively (via strcasecmp) */
+ str = "FC";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_FC);
+
+ str = "fc";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_FC);
+
+	/* "TCP" is matched case-insensitively (via strcasecmp) */
+ str = "TCP";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_TCP);
+
+ str = "tcp";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_TCP);
+}
+
+static void
+test_spdk_nvme_transport_id_parse_adrfam(void)
+{
+
+ enum spdk_nvmf_adrfam *adrfam;
+ enum spdk_nvmf_adrfam sct;
+ char *str;
+
+ adrfam = NULL;
+ str = "unit_test";
+
+	/* test the return value when adrfam is NULL but str is not NULL */
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));
+
+	/* test the return value when str is NULL but adrfam is not NULL */
+ adrfam = &sct;
+ str = NULL;
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));
+
+	/* test the return value when str and adrfam are not NULL but str is not
+	 * "IPv4", "IPv6", "IB" or "FC" */
+ str = "unit_test";
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-ENOENT));
+
+	/* "IPv4" is matched case-insensitively (via strcasecmp) */
+ str = "IPv4";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);
+
+ str = "ipV4";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);
+
+	/* "IPv6" is matched case-insensitively (via strcasecmp) */
+ str = "IPv6";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);
+
+ str = "ipV6";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);
+
+	/* "IB" is matched case-insensitively (via strcasecmp) */
+ str = "IB";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);
+
+ str = "ib";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);
+
+	/* "FC" is matched case-insensitively (via strcasecmp) */
+ str = "FC";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);
+
+ str = "fc";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);
+}
+
+static void
+test_trid_trtype_str(void)
+{
+ const char *s;
+
+ s = spdk_nvme_transport_id_trtype_str(-5);
+ CU_ASSERT(s == NULL);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_PCIE);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "PCIe") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "RDMA") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_FC);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "FC") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_TCP);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "TCP") == 0);
+}
+
+static void
+test_trid_adrfam_str(void)
+{
+ const char *s;
+
+ s = spdk_nvme_transport_id_adrfam_str(-5);
+ CU_ASSERT(s == NULL);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV4);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IPv4") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV6);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IPv6") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IB);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IB") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_FC);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "FC") == 0);
+}
+
+/* stub callback used by test_nvme_request_check_timeout() */
+static bool ut_timeout_cb_call = false;
+static void
+dummy_timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair, uint16_t cid)
+{
+ ut_timeout_cb_call = true;
+}
+
+static void
+test_nvme_request_check_timeout(void)
+{
+ int rc;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request req;
+ struct spdk_nvme_ctrlr_process active_proc;
+ uint16_t cid = 0;
+ uint64_t now_tick = 0;
+
+ memset(&qpair, 0x0, sizeof(qpair));
+ memset(&req, 0x0, sizeof(req));
+ memset(&active_proc, 0x0, sizeof(active_proc));
+ req.qpair = &qpair;
+ active_proc.timeout_cb_fn = dummy_timeout_cb;
+
+	/* if the request has already been marked as timed out, return immediately */
+ req.timed_out = true;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ /* if timeout isn't enabled then return directly */
+ req.timed_out = false;
+ req.submit_tick = 0;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+	/* if req->pid doesn't match the current process, return immediately */
+ req.submit_tick = 1;
+ req.pid = g_spdk_nvme_pid + 1;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ /* AER command has no timeout */
+ req.pid = g_spdk_nvme_pid;
+ req.cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+	/* timeout period has not elapsed yet */
+ qpair.id = 1;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ now_tick = 2;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(req.timed_out == true);
+ CU_ASSERT(ut_timeout_cb_call == true);
+ CU_ASSERT(rc == 0);
+}
+
+struct nvme_completion_poll_status g_status;
+uint64_t completion_delay, timeout_in_secs;
+int g_process_comp_result;
+
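+/*
+ * Overridden completion-processing stub for the nvme_wait_for_completion tests:
+ * completion_delay vs. timeout_in_secs (both in seconds) decides whether the
+ * status is marked done, and g_process_comp_result injects an error return.
+ * spdk_delay_us() here is assumed to be the mocked version from the shared
+ * unit test environment, so it only advances the fake tick counter.
+ */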
+int
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ spdk_delay_us(completion_delay * spdk_get_ticks_hz());
+
+ g_status.done = completion_delay < timeout_in_secs && g_process_comp_result == 0 ? true : false;
+
+ return g_process_comp_result;
+}
+
+static void
+test_nvme_wait_for_completion(void)
+{
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+
+ memset(&qpair, 0, sizeof(qpair));
+
+ /* completion timeout */
+ memset(&g_status, 0, sizeof(g_status));
+ completion_delay = 2;
+ timeout_in_secs = 1;
+ rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
+ CU_ASSERT(g_status.timed_out == true);
+ CU_ASSERT(g_status.done == false);
+ CU_ASSERT(rc == -ECANCELED);
+
+ /* spdk_nvme_qpair_process_completions returns error */
+ memset(&g_status, 0, sizeof(g_status));
+ g_process_comp_result = -1;
+ completion_delay = 1;
+ timeout_in_secs = 2;
+ rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
+ CU_ASSERT(rc == -ECANCELED);
+ CU_ASSERT(g_status.timed_out == true);
+ CU_ASSERT(g_status.done == false);
+ CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
+
+ g_process_comp_result = 0;
+
+ /* complete in time */
+ memset(&g_status, 0, sizeof(g_status));
+ completion_delay = 1;
+ timeout_in_secs = 2;
+ rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
+ CU_ASSERT(g_status.timed_out == false);
+ CU_ASSERT(g_status.done == true);
+ CU_ASSERT(rc == 0);
+
+ /* nvme_wait_for_completion */
+ /* spdk_nvme_qpair_process_completions returns error */
+ memset(&g_status, 0, sizeof(g_status));
+ g_process_comp_result = -1;
+ rc = nvme_wait_for_completion(&qpair, &g_status);
+ CU_ASSERT(rc == -ECANCELED);
+ CU_ASSERT(g_status.timed_out == true);
+ CU_ASSERT(g_status.done == false);
+ CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
+
+ /* successful completion */
+ memset(&g_status, 0, sizeof(g_status));
+ g_process_comp_result = 0;
+ rc = nvme_wait_for_completion(&qpair, &g_status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_status.timed_out == false);
+ CU_ASSERT(g_status.done == true);
+}
+
+static void
+test_nvme_ctrlr_probe_internal(void)
+{
+ struct spdk_nvme_probe_ctx *probe_ctx;
+ struct spdk_nvme_transport_id trid = {};
+ struct nvme_driver dummy;
+ int rc;
+
+ probe_ctx = calloc(1, sizeof(*probe_ctx));
+ CU_ASSERT(probe_ctx != NULL);
+
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ ut_test_probe_internal = true;
+ MOCK_SET(dummy_probe_cb, true);
+ trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ nvme_probe_ctx_init(probe_ctx, &trid, NULL, dummy_probe_cb, NULL, NULL);
+ rc = nvme_probe_internal(probe_ctx, false);
+ CU_ASSERT(rc < 0);
+ CU_ASSERT(TAILQ_EMPTY(&probe_ctx->init_ctrlrs));
+
+ free(probe_ctx);
+ ut_test_probe_internal = false;
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_opc_data_transfer);
+ CU_ADD_TEST(suite, test_spdk_nvme_transport_id_parse_trtype);
+ CU_ADD_TEST(suite, test_spdk_nvme_transport_id_parse_adrfam);
+ CU_ADD_TEST(suite, test_trid_parse_and_compare);
+ CU_ADD_TEST(suite, test_trid_trtype_str);
+ CU_ADD_TEST(suite, test_trid_adrfam_str);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_probe);
+ CU_ADD_TEST(suite, test_spdk_nvme_probe);
+ CU_ADD_TEST(suite, test_spdk_nvme_connect);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_probe_internal);
+ CU_ADD_TEST(suite, test_nvme_init_controllers);
+ CU_ADD_TEST(suite, test_nvme_driver_init);
+ CU_ADD_TEST(suite, test_spdk_nvme_detach);
+ CU_ADD_TEST(suite, test_nvme_completion_poll_cb);
+ CU_ADD_TEST(suite, test_nvme_user_copy_cmd_complete);
+ CU_ADD_TEST(suite, test_nvme_allocate_request_null);
+ CU_ADD_TEST(suite, test_nvme_allocate_request);
+ CU_ADD_TEST(suite, test_nvme_free_request);
+ CU_ADD_TEST(suite, test_nvme_allocate_request_user_copy);
+ CU_ADD_TEST(suite, test_nvme_robust_mutex_init_shared);
+ CU_ADD_TEST(suite, test_nvme_request_check_timeout);
+ CU_ADD_TEST(suite, test_nvme_wait_for_completion);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore
new file mode 100644
index 000000000..97a75bee8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile
new file mode 100644
index 000000000..3ce33dc4e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
new file mode 100644
index 000000000..f5b374639
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
@@ -0,0 +1,2150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/log.h"
+
+#include "common/lib/test_env.c"
+
+struct spdk_log_flag SPDK_LOG_NVME = {
+ .name = "nvme",
+ .enabled = false,
+};
+
+#include "nvme/nvme_ctrlr.c"
+#include "nvme/nvme_quirks.c"
+
+pid_t g_spdk_nvme_pid;
+
+struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;
+
+struct spdk_nvme_registers g_ut_nvme_regs = {};
+
+__thread int nvme_thread_ioq_index = -1;
+
+uint32_t set_size = 1;
+
+int set_status_cpl = -1;
+
+DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
+ (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
+DEFINE_STUB_V(nvme_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
+DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
+ struct spdk_nvme_qpair *qpair), 0);
+DEFINE_STUB_V(nvme_io_msg_ctrlr_update, (struct spdk_nvme_ctrlr *ctrlr));
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle)
+{
+ return NULL;
+}
+
+int
+nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ nvme_ctrlr_destruct_finish(ctrlr);
+
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
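+/*
+ * The register access stubs below back all controller register reads and
+ * writes with the in-memory g_ut_nvme_regs structure, so the tests can set
+ * CC/CSTS bits directly and observe what nvme_ctrlr_process_init() writes.
+ */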
+int
+nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
+ *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
+ *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
+ *value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
+ *value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
+ return 0;
+}
+
+uint32_t
+nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return UINT32_MAX;
+}
+
+uint16_t
+nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 1;
+}
+
+void *
+nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
+{
+ return NULL;
+}
+
+int
+nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+struct spdk_nvme_qpair *
+nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
+ const struct spdk_nvme_io_qpair_opts *opts)
+{
+ struct spdk_nvme_qpair *qpair;
+
+ qpair = calloc(1, sizeof(*qpair));
+ SPDK_CU_ASSERT_FATAL(qpair != NULL);
+
+ qpair->ctrlr = ctrlr;
+ qpair->id = qid;
+ qpair->qprio = opts->qprio;
+
+ return qpair;
+}
+
+int
+nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+ free(qpair);
+ return 0;
+}
+
+void
+nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+}
+
+int
+nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+void
+nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
+{
+}
+
+void
+nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
+{
+}
+
+int
+nvme_driver_init(void)
+{
+ return 0;
+}
+
+int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
+ struct spdk_nvme_ctrlr *ctrlr,
+ enum spdk_nvme_qprio qprio,
+ uint32_t num_requests)
+{
+ qpair->id = id;
+ qpair->qprio = qprio;
+ qpair->ctrlr = ctrlr;
+
+ return 0;
+}
+
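+/*
+ * Admin commands issued during controller initialization are completed
+ * immediately through fake_cpl_sc() with a configurable generic status code,
+ * allowing the init state machine to advance without a real controller.
+ */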
+static struct spdk_nvme_cpl fake_cpl = {};
+static enum spdk_nvme_generic_command_status_code set_status_code = SPDK_NVME_SC_SUCCESS;
+
+static void
+fake_cpl_sc(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl.status.sc = set_status_code;
+ cb_fn(cb_arg, &fake_cpl);
+}
+
+int
+spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
+ uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT(0);
+ return -1;
+}
+
+int
+spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
+ uint32_t cdw11, void *payload, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
+ uint32_t nsid, void *payload, uint32_t payload_size,
+ uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);
+
+ /*
+ * For the purposes of this unit test, we don't need to bother emulating request submission.
+ */
+
+ return 0;
+}
+
+static int32_t g_wait_for_completion_return_val;
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ return g_wait_for_completion_return_val;
+}
+
+void
+nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
+{
+}
+
+void
+nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct nvme_completion_poll_status *status = arg;
+	/* This should not happen in the test env since this callback is always called
+	 * before wait_for_completion_*, while this field can only be set to true in
+	 * the wait_for_completion_* functions. */
+ CU_ASSERT(status->timed_out == false);
+
+ status->cpl = *cpl;
+ status->done = true;
+}
+
+static struct nvme_completion_poll_status *g_failed_status;
+
+int
+nvme_wait_for_completion_robust_lock(
+ struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status,
+ pthread_mutex_t *robust_mutex)
+{
+ if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
+ g_failed_status = status;
+ status->timed_out = true;
+ return -1;
+ }
+
+ status->done = true;
+ if (set_status_cpl == 1) {
+ status->cpl.status.sc = 1;
+ }
+ return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
+}
+
+int
+nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status)
+{
+ return nvme_wait_for_completion_robust_lock(qpair, status, NULL);
+}
+
+int
+nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status,
+ uint64_t timeout_in_secs)
+{
+ return nvme_wait_for_completion_robust_lock(qpair, status, NULL);
+}
+
+int
+nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
+ union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
+ void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
+ void *payload, size_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
+ uint32_t count = 0;
+ uint32_t i = 0;
+ struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;
+
+ for (i = 1; i <= ctrlr->num_ns; i++) {
+ if (i <= nsid) {
+ continue;
+ }
+
+ ns_list->ns_list[count++] = i;
+ if (count == SPDK_COUNTOF(ns_list->ns_list)) {
+ break;
+ }
+ }
+
+ }
+
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
+ uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT(0);
+ return -1;
+}
+
+int
+nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
+ struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
+ struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
+ void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
+ if (fw_commit->fs == 0) {
+ return -1;
+ }
+ set_status_cpl = 1;
+ if (ctrlr->is_resetting == true) {
+ set_status_cpl = 0;
+ }
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
+ uint32_t size, uint32_t offset, void *payload,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
+ return -1;
+ }
+ CU_ASSERT(offset == 0);
+ return 0;
+}
+
+void
+nvme_ns_destruct(struct spdk_nvme_ns *ns)
+{
+}
+
+int
+nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
+ struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_ns_update(struct spdk_nvme_ns *ns)
+{
+ return 0;
+}
+
+void
+spdk_pci_device_detach(struct spdk_pci_device *device)
+{
+}
+
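+/*
+ * Convenience macro for the tests below: declares a zeroed controller whose
+ * admin queue is a stack qpair seeded with a single free request.
+ */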
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
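+/*
+ * The test_nvme_ctrlr_init_* cases walk nvme_ctrlr_process_init() through its
+ * state machine for different initial CC.EN/CSTS.RDY register values and,
+ * further below, different arbitration mechanism selections.
+ */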
+static void
+test_nvme_ctrlr_init_en_1_rdy_0(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 1, CSTS.RDY = 0
+ */
+ g_ut_nvme_regs.cc.bits.en = 1;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ * init() should set CC.EN = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Transition to CSTS.RDY = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ /*
+ * Transition to CC.EN = 1
+ */
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_1_rdy_1(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 1, CSTS.RDY = 1
+ * init() should set CC.EN = 0.
+ */
+ g_ut_nvme_regs.cc.bits.en = 1;
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Transition to CSTS.RDY = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ /*
+ * Transition to CC.EN = 1
+ */
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Default round robin enabled
+ */
+ g_ut_nvme_regs.cap.bits.ams = 0x0;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to default round robin arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Weighted round robin enabled
+ */
+ g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to weighted round robin arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Vendor specific arbitration mechanism supported by the controller
+ * (round robin is always available)
+ */
+ g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to vendor specific arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_1(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 1
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ /*
+ * Transition to CSTS.RDY = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ /*
+ * Transition to CC.EN = 1
+ */
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
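+/*
+ * Helper for the I/O qpair allocation tests below: construct a controller with
+ * num_io_queues I/O queues and mark QIDs 1 through num_io_queues as free.
+ */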
+static void
+setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
+{
+ uint32_t i;
+
+ CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);
+
+ ctrlr->page_size = 0x1000;
+ ctrlr->opts.num_io_queues = num_io_queues;
+ ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
+ SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);
+
+ spdk_bit_array_clear(ctrlr->free_io_qids, 0);
+ for (i = 1; i <= num_io_queues; i++) {
+ spdk_bit_array_set(ctrlr->free_io_qids, i);
+ }
+}
+
+static void
+cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
+{
+ nvme_ctrlr_destruct(ctrlr);
+}
+
+static void
+test_alloc_io_qpair_rr_1(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0;
+
+ setup_qpairs(&ctrlr, 1);
+
+ /*
+ * Fake to simulate the controller with default round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+ /* Only 1 I/O qpair was allocated, so this should fail */
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Now that the qpair has been returned to the free list,
+ * we should be able to allocate it again.
+ */
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /* Only qprio 0 is acceptable with the default round robin arbitration mechanism */
+ opts.qprio = 1;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ opts.qprio = 2;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ opts.qprio = 3;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ /* Only qprio values 0 through 3 are acceptable */
+ opts.qprio = 4;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+static void
+test_alloc_io_qpair_wrr_1(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0, *q1;
+
+ setup_qpairs(&ctrlr, 2);
+
+ /*
+ * Fake to simulate the controller with weighted round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ /*
+ * Allocate 2 qpairs and free them
+ */
+ opts.qprio = 0;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Allocate 2 qpairs and free them in the reverse order
+ */
+ opts.qprio = 2;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 2);
+
+ opts.qprio = 3;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+
+ /* Only qprio values 0 through 3 are acceptable */
+ opts.qprio = 4;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+static void
+test_alloc_io_qpair_wrr_2(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0, *q1, *q2, *q3;
+
+ setup_qpairs(&ctrlr, 4);
+
+ /*
+ * Fake to simulate the controller with weighted round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ opts.qprio = 0;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+
+ opts.qprio = 2;
+ q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q2 != NULL);
+ SPDK_CU_ASSERT_FATAL(q2->qprio == 2);
+
+ opts.qprio = 3;
+ q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q3 != NULL);
+ SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
+
+ /* Only 4 I/O qpairs were allocated, so this should fail */
+ opts.qprio = 0;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Now that the qpairs have been returned to the free list,
+ * we should be able to allocate them again.
+ *
+ * Allocate 4 I/O qpairs, two with qprio 1 and two with qprio 3.
+ */
+ opts.qprio = 1;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+
+ opts.qprio = 3;
+ q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q2 != NULL);
+ SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
+
+ opts.qprio = 3;
+ q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q3 != NULL);
+ SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
+
+ /*
+ * Free all I/O qpairs in reverse order
+ */
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
+
+ cleanup_qpairs(&ctrlr);
+}
+
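+/*
+ * Stub for the transport connect callout: record that it was called and
+ * return a configurable code.
+ */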
+bool g_connect_qpair_called = false;
+int g_connect_qpair_return_code = 0;
+
+int
+nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+ g_connect_qpair_called = true;
+ return g_connect_qpair_return_code;
+}
+
+static void
+test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair qpair = {};
+ int rc;
+
+ /* Various states of controller disconnect. */
+ qpair.id = 1;
+ qpair.ctrlr = &ctrlr;
+ ctrlr.is_removed = 1;
+ ctrlr.is_failed = 0;
+ ctrlr.is_resetting = 0;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENODEV);
+
+ ctrlr.is_removed = 0;
+ ctrlr.is_failed = 1;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENXIO);
+
+ ctrlr.is_failed = 0;
+ ctrlr.is_resetting = 1;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -EAGAIN);
+
+ /* Confirm precedence for controller states: removed > resetting > failed */
+ ctrlr.is_removed = 1;
+ ctrlr.is_failed = 1;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENODEV);
+
+ ctrlr.is_removed = 0;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -EAGAIN);
+
+ ctrlr.is_resetting = 0;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENXIO);
+
+ /* qpair not failed. Make sure we don't call down to the transport */
+ ctrlr.is_failed = 0;
+ qpair.state = NVME_QPAIR_CONNECTED;
+ g_connect_qpair_called = false;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(g_connect_qpair_called == false);
+ CU_ASSERT(rc == 0);
+
+ /* transport qpair is failed. make sure we call down to the transport */
+ qpair.state = NVME_QPAIR_DISCONNECTED;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(g_connect_qpair_called == true);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+test_nvme_ctrlr_fail(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+
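+ /* Failing the controller with hot_remove == false must leave it marked as failed */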
+ ctrlr.opts.num_io_queues = 0;
+ nvme_ctrlr_fail(&ctrlr, false);
+
+ CU_ASSERT(ctrlr.is_failed == true);
+}
+
+static void
+test_nvme_ctrlr_construct_intel_support_log_page_list(void)
+{
+ bool res;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_intel_log_page_directory payload = {};
+ struct spdk_pci_id pci_id = {};
+
+ /* Get quirks for a device with all 0 vendor/device id */
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT(ctrlr.quirks == 0);
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == false);
+
+ /* Set the vendor to Intel, but provide no device id */
+ pci_id.class_id = SPDK_PCI_CLASS_NVME;
+ ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ payload.temperature_statistics_log_len = 1;
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
+ CU_ASSERT(res == false);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
+ CU_ASSERT(res == false);
+
+ /* set valid vendor id, device id and sub device id */
+ ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
+ payload.temperature_statistics_log_len = 0;
+ pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.device_id = 0x0953;
+ pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.subdevice_id = 0x3702;
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == false);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
+ CU_ASSERT(res == false);
+}
+
+static void
+test_nvme_ctrlr_set_supported_features(void)
+{
+ bool res;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ /* Set an invalid vendor ID */
+ ctrlr.cdata.vid = 0xFFFF;
+ nvme_ctrlr_set_supported_features(&ctrlr);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
+ CU_ASSERT(res == false);
+
+ ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
+ nvme_ctrlr_set_supported_features(&ctrlr);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
+ CU_ASSERT(res == true);
+}
+
+static void
+test_ctrlr_get_default_ctrlr_opts(void)
+{
+ struct spdk_nvme_ctrlr_opts opts = {};
+
+ CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id,
+ "e53e9258-c93b-48b5-be1a-f025af6d232a") == 0);
+
+ memset(&opts, 0, sizeof(opts));
+
+ /* set a smaller opts_size */
+ CU_ASSERT(sizeof(opts) > 8);
+ spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
+ CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ CU_ASSERT_TRUE(opts.use_cmb_sqs);
+ /* verify the fields below are not initialized to their default values */
+ CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
+ CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
+ CU_ASSERT_EQUAL(opts.io_queue_size, 0);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
+ for (int i = 0; i < 8; i++) {
+ CU_ASSERT(opts.host_id[i] == 0);
+ }
+ for (int i = 0; i < 16; i++) {
+ CU_ASSERT(opts.extended_host_id[i] == 0);
+ }
+ CU_ASSERT(strlen(opts.hostnqn) == 0);
+ CU_ASSERT(strlen(opts.src_addr) == 0);
+ CU_ASSERT(strlen(opts.src_svcid) == 0);
+ CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);
+
+ /* set a consistent opts_size */
+ spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
+ CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ CU_ASSERT_TRUE(opts.use_cmb_sqs);
+ CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
+ for (int i = 0; i < 8; i++) {
+ CU_ASSERT(opts.host_id[i] == 0);
+ }
+ CU_ASSERT_STRING_EQUAL(opts.hostnqn,
+ "2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
+ CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
+ sizeof(opts.extended_host_id)) == 0);
+ CU_ASSERT(strlen(opts.src_addr) == 0);
+ CU_ASSERT(strlen(opts.src_svcid) == 0);
+ CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
+}
+
+static void
+test_ctrlr_get_default_io_qpair_opts(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_io_qpair_opts opts = {};
+
+ memset(&opts, 0, sizeof(opts));
+
+ /* set a smaller opts_size */
+ ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
+ CU_ASSERT(sizeof(opts) > 8);
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
+ CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ /* verify the field below is not initialized to its default value */
+ CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
+
+ /* set a consistent opts_size */
+ ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
+ ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+ CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
+}
+
+#if 0 /* TODO: move to PCIe-specific unit test */
+static void
+test_nvme_ctrlr_alloc_cmb(void)
+{
+ int rc;
+ uint64_t offset;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ ctrlr.cmb_size = 0x1000000;
+ ctrlr.cmb_current_offset = 0x100;
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x1000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x2000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x100000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
+ CU_ASSERT(rc == -1);
+}
+#endif
+
+static void
+test_spdk_nvme_ctrlr_update_firmware(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ void *payload = NULL;
+ int point_payload = 1;
+ int slot = 0;
+ int ret = 0;
+ struct spdk_nvme_status status;
+ enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;
+
+ /* Invalid size: the size check should make the call fail */
+ set_size = 5;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Payload is NULL and set_size < min_page_size; the call should fail */
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Payload is non-NULL but min_page_size is 0; the call should fail */
+ set_size = 4;
+ ctrlr.min_page_size = 0;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Firmware image download with non-NULL payload and nonzero min_page_size; status.cpl is set to 1 (error) */
+ set_status_cpl = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+
+ /* Firmware image download with status.cpl set to 0 */
+ set_status_cpl = 0;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Check firmware commit */
+ ctrlr.is_resetting = false;
+ set_status_cpl = 0;
+ slot = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+
+ /* Full path: firmware download and firmware commit complete and the update returns success */
+ ctrlr.is_resetting = true;
+ set_status_cpl = 0;
+ slot = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == 0);
+
+ /* nvme_wait_for_completion returns an error */
+ g_wait_for_completion_return_val = -1;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+ CU_ASSERT(g_failed_status != NULL);
+ CU_ASSERT(g_failed_status->timed_out == true);
+ /* The status would normally be freed by the callback, which is not triggered in the
+ test environment, so store it in a global variable and free it manually here.
+ If spdk_nvme_ctrlr_update_firmware changes its behaviour and frees the status
+ itself, this would become a double free. */
+ free(g_failed_status);
+ g_failed_status = NULL;
+ g_wait_for_completion_return_val = 0;
+
+ set_status_cpl = 0;
+}
+
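+/* Stub of the Doorbell Buffer Config admin command; completes immediately via the fake completion helper. */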
+int
+nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+static void
+test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ int ret = -1;
+
+ ctrlr.cdata.oacs.doorbell_buffer_config = 1;
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ ctrlr.page_size = 0x1000;
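+ /* Clear the allocation mocks so the doorbell buffers are actually allocated */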
+ MOCK_CLEAR(spdk_malloc);
+ MOCK_CLEAR(spdk_zmalloc);
+ ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
+ CU_ASSERT(ret == 0);
+ nvme_ctrlr_free_doorbell_buffer(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_test_active_ns(void)
+{
+ uint32_t nsid, minor;
+ size_t ns_id_count;
+ struct spdk_nvme_ctrlr ctrlr = {.state = NVME_CTRLR_STATE_READY};
+
+ ctrlr.page_size = 0x1000;
+
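+ /* Run the same checks with the controller reporting NVMe versions 1.0, 1.1 and 1.2 */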
+ for (minor = 0; minor <= 2; minor++) {
+ ctrlr.vs.bits.mjr = 1;
+ ctrlr.vs.bits.mnr = minor;
+ ctrlr.vs.bits.ter = 0;
+ ctrlr.num_ns = 1531;
+ nvme_ctrlr_identify_active_ns(&ctrlr);
+
+ for (nsid = 1; nsid <= ctrlr.num_ns; nsid++) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+ }
+ ctrlr.num_ns = 1559;
+ for (; nsid <= ctrlr.num_ns; nsid++) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
+ }
+ ctrlr.num_ns = 1531;
+ for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+ ctrlr.active_ns_list[nsid] = 0;
+ }
+ CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
+
+ ctrlr.active_ns_list[0] = 1;
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+ nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+ CU_ASSERT(nsid == 1);
+
+ ctrlr.active_ns_list[1] = 3;
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
+ nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+ CU_ASSERT(nsid == 3);
+ nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+ CU_ASSERT(nsid == 0);
+
+ memset(ctrlr.active_ns_list, 0, ctrlr.num_ns * sizeof(uint32_t));
+ for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+ ctrlr.active_ns_list[nsid] = nsid + 1;
+ }
+
+ ns_id_count = 0;
+ for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+ nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+ ns_id_count++;
+ }
+ CU_ASSERT(ns_id_count == ctrlr.num_ns);
+
+ nvme_ctrlr_destruct(&ctrlr);
+ }
+}
+
+static void
+test_nvme_ctrlr_test_active_ns_error_case(void)
+{
+ int rc;
+ struct spdk_nvme_ctrlr ctrlr = {.state = NVME_CTRLR_STATE_READY};
+
+ ctrlr.page_size = 0x1000;
+ ctrlr.vs.bits.mjr = 1;
+ ctrlr.vs.bits.mnr = 2;
+ ctrlr.vs.bits.ter = 0;
+ ctrlr.num_ns = 2;
+
+ set_status_code = SPDK_NVME_SC_INVALID_FIELD;
+ rc = nvme_ctrlr_identify_active_ns(&ctrlr);
+ CU_ASSERT(rc == -ENXIO);
+ set_status_code = SPDK_NVME_SC_SUCCESS;
+}
+
+static void
+test_nvme_ctrlr_init_delay(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ /* Test that the initialization delay works correctly. We only
+ * do the initialization delay on SSDs that require it, so
+ * set that quirk here.
+ */
+ ctrlr.quirks = NVME_QUIRK_DELAY_BEFORE_INIT;
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.state = NVME_CTRLR_STATE_INIT_DELAY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
+
+ /* Advance 1 s; init just returns because the sleep timeout has not elapsed yet */
+ spdk_delay_us(1 * spdk_get_ticks_hz());
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
+
+ /* Sleep timeout has elapsed; initialization proceeds */
+ spdk_delay_us(2 * spdk_get_ticks_hz());
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_spdk_nvme_ctrlr_set_trid(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvme_transport_id new_trid = {{0}};
+
+ ctrlr.is_failed = false;
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ snprintf(ctrlr.trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
+ snprintf(ctrlr.trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
+ snprintf(ctrlr.trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EPERM);
+
+ ctrlr.is_failed = true;
+ new_trid.trtype = SPDK_NVME_TRANSPORT_TCP;
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
+ CU_ASSERT(ctrlr.trid.trtype == SPDK_NVME_TRANSPORT_RDMA);
+
+ new_trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode2");
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
+ CU_ASSERT(strncmp(ctrlr.trid.subnqn, "nqn.2016-06.io.spdk:cnode1", SPDK_NVMF_NQN_MAX_LEN) == 0);
+
+
+ snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
+ snprintf(new_trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
+ snprintf(new_trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4421");
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == 0);
+ CU_ASSERT(strncmp(ctrlr.trid.traddr, "192.168.100.9", SPDK_NVMF_TRADDR_MAX_LEN) == 0);
+ CU_ASSERT(strncmp(ctrlr.trid.trsvcid, "4421", SPDK_NVMF_TRSVCID_MAX_LEN) == 0);
+}
+
+static void
+test_nvme_ctrlr_init_set_nvmf_ioccsz(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ /* ioccsz of 260 (in 16-byte units) yields an ioccsz_bytes of 4096 */
+ ctrlr.cdata.nvmf_specific.ioccsz = 260;
+ ctrlr.cdata.nvmf_specific.icdoff = 1;
+
+ /* Check PCIe trtype: ioccsz and icdoff are not applied */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 0);
+ CU_ASSERT(ctrlr.icdoff == 0);
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check RDMA trtype: ioccsz and icdoff are applied */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+ CU_ASSERT(ctrlr.icdoff == 1);
+ ctrlr.ioccsz_bytes = 0;
+ ctrlr.icdoff = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check TCP trtype: ioccsz and icdoff are applied */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+ CU_ASSERT(ctrlr.icdoff == 1);
+ ctrlr.ioccsz_bytes = 0;
+ ctrlr.icdoff = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check FC trtype: ioccsz and icdoff are applied */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_FC;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+ CU_ASSERT(ctrlr.icdoff == 1);
+ ctrlr.ioccsz_bytes = 0;
+ ctrlr.icdoff = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check CUSTOM trtype: ioccsz and icdoff are not applied */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 0);
+ CU_ASSERT(ctrlr.icdoff == 0);
+
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_set_num_queues(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_NUM_QUEUES */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+
+ ctrlr.opts.num_io_queues = 64;
+ /* Num queues is zero-based. So, use 31 to get 32 queues */
+ fake_cpl.cdw0 = 31 + (31 << 16);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> CONSTRUCT_NS */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+ CU_ASSERT(ctrlr.opts.num_io_queues == 32);
+ fake_cpl.cdw0 = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_set_keep_alive_timeout(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
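+ /* Get Features reports a Keep Alive Timer of 120000 ms, which the driver records in opts */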
+ ctrlr.opts.keep_alive_timeout_ms = 60000;
+ ctrlr.cdata.kas = 1;
+ ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
+ fake_cpl.cdw0 = 120000;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_HOST_ID */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_HOST_ID);
+ CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 120000);
+ fake_cpl.cdw0 = 0;
+
+ /* Target does not support Get Feature "Keep Alive Timer" */
+ ctrlr.opts.keep_alive_timeout_ms = 60000;
+ ctrlr.cdata.kas = 1;
+ ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
+ set_status_code = SPDK_NVME_SC_INVALID_FIELD;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_HOST_ID */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_HOST_ID);
+ CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 60000);
+ set_status_code = SPDK_NVME_SC_SUCCESS;
+
+ /* Target fails Get Feature "Keep Alive Timer" for another reason */
+ ctrlr.opts.keep_alive_timeout_ms = 60000;
+ ctrlr.cdata.kas = 1;
+ ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
+ set_status_code = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> ERROR */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
+ set_status_code = SPDK_NVME_SC_SUCCESS;
+
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ctrlr", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_0);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_1);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_1);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_rr);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_vs);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_delay);
+ CU_ADD_TEST(suite, test_alloc_io_qpair_rr_1);
+ CU_ADD_TEST(suite, test_ctrlr_get_default_ctrlr_opts);
+ CU_ADD_TEST(suite, test_ctrlr_get_default_io_qpair_opts);
+ CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_1);
+ CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_2);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_update_firmware);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_fail);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_construct_intel_support_log_page_list);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_features);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_doorbell_buffer_config);
+#if 0 /* TODO: move to PCIe-specific unit test */
+ CU_ADD_TEST(suite, test_nvme_ctrlr_alloc_cmb);
+#endif
+ CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns_error_case);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_reconnect_io_qpair);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_set_trid);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_nvmf_ioccsz);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_num_queues);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_keep_alive_timeout);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore
new file mode 100644
index 000000000..1568b4763
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile
new file mode 100644
index 000000000..5c647dd31
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c
new file mode 100644
index 000000000..581d6134c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c
@@ -0,0 +1,751 @@
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ctrlr_cmd.c"
+
+#define CTRLR_CDATA_ELPE 5
+
+pid_t g_spdk_nvme_pid;
+
+struct nvme_request g_req;
+
+uint32_t error_num_entries;
+uint32_t health_log_nsid = 1;
+uint8_t feature = 1;
+uint32_t feature_cdw11 = 1;
+uint32_t feature_cdw12 = 1;
+uint8_t get_feature = 1;
+uint32_t get_feature_cdw11 = 1;
+uint32_t fw_img_size = 1024;
+uint32_t fw_img_offset = 0;
+uint16_t abort_cid = 1;
+uint16_t abort_sqid = 1;
+uint32_t namespace_management_nsid = 1;
+uint64_t PRP_ENTRY_1 = 4096;
+uint64_t PRP_ENTRY_2 = 4096;
+uint32_t format_nvme_nsid = 1;
+uint32_t sanitize_nvme_nsid = 1;
+uint32_t expected_host_id_size = 0xFF;
+
+uint32_t expected_feature_ns = 2;
+uint32_t expected_feature_cdw10 = SPDK_NVME_FEAT_LBA_RANGE_TYPE;
+uint32_t expected_feature_cdw11 = 1;
+uint32_t expected_feature_cdw12 = 1;
+
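+/* Each test points verify_fn at a checker; the request submission stubs below invoke it on the built command. */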
+typedef void (*verify_request_fn_t)(struct nvme_request *req);
+verify_request_fn_t verify_fn;
+
+static void verify_firmware_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_firmware_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_LOG_FIRMWARE_SLOT;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_health_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == health_log_nsid);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_health_information_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_LOG_HEALTH_INFORMATION;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_error_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG);
+
+ temp_cdw10 = (((sizeof(struct spdk_nvme_error_information_entry) * error_num_entries) /
+ sizeof(uint32_t) - 1) << 16) | SPDK_NVME_LOG_ERROR;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_set_feature_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == feature);
+ CU_ASSERT(req->cmd.cdw11 == feature_cdw11);
+ CU_ASSERT(req->cmd.cdw12 == feature_cdw12);
+}
+
+static void verify_set_feature_ns_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == expected_feature_cdw10);
+ CU_ASSERT(req->cmd.cdw11 == expected_feature_cdw11);
+ CU_ASSERT(req->cmd.cdw12 == expected_feature_cdw12);
+ CU_ASSERT(req->cmd.nsid == expected_feature_ns);
+}
+
+static void verify_get_feature_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == get_feature);
+ CU_ASSERT(req->cmd.cdw11 == get_feature_cdw11);
+}
+
+static void verify_get_feature_ns_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == expected_feature_cdw10);
+ CU_ASSERT(req->cmd.cdw11 == expected_feature_cdw11);
+ CU_ASSERT(req->cmd.nsid == expected_feature_ns);
+}
+
+static void verify_abort_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ABORT);
+ CU_ASSERT(req->cmd.cdw10 == (((uint32_t)abort_cid << 16) | abort_sqid));
+}
+
+static void verify_io_cmd_raw_no_payload_build(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+ struct nvme_payload payload = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+ CU_ASSERT(memcmp(&req->payload, &payload, sizeof(req->payload)) == 0);
+}
+
+static void verify_io_raw_cmd(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+}
+
+static void verify_io_raw_cmd_with_md(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+}
+
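+/* Host Identifier feature: cdw11 bit 0 (EXHID) selects the 128-bit extended host ID format. */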
+static void verify_set_host_id_cmd(struct nvme_request *req)
+{
+ switch (expected_host_id_size) {
+ case 8:
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_FEAT_HOST_IDENTIFIER);
+ CU_ASSERT(req->cmd.cdw11 == 0);
+ CU_ASSERT(req->cmd.cdw12 == 0);
+ break;
+ case 16:
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_FEAT_HOST_IDENTIFIER);
+ CU_ASSERT(req->cmd.cdw11 == 1);
+ CU_ASSERT(req->cmd.cdw12 == 0);
+ break;
+ default:
+ CU_ASSERT(0);
+ }
+}
+
+static void verify_intel_smart_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == health_log_nsid);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_smart_information_page) /
+ sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_SMART;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_temperature_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_temperature_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_TEMPERATURE;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_read_latency_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_write_latency_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_get_log_page_directory(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_log_page_directory) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_marketing_description_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_marketing_description_page) / sizeof(
+ uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_MARKETING_DESCRIPTION;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_namespace_attach(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_ATTACHMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_CTRLR_ATTACH);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_namespace_detach(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_ATTACHMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_CTRLR_DETACH);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_namespace_create(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_MANAGEMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_MANAGEMENT_CREATE);
+ CU_ASSERT(req->cmd.nsid == 0);
+}
+
+static void verify_namespace_delete(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_MANAGEMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_MANAGEMENT_DELETE);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_doorbell_buffer_config(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG);
+ CU_ASSERT(req->cmd.dptr.prp.prp1 == PRP_ENTRY_1);
+ CU_ASSERT(req->cmd.dptr.prp.prp2 == PRP_ENTRY_2);
+}
+
+static void verify_format_nvme(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FORMAT_NVM);
+ CU_ASSERT(req->cmd.cdw10 == 0);
+ CU_ASSERT(req->cmd.nsid == format_nvme_nsid);
+}
+
+static void verify_fw_commit(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FIRMWARE_COMMIT);
+ CU_ASSERT(req->cmd.cdw10 == 0x09);
+}
+
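+/* Firmware Image Download: cdw10 holds the number of dwords minus 1, cdw11 the dword offset. */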
+static void verify_fw_image_download(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD);
+ CU_ASSERT(req->cmd.cdw10 == (fw_img_size >> 2) - 1);
+ CU_ASSERT(req->cmd.cdw11 == fw_img_offset >> 2);
+}
+
+static void verify_nvme_sanitize(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SANITIZE);
+ CU_ASSERT(req->cmd.cdw10 == 0x309);
+ CU_ASSERT(req->cmd.cdw11 == 0);
+ CU_ASSERT(req->cmd.nsid == sanitize_nvme_nsid);
+}
+
+struct nvme_request *
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
+{
+ /* For the unit test, we don't actually need to copy the buffer */
+ return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ verify_fn(req);
+ /* stop analyzer from thinking stack variable addresses are stored in a global */
+ memset(req, 0, sizeof(*req));
+
+ return 0;
+}
+
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
+{
+ verify_fn(req);
+ /* stop analyzer from thinking stack variable addresses are stored in a global */
+ memset(req, 0, sizeof(*req));
+
+ return 0;
+}
+
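+/*
+ * Construct a controller whose admin queue holds a single pre-allocated request,
+ * which is all each test needs to build and submit one admin command.
+ */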
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
+static void
+test_firmware_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_firmware_page payload = {};
+
+ verify_fn = verify_firmware_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_FIRMWARE_SLOT, SPDK_NVME_GLOBAL_NS_TAG,
+ &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void
+test_health_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_health_information_page payload = {};
+
+ verify_fn = verify_health_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, health_log_nsid,
+ &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void
+test_error_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_error_information_entry payload = {};
+
+ ctrlr.cdata.elpe = CTRLR_CDATA_ELPE;
+
+ verify_fn = verify_error_log_page;
+
+ /* valid page */
+ error_num_entries = 1;
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_ERROR, SPDK_NVME_GLOBAL_NS_TAG, &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_smart_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_smart_information_page payload = {};
+
+ verify_fn = verify_intel_smart_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_SMART, health_log_nsid, &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_temperature_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_temperature_page payload = {};
+
+ verify_fn = verify_intel_temperature_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_read_latency_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_rw_latency_page payload = {};
+
+ verify_fn = verify_intel_read_latency_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_write_latency_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_rw_latency_page payload = {};
+
+ verify_fn = verify_intel_write_latency_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_get_log_page_directory(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_log_page_directory payload = {};
+
+ verify_fn = verify_intel_get_log_page_directory;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_marketing_description_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_marketing_description_page payload = {};
+
+ verify_fn = verify_intel_marketing_description_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_MARKETING_DESCRIPTION,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_generic_get_log_pages(void)
+{
+ test_error_get_log_page();
+ test_health_get_log_page();
+ test_firmware_get_log_page();
+}
+
+static void test_intel_get_log_pages(void)
+{
+ test_intel_get_log_page_directory();
+ test_intel_smart_get_log_page();
+ test_intel_temperature_get_log_page();
+ test_intel_read_latency_get_log_page();
+ test_intel_write_latency_get_log_page();
+ test_intel_marketing_description_get_log_page();
+}
+
+static void
+test_set_feature_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_set_feature_cmd;
+
+ spdk_nvme_ctrlr_cmd_set_feature(&ctrlr, feature, feature_cdw11, feature_cdw12, NULL, 0, NULL, NULL);
+}
+
+static void
+test_get_feature_ns_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_get_feature_ns_cmd;
+
+ spdk_nvme_ctrlr_cmd_get_feature_ns(&ctrlr, expected_feature_cdw10,
+ expected_feature_cdw11, NULL, 0,
+ NULL, NULL, expected_feature_ns);
+}
+
+static void
+test_set_feature_ns_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_set_feature_ns_cmd;
+
+ spdk_nvme_ctrlr_cmd_set_feature_ns(&ctrlr, expected_feature_cdw10,
+ expected_feature_cdw11, expected_feature_cdw12,
+ NULL, 0, NULL, NULL, expected_feature_ns);
+}
+
+static void
+test_get_feature_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_get_feature_cmd;
+
+ spdk_nvme_ctrlr_cmd_get_feature(&ctrlr, get_feature, get_feature_cdw11, NULL, 0, NULL, NULL);
+}
+
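+/*
+ * ctrlr.queued_aborts must be initialized because spdk_nvme_ctrlr_cmd_abort()
+ * uses that list; verify_abort_cmd() checks the SQID/CID programmed below.
+ */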
+static void
+test_abort_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+
+ STAILQ_INIT(&ctrlr.queued_aborts);
+
+ verify_fn = verify_abort_cmd;
+
+ qpair.id = abort_sqid;
+ spdk_nvme_ctrlr_cmd_abort(&ctrlr, &qpair, abort_cid, NULL, NULL);
+}
+
+static void
+test_io_cmd_raw_no_payload_build(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_cmd_raw_no_payload_build;
+
+ spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(&ctrlr, &qpair, &cmd, NULL, NULL);
+}
+
+static void
+test_io_raw_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_raw_cmd;
+
+ spdk_nvme_ctrlr_cmd_io_raw(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL);
+}
+
+static void
+test_io_raw_cmd_with_md(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_raw_cmd_with_md;
+
+ spdk_nvme_ctrlr_cmd_io_raw_with_md(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL, NULL);
+}
+
+static int
+test_set_host_id_by_case(uint32_t host_id_size)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ int rc = 0;
+
+ expected_host_id_size = host_id_size;
+ verify_fn = verify_set_host_id_cmd;
+
+ rc = nvme_ctrlr_cmd_set_host_id(&ctrlr, NULL, expected_host_id_size, NULL, NULL);
+
+ return rc;
+}
+
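+/*
+ * nvme_ctrlr_cmd_set_host_id() accepts only 8- or 16-byte host IDs; any other
+ * size is expected to be rejected with -EINVAL.
+ */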
+static void
+test_set_host_id_cmds(void)
+{
+ int rc = 0;
+
+ rc = test_set_host_id_by_case(8);
+ CU_ASSERT(rc == 0);
+ rc = test_set_host_id_by_case(16);
+ CU_ASSERT(rc == 0);
+ rc = test_set_host_id_by_case(1024);
+ CU_ASSERT(rc == -EINVAL);
+}
+
+static void
+test_get_log_pages(void)
+{
+ test_generic_get_log_pages();
+ test_intel_get_log_pages();
+}
+
+static void
+test_namespace_attach(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ctrlr_list payload = {};
+
+ verify_fn = verify_namespace_attach;
+
+ nvme_ctrlr_cmd_attach_ns(&ctrlr, namespace_management_nsid, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_detach(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ctrlr_list payload = {};
+
+ verify_fn = verify_namespace_detach;
+
+ nvme_ctrlr_cmd_detach_ns(&ctrlr, namespace_management_nsid, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_create(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ns_data payload = {};
+
+ verify_fn = verify_namespace_create;
+ nvme_ctrlr_cmd_create_ns(&ctrlr, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_delete(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_namespace_delete;
+ nvme_ctrlr_cmd_delete_ns(&ctrlr, namespace_management_nsid, NULL, NULL);
+}
+
+static void
+test_doorbell_buffer_config(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_doorbell_buffer_config;
+
+ nvme_ctrlr_cmd_doorbell_buffer_config(&ctrlr, PRP_ENTRY_1, PRP_ENTRY_2, NULL, NULL);
+}
+
+static void
+test_format_nvme(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_format format = {};
+
+ verify_fn = verify_format_nvme;
+
+ nvme_ctrlr_cmd_format(&ctrlr, format_nvme_nsid, &format, NULL, NULL);
+}
+
+static void
+test_fw_commit(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_fw_commit fw_commit = {};
+
+ fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG;
+ fw_commit.fs = 1;
+
+ verify_fn = verify_fw_commit;
+
+ nvme_ctrlr_cmd_fw_commit(&ctrlr, &fw_commit, NULL, NULL);
+}
+
+static void
+test_fw_image_download(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_fw_image_download;
+
+ nvme_ctrlr_cmd_fw_image_download(&ctrlr, fw_img_size, fw_img_offset, NULL,
+ NULL, NULL);
+}
+
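+/*
+ * Fill the sanitize action and flag fields with arbitrary non-zero values so
+ * verify_nvme_sanitize() can confirm they are carried into the command.
+ */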
+static void
+test_sanitize(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_sanitize sanitize = {};
+
+ sanitize.sanact = 1;
+ sanitize.ause = 1;
+ sanitize.oipbp = 1;
+ sanitize.ndas = 1;
+
+ verify_fn = verify_nvme_sanitize;
+
+ nvme_ctrlr_cmd_sanitize(&ctrlr, sanitize_nvme_nsid, &sanitize, 0, NULL, NULL);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ctrlr_cmd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_get_log_pages);
+ CU_ADD_TEST(suite, test_set_feature_cmd);
+ CU_ADD_TEST(suite, test_set_feature_ns_cmd);
+ CU_ADD_TEST(suite, test_get_feature_cmd);
+ CU_ADD_TEST(suite, test_get_feature_ns_cmd);
+ CU_ADD_TEST(suite, test_abort_cmd);
+ CU_ADD_TEST(suite, test_set_host_id_cmds);
+ CU_ADD_TEST(suite, test_io_cmd_raw_no_payload_build);
+ CU_ADD_TEST(suite, test_io_raw_cmd);
+ CU_ADD_TEST(suite, test_io_raw_cmd_with_md);
+ CU_ADD_TEST(suite, test_namespace_attach);
+ CU_ADD_TEST(suite, test_namespace_detach);
+ CU_ADD_TEST(suite, test_namespace_create);
+ CU_ADD_TEST(suite, test_namespace_delete);
+ CU_ADD_TEST(suite, test_doorbell_buffer_config);
+ CU_ADD_TEST(suite, test_format_nvme);
+ CU_ADD_TEST(suite, test_fw_commit);
+ CU_ADD_TEST(suite, test_fw_image_download);
+ CU_ADD_TEST(suite, test_sanitize);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore
new file mode 100644
index 000000000..2813105d4
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_ocssd_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile
new file mode 100644
index 000000000..9446b8d53
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_ocssd_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c
new file mode 100644
index 000000000..69de8c5b0
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c
@@ -0,0 +1,106 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ctrlr_ocssd_cmd.c"
+
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
+pid_t g_spdk_nvme_pid;
+struct nvme_request g_req;
+typedef void (*verify_request_fn_t)(struct nvme_request *req);
+verify_request_fn_t verify_fn;
+
+static const uint32_t expected_geometry_ns = 1;
+
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
+{
+ verify_fn(req);
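+	/* stop analyzer from thinking stack variable addresses are stored in a global */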
+ memset(req, 0, sizeof(*req));
+ return 0;
+}
+
+struct nvme_request *
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
+{
+ /* For the unit test, we don't actually need to copy the buffer */
+ return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
+}
+
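+/*
+ * The stubbed nvme_ctrlr_submit_admin_request() above routes every request to
+ * verify_fn, so the geometry test only needs to check the opcode and NSID.
+ */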
+static void verify_geometry_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_OCSSD_OPC_GEOMETRY);
+ CU_ASSERT(req->cmd.nsid == expected_geometry_ns);
+}
+
+static void
+test_geometry_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ struct spdk_ocssd_geometry_data geo;
+
+ verify_fn = verify_geometry_cmd;
+
+ spdk_nvme_ocssd_ctrlr_cmd_geometry(&ctrlr, expected_geometry_ns, &geo,
+ sizeof(geo), NULL, NULL);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+	suite = CU_add_suite("nvme_ctrlr_ocssd_cmd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_geometry_cmd);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore
new file mode 100644
index 000000000..ada0ec86d
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile
new file mode 100644
index 000000000..add85ee9f
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c
new file mode 100644
index 000000000..22c59e06c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c
@@ -0,0 +1,153 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "nvme/nvme_ns.c"
+
+#include "common/lib/test_env.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+DEFINE_STUB(nvme_wait_for_completion_robust_lock, int,
+ (struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status,
+ pthread_mutex_t *robust_mutex), 0);
+
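+/*
+ * Minimal stubs for the symbols nvme_ns.c links against; these tests never
+ * exercise a real identify or completion path.
+ */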
+int
+nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
+ void *payload, size_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return -1;
+}
+
+void
+nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ return -1;
+}
+
+static void
+test_nvme_ns_construct(void)
+{
+ struct spdk_nvme_ns ns = {};
+ uint32_t id = 1;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ nvme_ns_construct(&ns, id, &ctrlr);
+ CU_ASSERT(ns.id == 1);
+}
+
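+/*
+ * The namespace ID descriptor list used below consists of a 4-byte header
+ * (NIDT in byte 0, NIDL in byte 1) followed by NIDL bytes of payload, so a
+ * 16-byte UUID or NGUID entry ends at offset 20, where the next descriptor
+ * begins.
+ */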
+static void
+test_nvme_ns_uuid(void)
+{
+ struct spdk_nvme_ns ns = {};
+ const struct spdk_uuid *uuid;
+ struct spdk_uuid expected_uuid;
+
+ memset(&expected_uuid, 0xA5, sizeof(expected_uuid));
+
+ /* Empty list - no UUID should be found */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ CU_ASSERT(uuid == NULL);
+
+ /* NGUID only (no UUID in list) */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[4], 0xCC, 0x10);
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ CU_ASSERT(uuid == NULL);
+
+ /* Just UUID in the list */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x03; /* NIDT == UUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[4], &expected_uuid, sizeof(expected_uuid));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+
+ /* UUID followed by NGUID */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x03; /* NIDT == UUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[4], &expected_uuid, sizeof(expected_uuid));
+ ns.id_desc_list[20] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[21] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[24], 0xCC, 0x10);
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+
+ /* NGUID followed by UUID */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[4], 0xCC, 0x10);
+	ns.id_desc_list[20] = 0x03; /* NIDT == UUID */
+ ns.id_desc_list[21] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[24], &expected_uuid, sizeof(expected_uuid));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_ns_construct);
+ CU_ADD_TEST(suite, test_nvme_ns_uuid);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore
new file mode 100644
index 000000000..5583ec23e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile
new file mode 100644
index 000000000..ff451d72a
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c
new file mode 100644
index 000000000..fe0014f56
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c
@@ -0,0 +1,1739 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ns_cmd.c"
+#include "nvme/nvme.c"
+
+#include "common/lib/test_env.c"
+
+static struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+static struct nvme_request *g_request = NULL;
+
+int
+spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ return -1;
+}
+
+static void nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
+{
+}
+
+static int nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+ uint32_t *lba_count = cb_arg;
+
+ /*
+ * We need to set address to something here, since the SGL splitting code will
+ * use it to determine PRP compatibility. Just use a rather arbitrary address
+ * for now - these tests will not actually cause data to be read from or written
+ * to this address.
+ */
+ *address = (void *)(uintptr_t)0x10000000;
+ *length = *lba_count;
+ return 0;
+}
+
+bool
+spdk_nvme_transport_available_by_name(const char *transport_name)
+{
+ return true;
+}
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle)
+{
+ return NULL;
+}
+
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+}
+
+int
+nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+}
+
+struct spdk_pci_addr
+spdk_pci_device_get_addr(struct spdk_pci_device *pci_dev)
+{
+ struct spdk_pci_addr pci_addr;
+
+ memset(&pci_addr, 0, sizeof(pci_addr));
+ return pci_addr;
+}
+
+struct spdk_pci_id
+spdk_pci_device_get_id(struct spdk_pci_device *pci_dev)
+{
+ struct spdk_pci_id pci_id;
+
+ memset(&pci_id, 0xFF, sizeof(pci_id));
+
+ return pci_id;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, sizeof(*opts));
+}
+
+uint32_t
+spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
+{
+ return ns->sector_size;
+}
+
+uint32_t
+spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
+{
+ return ns->ctrlr->max_xfer_size;
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ g_request = req;
+
+ return 0;
+}
+
+void
+nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+void
+nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+int
+nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect)
+{
+ return 0;
+}
+
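+/*
+ * Set up a fake controller/namespace/qpair with the requested sector size,
+ * metadata size, max transfer size and stripe size, clear SGL support so the
+ * splitting path is exercised, and seed the qpair with 32 free requests.
+ * g_request captures whatever the stubbed nvme_qpair_submit_request() sees.
+ */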
+static void
+prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair,
+ uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
+ uint32_t stripe_size, bool extended_lba)
+{
+ uint32_t num_requests = 32;
+ uint32_t i;
+
+ ctrlr->max_xfer_size = max_xfer_size;
+ /*
+ * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
+ * so that we test the SGL splitting path.
+ */
+ ctrlr->flags = 0;
+ ctrlr->min_page_size = 4096;
+ ctrlr->page_size = 4096;
+ memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
+ memset(ns, 0, sizeof(*ns));
+ ns->ctrlr = ctrlr;
+ ns->sector_size = sector_size;
+ ns->extended_lba_size = sector_size;
+ if (extended_lba) {
+ ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
+ ns->extended_lba_size += md_size;
+ }
+ ns->md_size = md_size;
+ ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
+ ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;
+
+ memset(qpair, 0, sizeof(*qpair));
+ qpair->ctrlr = ctrlr;
+ qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
+ SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
+
+ for (i = 0; i < num_requests; i++) {
+ struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);
+
+ req->qpair = qpair;
+ STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
+ }
+
+ g_request = NULL;
+}
+
+static void
+cleanup_after_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
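+/*
+ * Decode the starting LBA (CDW10/11) and the zero-based NLB field
+ * (low 16 bits of CDW12, plus one) from a read/write command.
+ */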
+static void
+nvme_cmd_interpret_rw(const struct spdk_nvme_cmd *cmd,
+ uint64_t *lba, uint32_t *num_blocks)
+{
+ *lba = *(const uint64_t *)&cmd->cdw10;
+ *num_blocks = (cmd->cdw12 & 0xFFFFu) + 1;
+}
+
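+/* Single-sector read well below the 128 KB max transfer size: no children expected. */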
+static void
+split_test(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_ctrlr ctrlr;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(512);
+ lba = 0;
+ lba_count = 1;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(cmd_lba == lba);
+ CU_ASSERT(cmd_lba_count == lba_count);
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test2(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks).
+ * Submit an I/O of 256 KB starting at LBA 0, which should be split
+ * on the max I/O boundary into two I/Os of 128 KB.
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(256 * 1024);
+ lba = 0;
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ CU_ASSERT(g_request->num_children == 2);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 0);
+ CU_ASSERT(cmd_lba_count == 256); /* 256 * 512 byte blocks = 128 KB */
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 256);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test3(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks).
+ * Submit an I/O of 256 KB starting at LBA 10, which should be split
+ * into two I/Os:
+ * 1) LBA = 10, count = 256 blocks
+ * 2) LBA = 266, count = 256 blocks
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(256 * 1024);
+ lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 10);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 266);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test4(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks) and a stripe size of 128 KB.
+ * (Same as split_test3 except with driver-assisted striping enabled.)
+ * Submit an I/O of 256 KB starting at LBA 10, which should be split
+ * into three I/Os:
+ * 1) LBA = 10, count = 246 blocks (less than max I/O size to align to stripe size)
+ * 2) LBA = 256, count = 256 blocks (aligned to stripe size and max I/O size)
+ * 3) LBA = 512, count = 10 blocks (finish off the remaining I/O size)
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
+ payload = malloc(256 * 1024);
+ lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 3);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == (256 - 10) * 512);
+ CU_ASSERT(child->payload_offset == 0);
+ CU_ASSERT(cmd_lba == 10);
+ CU_ASSERT(cmd_lba_count == 256 - 10);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(child->payload_offset == (256 - 10) * 512);
+ CU_ASSERT(cmd_lba == 256);
+ CU_ASSERT(cmd_lba_count == 256);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 10 * 512);
+ CU_ASSERT(child->payload_offset == (512 - 10) * 512);
+ CU_ASSERT(cmd_lba == 512);
+ CU_ASSERT(cmd_lba_count == 10);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
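+/*
+ * Reads of exactly the max I/O size (and one sector less) must not be split,
+ * a 4x max I/O read must produce four children, and a read large enough to
+ * require more child requests than the driver allows must fail with -EINVAL.
+ */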
+static void
+test_cmd_child_request(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ struct nvme_request *child, *tmp;
+ void *payload;
+ uint64_t lba = 0x1000;
+ uint32_t i = 0;
+ uint32_t offset = 0;
+ uint32_t sector_size = 512;
+ uint32_t max_io_size = 128 * 1024;
+ uint32_t sectors_per_max_io = max_io_size / sector_size;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_io_size, 0, false);
+
+ payload = malloc(128 * 1024);
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->payload_offset == 0);
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io - 1, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->payload_offset == 0);
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io * 4, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->num_children == 4);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, (DEFAULT_IO_QUEUE_REQUESTS + 1) * sector_size,
+ NULL,
+ NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == -EINVAL);
+
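+	/*
+	 * The oversized read above fails before reaching the stubbed submit
+	 * function, so g_request still points at the four-child read submitted
+	 * earlier; walk and verify those children here.
+	 */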
+ TAILQ_FOREACH_SAFE(child, &g_request->children, child_tailq, tmp) {
+ nvme_request_remove_child(g_request, child);
+ CU_ASSERT(child->payload_offset == offset);
+ CU_ASSERT(child->cmd.opc == SPDK_NVME_OPC_READ);
+ CU_ASSERT(child->cmd.nsid == ns.id);
+ CU_ASSERT(child->cmd.cdw10 == (lba + sectors_per_max_io * i));
+ CU_ASSERT(child->cmd.cdw12 == ((sectors_per_max_io - 1) | 0));
+ offset += max_io_size;
+ nvme_free_request(child);
+ i++;
+ }
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_flush(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_flush(&ns, &qpair, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_zeroes(void)
+{
+ struct spdk_nvme_ns ns = { 0 };
+ struct spdk_nvme_ctrlr ctrlr = { 0 };
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ uint64_t cmd_lba;
+ uint32_t cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_write_zeroes(&ns, &qpair, 0, 2, cb_fn, cb_arg, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, 0);
+ CU_ASSERT_EQUAL(cmd_lba_count, 2);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_uncorrectable(void)
+{
+ struct spdk_nvme_ns ns = { 0 };
+ struct spdk_nvme_ctrlr ctrlr = { 0 };
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ uint64_t cmd_lba;
+ uint32_t cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_write_uncorrectable(&ns, &qpair, 0, 2, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_UNCORRECTABLE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, 0);
+ CU_ASSERT_EQUAL(cmd_lba_count, 2);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
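+/*
+ * DSM/deallocate (TRIM) test: CDW10 carries the zero-based range count and the
+ * dsm.ad (deallocate) bit in CDW11 must be set; a zero-range request must be
+ * rejected.
+ */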
+static void
+test_nvme_ns_cmd_dataset_management(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ struct spdk_nvme_dsm_range ranges[256];
+ uint16_t i;
+ int rc = 0;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ for (i = 0; i < 256; i++) {
+ ranges[i].starting_lba = i;
+ ranges[i].length = 1;
+ ranges[i].attributes.raw = 0;
+ }
+
+ /* TRIM one LBA */
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ ranges, 1, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == 0);
+ CU_ASSERT(g_request->cmd.cdw11_bits.dsm.ad == 1);
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+
+ /* TRIM 256 LBAs */
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ ranges, 256, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == 255u);
+ CU_ASSERT(g_request->cmd.cdw11_bits.dsm.ad == 1);
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ NULL, 0, cb_fn, cb_arg);
+ CU_ASSERT(rc != 0);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_readv(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512);
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
+ NULL);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_writev(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512);
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
+ NULL, nvme_request_next_sge);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_comparev(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512);
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
+ nvme_request_reset_sgl, NULL);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
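+/*
+ * Exercise metadata handling for compare: separate metadata buffer vs. extended
+ * LBA, with and without protection information + PRACT, checking how request
+ * splitting accounts for per-block metadata.
+ */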
+static void
+test_nvme_ns_cmd_comparev_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+ struct nvme_request *child0, *child1;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc((block_size + md_size) * 384);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 384);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * (512 + 128) bytes per block = two I/Os:
+ * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+ * child 1: 52 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+ CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * No protection information
+ *
+ * 256 blocks * (512 + 8) bytes per block = two I/Os:
+ * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+ CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+ * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+ * However, the splitting code does not account for PRACT when calculating
+ * max sectors per transfer, so we actually get two I/Os:
+ * child 0: 252 blocks
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length,
+ SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * 512);
+ CU_ASSERT(child1->payload_size == 4 * 512);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length,
+ SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * 384 blocks * 512 bytes = two I/Os:
+ * child 0: 256 blocks
+ * child 1: 128 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 384, NULL, &sge_length,
+ SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 256 * 512);
+ CU_ASSERT(child0->md_offset == 0);
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload_offset == 256 * 512);
+ CU_ASSERT(child1->payload_size == 128 * 512);
+ CU_ASSERT(child1->md_offset == 256 * 8);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
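+/*
+ * A fused compare-and-write is issued as two commands: the compare carries
+ * FUSE_FIRST and the write FUSE_SECOND; both must keep the same LBA and count.
+ */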
+static void
+test_nvme_ns_cmd_compare_and_write(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ uint64_t lba = 0x1000;
+ uint32_t lba_count = 256;
+ uint64_t cmd_lba;
+ uint32_t cmd_lba_count;
+ uint32_t sector_size = 512;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_compare(&ns, &qpair, NULL, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FUSE_FIRST);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
+ CU_ASSERT(g_request->cmd.fuse == SPDK_NVME_CMD_FUSE_FIRST);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, lba);
+ CU_ASSERT_EQUAL(cmd_lba_count, lba_count);
+
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_write(&ns, &qpair, NULL, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FUSE_SECOND);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(g_request->cmd.fuse == SPDK_NVME_CMD_FUSE_SECOND);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, lba);
+ CU_ASSERT_EQUAL(cmd_lba_count, lba_count);
+
+ nvme_free_request(g_request);
+
+ cleanup_after_test(&qpair);
+}
+
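+/*
+ * Per-I/O flags must land in CDW12: FUA and LIMITED_RETRY are independent,
+ * the full valid mask is passed through, and flags outside
+ * SPDK_NVME_IO_FLAGS_VALID_MASK are rejected with -EINVAL.
+ */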
+static void
+test_io_flags(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ void *payload;
+ uint64_t lba;
+ uint32_t lba_count;
+ uint64_t cmd_lba;
+ uint32_t cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
+ payload = malloc(256 * 1024);
+ lba = 0;
+ lba_count = (4 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) != 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_write(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_VALID_MASK);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba_count, lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, lba);
+ CU_ASSERT_EQUAL(g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_CDW12_MASK,
+ SPDK_NVME_IO_FLAGS_CDW12_MASK);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_write(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ ~SPDK_NVME_IO_FLAGS_VALID_MASK);
+ CU_ASSERT(rc == -EINVAL);
+
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
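+/*
+ * The reservation tests rebuild CDW10 the same way the driver does: the action
+ * in the low bits, the ignore-key flag at bit 3, and either the PTPL change
+ * field shifted to bits 31:30 (register) or the reservation type shifted by
+ * 8 bits (acquire/release).
+ */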
+static void
+test_nvme_ns_cmd_reservation_register(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_register_data *payload;
+ bool ignore_key = 1;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_register_data));
+
+ rc = spdk_nvme_ns_cmd_reservation_register(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_REGISTER_KEY,
+ SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REGISTER);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
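+	/*
+	 * Expected CDW10 layout for Reservation Register: RREGA action in bits [2:0],
+	 * IEKEY in bit 3, CPTPL in bits [31:30].
+	 */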
+ tmp_cdw10 = SPDK_NVME_RESERVE_REGISTER_KEY;
+ tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+ tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_PTPL_NO_CHANGES << 30;
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_release(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_key_data *payload;
+ bool ignore_key = true;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_key_data));
+ SPDK_CU_ASSERT_FATAL(payload != NULL);
+
+ rc = spdk_nvme_ns_cmd_reservation_release(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_RELEASE,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_RELEASE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ tmp_cdw10 = SPDK_NVME_RESERVE_RELEASE;
+ tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+ tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_acquire(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_acquire_data *payload;
+ bool ignore_key = true;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data));
+ SPDK_CU_ASSERT_FATAL(payload != NULL);
+
+ rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_ACQUIRE,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ tmp_cdw10 = SPDK_NVME_RESERVE_ACQUIRE;
+ tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+ tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_report(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_status_data *payload;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t size = sizeof(struct spdk_nvme_reservation_status_data);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ payload = calloc(1, size);
+ SPDK_CU_ASSERT_FATAL(payload != NULL);
+
+ rc = spdk_nvme_ns_cmd_reservation_report(&ns, &qpair, payload, size, cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REPORT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ CU_ASSERT(g_request->cmd.cdw10 == (size / 4));
+
+ spdk_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+ struct nvme_request *child0, *child1;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc((block_size + md_size) * 384);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 384);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->md_size == 256 * 128);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * (512 + 128) bytes per block = two I/Os:
+ * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+ * child 1: 52 blocks
+ */
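+	/* 204 = 131072 / (512 + 128) rounded down, so the remaining 52 blocks form the second child. */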
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+ CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * No protection information
+ *
+ * 256 blocks * (512 + 8) bytes per block = two I/Os:
+ * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+ * child 1: 4 blocks
+ */
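+	/* 252 = 131072 / (512 + 8) rounded down, leaving 4 blocks for the second child. */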
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+ CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * Special case for 8-byte metadata + PI + PRACT: no metadata is transferred.
+ * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+ * However, the splitting code does not account for PRACT when calculating
+ * max sectors per transfer, so we actually get two I/Os:
+ * child 0: 252 blocks
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * 512);
+ CU_ASSERT(child1->payload_size == 4 * 512);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->md_size == 256 * 8);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * 384 blocks * 512 bytes = two I/Os:
+ * child 0: 256 blocks
+ * child 1: 128 blocks
+ */
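+	/* 256 = 131072 / 512 sectors per child I/O; the metadata splits at the matching 256 * 8 byte offset. */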
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 256 * 512);
+ CU_ASSERT(child0->md_offset == 0);
+ CU_ASSERT(child0->md_size == 256 * 8);
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload_offset == 256 * 512);
+ CU_ASSERT(child1->payload_size == 128 * 512);
+ CU_ASSERT(child1->md_offset == 256 * 8);
+ CU_ASSERT(child1->md_size == 128 * 8);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ns_cmd_read_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc(block_size * 256);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 256);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_read_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->md_size == 256 * md_size);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ns_cmd_compare_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+ struct nvme_request *child0, *child1;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc((block_size + md_size) * 384);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 384);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * (512 + 128) bytes per block = two I/Os:
+ * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+ * child 1: 52 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+ CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * No protection information
+ *
+ * 256 blocks * (512 + 8) bytes per block = two I/Os:
+ * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+ CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * Special case for 8-byte metadata + PI + PRACT: no metadata is transferred.
+ * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+ * However, the splitting code does not account for PRACT when calculating
+ * max sectors per transfer, so we actually get two I/Os:
+ * child 0: 252 blocks
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * 512);
+ CU_ASSERT(child1->payload_size == 4 * 512);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * 384 blocks * 512 bytes = two I/Os:
+ * child 0: 256 blocks
+ * child 1: 128 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 256 * 512);
+ CU_ASSERT(child0->md_offset == 0);
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload_offset == 256 * 512);
+ CU_ASSERT(child1->payload_size == 128 * 512);
+ CU_ASSERT(child1->md_offset == 256 * 8);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+
+ CU_ADD_TEST(suite, split_test);
+ CU_ADD_TEST(suite, split_test2);
+ CU_ADD_TEST(suite, split_test3);
+ CU_ADD_TEST(suite, split_test4);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_flush);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_dataset_management);
+ CU_ADD_TEST(suite, test_io_flags);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_write_zeroes);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_write_uncorrectable);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_register);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_release);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_acquire);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_report);
+ CU_ADD_TEST(suite, test_cmd_child_request);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_readv);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_read_with_md);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_writev);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_write_with_md);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_comparev);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_compare_and_write);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_compare_with_md);
+ CU_ADD_TEST(suite, test_nvme_ns_cmd_comparev_with_md);
+
+ g_spdk_nvme_driver = &_g_nvme_driver;
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore
new file mode 100644
index 000000000..8f4f47a17
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_ocssd_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile
new file mode 100644
index 000000000..35fdb83a0
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_ocssd_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c
new file mode 100644
index 000000000..fa25a4640
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c
@@ -0,0 +1,650 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ns_ocssd_cmd.c"
+#include "nvme/nvme_ns_cmd.c"
+#include "nvme/nvme.c"
+
+#include "common/lib/test_env.c"
+
+#define OCSSD_SECTOR_SIZE 0x1000
+
+static struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+static struct nvme_request *g_request = NULL;
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ g_request = req;
+
+ return 0;
+}
+
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+}
+
+void
+nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+int
+nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+void
+nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, sizeof(*opts));
+}
+
+bool
+spdk_nvme_transport_available_by_name(const char *transport_name)
+{
+ return true;
+}
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle)
+{
+ return NULL;
+}
+
+int
+nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect)
+{
+ return 0;
+}
+
+uint32_t
+spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
+{
+ return ns->ctrlr->max_xfer_size;
+}
+
+static void
+prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair,
+ uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
+ uint32_t stripe_size, bool extended_lba)
+{
+ uint32_t num_requests = 32;
+ uint32_t i;
+
+ ctrlr->max_xfer_size = max_xfer_size;
+ /*
+ * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
+ * so that we test the SGL splitting path.
+ */
+ ctrlr->flags = 0;
+ ctrlr->min_page_size = 4096;
+ ctrlr->page_size = 4096;
+ memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
+ memset(ns, 0, sizeof(*ns));
+ ns->ctrlr = ctrlr;
+ ns->sector_size = sector_size;
+ ns->extended_lba_size = sector_size;
+ if (extended_lba) {
+ ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
+ ns->extended_lba_size += md_size;
+ }
+ ns->md_size = md_size;
+ ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
+ ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;
+
+ memset(qpair, 0, sizeof(*qpair));
+ qpair->ctrlr = ctrlr;
+ qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
+ SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
+
+ for (i = 0; i < num_requests; i++) {
+ struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);
+
+ req->qpair = qpair;
+ STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
+ }
+
+ g_request = NULL;
+}
+
+static void
+cleanup_after_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_reset_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ uint64_t lba_list = 0x12345678;
+ rc = spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, &lba_list, 1,
+ NULL, NULL, NULL);
+
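+	/*
+	 * With a single entry, the LBA is expected to be embedded directly in
+	 * CDW10/11 and CDW12 (the 0-based number of LBAs) to be 0.
+	 */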
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_reset(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ uint64_t lba_list[vector_size];
+ rc = spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, lba_list, vector_size,
+ NULL, NULL, NULL);
+
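+	/* CDW12 carries the 0-based number of LBAs, hence vector_size - 1. */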
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t md_size = 0x80;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ char *metadata = malloc(md_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, buffer, metadata,
+ &lba_list, 1, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read_with_md(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t md_size = 0x80;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ char *metadata = malloc(md_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, buffer, metadata,
+ lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, buffer, &lba_list, 1,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, buffer, lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t md_size = 0x80;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ char *metadata = malloc(md_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
+ &lba_list, 1, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write_with_md(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t md_size = 0x80;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ char *metadata = malloc(md_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
+ lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
+ &lba_list, 1, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
+ lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_copy_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ uint64_t src_lba_list = 0x12345678;
+ uint64_t dst_lba_list = 0x87654321;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair, &dst_lba_list, &src_lba_list, 1,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == src_lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+ CU_ASSERT(g_request->cmd.cdw14 == dst_lba_list);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_copy(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ uint64_t src_lba_list[vector_size];
+ uint64_t dst_lba_list[vector_size];
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair,
+ dst_lba_list, src_lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ns_ocssd_cmd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_reset);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_reset_single_entry);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read_with_md);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read_single_entry);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write_with_md);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write_single_entry);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_copy);
+ CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_copy_single_entry);
+
+ g_spdk_nvme_driver = &_g_nvme_driver;
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore
new file mode 100644
index 000000000..8fc291095
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore
@@ -0,0 +1 @@
+nvme_pcie_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile
new file mode 100644
index 000000000..09032a935
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_pcie_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
new file mode 100644
index 000000000..ccc59b4da
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
@@ -0,0 +1,498 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#define UNIT_TEST_NO_VTOPHYS
+
+#include "nvme/nvme_pcie.c"
+#include "common/lib/nvme/common_stubs.h"
+
+pid_t g_spdk_nvme_pid;
+DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
+DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
+
+DEFINE_STUB(nvme_get_quirks, uint64_t, (const struct spdk_pci_id *id), 0);
+
+DEFINE_STUB(nvme_wait_for_completion, int,
+ (struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status), 0);
+DEFINE_STUB_V(nvme_completion_poll_cb, (void *arg, const struct spdk_nvme_cpl *cpl));
+
+DEFINE_STUB(nvme_ctrlr_submit_admin_request, int, (struct spdk_nvme_ctrlr *ctrlr,
+ struct nvme_request *req), 0);
+DEFINE_STUB_V(nvme_ctrlr_free_processes, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB(nvme_ctrlr_proc_get_devhandle, struct spdk_pci_device *,
+ (struct spdk_nvme_ctrlr *ctrlr), NULL);
+
+DEFINE_STUB(spdk_pci_device_map_bar, int, (struct spdk_pci_device *dev, uint32_t bar,
+ void **mapped_addr, uint64_t *phys_addr, uint64_t *size), 0);
+DEFINE_STUB(spdk_pci_device_unmap_bar, int, (struct spdk_pci_device *dev, uint32_t bar, void *addr),
+ 0);
+DEFINE_STUB(spdk_pci_device_attach, int, (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb,
+ void *enum_ctx, struct spdk_pci_addr *pci_address), 0);
+DEFINE_STUB(spdk_pci_device_claim, int, (struct spdk_pci_device *dev), 0);
+DEFINE_STUB_V(spdk_pci_device_unclaim, (struct spdk_pci_device *dev));
+DEFINE_STUB_V(spdk_pci_device_detach, (struct spdk_pci_device *device));
+DEFINE_STUB(spdk_pci_device_cfg_write16, int, (struct spdk_pci_device *dev, uint16_t value,
+ uint32_t offset), 0);
+DEFINE_STUB(spdk_pci_device_cfg_read16, int, (struct spdk_pci_device *dev, uint16_t *value,
+ uint32_t offset), 0);
+DEFINE_STUB(spdk_pci_device_get_id, struct spdk_pci_id, (struct spdk_pci_device *dev), {0})
+
+DEFINE_STUB(nvme_uevent_connect, int, (void), 0);
+
+struct spdk_log_flag SPDK_LOG_NVME = {
+ .name = "nvme",
+ .enabled = false,
+};
+
+struct nvme_driver *g_spdk_nvme_driver = NULL;
+
+bool g_device_is_enumerated = false;
+
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+ CU_ASSERT(ctrlr != NULL);
+ if (hot_remove) {
+ ctrlr->is_removed = true;
+ }
+
+ ctrlr->is_failed = true;
+}
+
+struct spdk_uevent_entry {
+ struct spdk_uevent uevent;
+ STAILQ_ENTRY(spdk_uevent_entry) link;
+};
+
+static STAILQ_HEAD(, spdk_uevent_entry) g_uevents = STAILQ_HEAD_INITIALIZER(g_uevents);
+
+int
+nvme_get_uevent(int fd, struct spdk_uevent *uevent)
+{
+ struct spdk_uevent_entry *entry;
+
+ if (STAILQ_EMPTY(&g_uevents)) {
+ return 0;
+ }
+
+ entry = STAILQ_FIRST(&g_uevents);
+ STAILQ_REMOVE_HEAD(&g_uevents, link);
+
+ *uevent = entry->uevent;
+
+ return 1;
+}
+
+int
+spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ g_device_is_enumerated = true;
+
+ return 0;
+}
+
+static uint64_t g_vtophys_size = 0;
+
+DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
+uint64_t
+spdk_vtophys(void *buf, uint64_t *size)
+{
+ if (size) {
+ *size = g_vtophys_size;
+ }
+
+ HANDLE_RETURN_MOCK(spdk_vtophys);
+
+ return (uintptr_t)buf;
+}
+
+DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr, (struct spdk_pci_device *dev), {});
+DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle), 0);
+DEFINE_STUB(spdk_pci_device_is_removed, bool, (struct spdk_pci_device *dev), false);
+DEFINE_STUB(nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
+ (const struct spdk_nvme_transport_id *trid), NULL);
+DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts, union spdk_nvme_csts_register,
+ (struct spdk_nvme_ctrlr *ctrlr), {});
+DEFINE_STUB(nvme_ctrlr_get_process, struct spdk_nvme_ctrlr_process *,
+ (struct spdk_nvme_ctrlr *ctrlr, pid_t pid), NULL);
+DEFINE_STUB(nvme_completion_is_retry, bool, (const struct spdk_nvme_cpl *cpl), false);
+DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_cpl *cpl));
+
+static void
+prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
+{
+ memset(req, 0, sizeof(*req));
+ memset(tr, 0, sizeof(*tr));
+ tr->req = req;
+ tr->prp_sgl_bus_addr = 0xDEADBEEF;
+ *prp_index = 0;
+}
+
+static void
+test_prp_list_append(void)
+{
+ struct nvme_request req;
+ struct nvme_tracker tr;
+ uint32_t prp_index;
+
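+	/*
+	 * The cases below exercise the basic PRP rules: PRP1 may start at any
+	 * dword-aligned offset within a page, every subsequent entry must be
+	 * page-aligned, and once more than two entries are needed PRP2 points to the
+	 * PRP list at tr.prp_sgl_bus_addr.
+	 */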
+ /* Non-DWORD-aligned buffer (invalid) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100001, 0x1000, 0x1000) == -EFAULT);
+
+ /* 512-byte buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+
+ /* 512-byte buffer, non-4K-aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);
+
+ /* 4K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+
+ /* 4K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
+
+ /* 8K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x2000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
+
+ /* 8K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x2000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+
+ /* 12K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x3000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+
+ /* 12K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x3000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 4);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+ CU_ASSERT(tr.u.prp[2] == 0x103000);
+
+ /* Two 4K buffers, both 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);
+
+ /* Two 4K buffers, first non-4K aligned, second 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x900000);
+
+ /* Two 4K buffers, both non-4K aligned (invalid) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900800, 0x1000, 0x1000) == -EFAULT);
+ CU_ASSERT(prp_index == 2);
+
+ /* 4K buffer, 4K aligned, but vtophys fails */
+ MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == -EFAULT);
+ MOCK_CLEAR(spdk_vtophys);
+
+ /* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+ (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
+
+ /* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+ NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
+
+ /* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+ (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EFAULT);
+
+ /* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+ (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EFAULT);
+}
+
+static void
+test_nvme_pcie_hotplug_monitor(void)
+{
+ struct nvme_pcie_ctrlr pctrlr = {};
+ struct spdk_uevent_entry entry = {};
+ struct nvme_driver driver;
+ pthread_mutexattr_t attr;
+ struct spdk_nvme_probe_ctx test_nvme_probe_ctx = {};
+
+ /* Initiate variables and ctrlr */
+ driver.initialized = true;
+ driver.hotplug_fd = 123;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&driver.lock, &attr) == 0);
+ TAILQ_INIT(&driver.shared_attached_ctrlrs);
+ g_spdk_nvme_driver = &driver;
+
+	/* Case 1: SPDK_NVME_UEVENT_ADD / VFIO subsystem */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_VFIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_ADD;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(g_device_is_enumerated == true);
+ g_device_is_enumerated = false;
+
+	/* Case 2: SPDK_NVME_UEVENT_ADD / UIO subsystem */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_UIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_ADD;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(g_device_is_enumerated == true);
+ g_device_is_enumerated = false;
+
+	/* Case 3: SPDK_NVME_UEVENT_REMOVE / UIO subsystem */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_UIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_REMOVE;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+
+ MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(pctrlr.ctrlr.is_failed == true);
+ pctrlr.ctrlr.is_failed = false;
+ MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);
+
+	/* Case 4: SPDK_NVME_UEVENT_REMOVE / VFIO subsystem */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_VFIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_REMOVE;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+ MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(pctrlr.ctrlr.is_failed == true);
+ pctrlr.ctrlr.is_failed = false;
+ MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);
+
+ /* Case 5: Removed device detected in another process */
+ pctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ snprintf(pctrlr.ctrlr.trid.traddr, sizeof(pctrlr.ctrlr.trid.traddr), "0000:02:00.0");
+ pctrlr.ctrlr.remove_cb = NULL;
+ pctrlr.ctrlr.is_failed = false;
+ pctrlr.ctrlr.is_removed = false;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &pctrlr.ctrlr, tailq);
+
+ MOCK_SET(spdk_pci_device_is_removed, false);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(pctrlr.ctrlr.is_failed == false);
+
+ MOCK_SET(spdk_pci_device_is_removed, true);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(pctrlr.ctrlr.is_failed == true);
+
+ pthread_mutex_destroy(&driver.lock);
+ pthread_mutexattr_destroy(&attr);
+ g_spdk_nvme_driver = NULL;
+}
+
+static void
+test_shadow_doorbell_update(void)
+{
+ bool ret;
+
+ /* nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) */
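+	/* An event is only needed when event_idx lies in [old, new_idx) modulo 2^16;
+	 * the two checks below cover one miss and one hit. */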
+ ret = nvme_pcie_qpair_need_event(10, 15, 14);
+ CU_ASSERT(ret == false);
+
+ ret = nvme_pcie_qpair_need_event(14, 15, 14);
+ CU_ASSERT(ret == true);
+}
+
+static void
+test_build_contig_hw_sgl_request(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request req = {};
+ struct nvme_tracker tr = {};
+ int rc;
+
+ /* Test 1: Payload covered by a single mapping */
+ req.payload_size = 100;
+ req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+ g_vtophys_size = 100;
+ MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+ rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);
+
+ MOCK_CLEAR(spdk_vtophys);
+ g_vtophys_size = 0;
+ memset(&qpair, 0, sizeof(qpair));
+ memset(&req, 0, sizeof(req));
+ memset(&tr, 0, sizeof(tr));
+
+ /* Test 2: Payload covered by a single mapping, but request is at an offset */
+ req.payload_size = 100;
+ req.payload_offset = 50;
+ req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+ g_vtophys_size = 1000;
+ MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+ rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);
+
+ MOCK_CLEAR(spdk_vtophys);
+ g_vtophys_size = 0;
+ memset(&qpair, 0, sizeof(qpair));
+ memset(&req, 0, sizeof(req));
+ memset(&tr, 0, sizeof(tr));
+
+ /* Test 3: Payload spans two mappings */
+ req.payload_size = 100;
+ req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+ g_vtophys_size = 60;
+ tr.prp_sgl_bus_addr = 0xFF0FF;
+ MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+ rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == tr.prp_sgl_bus_addr);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 2 * sizeof(struct spdk_nvme_sgl_descriptor));
+ CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(tr.u.sgl[0].unkeyed.length == 60);
+ CU_ASSERT(tr.u.sgl[0].address == 0xDEADBEEF);
+ CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(tr.u.sgl[1].unkeyed.length == 40);
+ CU_ASSERT(tr.u.sgl[1].address == 0xDEADBEEF);
+
+ MOCK_CLEAR(spdk_vtophys);
+ g_vtophys_size = 0;
+ memset(&qpair, 0, sizeof(qpair));
+ memset(&req, 0, sizeof(req));
+ memset(&tr, 0, sizeof(tr));
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_pcie", NULL, NULL);
+ CU_ADD_TEST(suite, test_prp_list_append);
+ CU_ADD_TEST(suite, test_nvme_pcie_hotplug_monitor);
+ CU_ADD_TEST(suite, test_shadow_doorbell_update);
+ CU_ADD_TEST(suite, test_build_contig_hw_sgl_request);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore
new file mode 100644
index 000000000..e4223e112
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore
@@ -0,0 +1 @@
+nvme_poll_group_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile
new file mode 100644
index 000000000..4715b5449
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_poll_group_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c
new file mode 100644
index 000000000..1503a49c5
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c
@@ -0,0 +1,484 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_poll_group.c"
+#include "common/lib/test_env.c"
+
+struct spdk_nvme_transport {
+ const char name[32];
+ TAILQ_ENTRY(spdk_nvme_transport) link;
+};
+
+struct spdk_nvme_transport t1 = {
+ .name = "transport1",
+};
+
+struct spdk_nvme_transport t2 = {
+ .name = "transport2",
+};
+
+struct spdk_nvme_transport t3 = {
+ .name = "transport3",
+};
+
+struct spdk_nvme_transport t4 = {
+ .name = "transport4",
+};
+
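+/* Return values used by the transport poll group stubs below. */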
+int64_t g_process_completions_return_value = 0;
+int g_destroy_return_value = 0;
+
+TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
+ TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);
+
+static void
+unit_test_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
+{
+
+}
+
+const struct spdk_nvme_transport *
+nvme_get_first_transport(void)
+{
+ return TAILQ_FIRST(&g_spdk_nvme_transports);
+}
+
+const struct spdk_nvme_transport *
+nvme_get_next_transport(const struct spdk_nvme_transport *transport)
+{
+ return TAILQ_NEXT(transport, link);
+}
+
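+/* Stub: move the qpair from its poll group's connected list to the disconnected list. */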
+int
+nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
+{
+ struct spdk_nvme_transport_poll_group *tgroup;
+ struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
+
+ tgroup = qpair->poll_group;
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->connected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ STAILQ_FOREACH(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq) {
+ if (qpair == iter_qp) {
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
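+/* Stub: move the qpair from its poll group's disconnected list back to the connected list. */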
+int
+nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
+{
+ struct spdk_nvme_transport_poll_group *tgroup;
+ struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
+
+ tgroup = qpair->poll_group;
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ STAILQ_FOREACH(iter_qp, &tgroup->connected_qpairs, poll_group_stailq) {
+ if (qpair == iter_qp) {
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+struct spdk_nvme_transport_poll_group *
+nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
+{
+ struct spdk_nvme_transport_poll_group *group = NULL;
+
+ /* TODO: separate this transport function table from the transport specific one. */
+ group = calloc(1, sizeof(*group));
+ if (group) {
+ group->transport = transport;
+ STAILQ_INIT(&group->connected_qpairs);
+ STAILQ_INIT(&group->disconnected_qpairs);
+ }
+
+ return group;
+}
+
+int
+nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
+{
+ return g_destroy_return_value;
+}
+
+int
+nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
+ struct spdk_nvme_qpair *qpair)
+{
+ STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
+ qpair->poll_group = tgroup;
+
+ return 0;
+}
+
+int
+nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
+ struct spdk_nvme_qpair *qpair)
+{
+ struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->connected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+int64_t
+nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *group,
+ uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
+{
+ return g_process_completions_return_value;
+}
+
+static void
+test_spdk_nvme_poll_group_create(void)
+{
+ struct spdk_nvme_poll_group *group;
+
+ /* basic case - create a poll group with no internal transport poll groups. */
+ group = spdk_nvme_poll_group_create(NULL);
+
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ CU_ASSERT(STAILQ_EMPTY(&group->tgroups));
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+
+	/* advanced case - create a poll group while three transports are registered.
+	 * Transport poll groups are created lazily when qpairs are added, so the list is still empty. */
+	group = spdk_nvme_poll_group_create(NULL);
+	SPDK_CU_ASSERT_FATAL(group != NULL);
+	CU_ASSERT(STAILQ_EMPTY(&group->tgroups));
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ /* Failing case - failed to allocate a poll group. */
+ MOCK_SET(calloc, NULL);
+ group = spdk_nvme_poll_group_create(NULL);
+ CU_ASSERT(group == NULL);
+ MOCK_CLEAR(calloc);
+
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t3, link);
+}
+
+static void
+test_spdk_nvme_poll_group_add_remove(void)
+{
+ struct spdk_nvme_poll_group *group;
+ struct spdk_nvme_transport_poll_group *tgroup = NULL, *tmp_tgroup, *tgroup_1 = NULL,
+ *tgroup_2 = NULL,
+ *tgroup_4 = NULL;
+ struct spdk_nvme_qpair *qpair;
+ struct spdk_nvme_qpair qpair1_1 = {0};
+ struct spdk_nvme_qpair qpair1_2 = {0};
+ struct spdk_nvme_qpair qpair2_1 = {0};
+ struct spdk_nvme_qpair qpair2_2 = {0};
+ struct spdk_nvme_qpair qpair4_1 = {0};
+ struct spdk_nvme_qpair qpair4_2 = {0};
+ int i = 0;
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ CU_ASSERT(STAILQ_EMPTY(&group->tgroups));
+
+ /* Add qpairs to a single transport. */
+ qpair1_1.transport = &t1;
+ qpair1_1.state = NVME_QPAIR_DISCONNECTED;
+ qpair1_2.transport = &t1;
+ qpair1_2.state = NVME_QPAIR_ENABLED;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_2) == -EINVAL);
+ STAILQ_FOREACH(tmp_tgroup, &group->tgroups, link) {
+ if (tmp_tgroup->transport == &t1) {
+ tgroup = tmp_tgroup;
+ } else {
+ CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
+ }
+ i++;
+ }
+ CU_ASSERT(i == 1);
+ SPDK_CU_ASSERT_FATAL(tgroup != NULL);
+ qpair = STAILQ_FIRST(&tgroup->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+
+ /* Add qpairs to a second transport. */
+ qpair2_1.transport = &t2;
+ qpair2_2.transport = &t2;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair2_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair2_2) == 0);
+ qpair4_1.transport = &t4;
+ qpair4_2.transport = &t4;
+ /* Add qpairs for a transport that doesn't exist. */
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_1) == -ENODEV);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_2) == -ENODEV);
+ i = 0;
+ STAILQ_FOREACH(tmp_tgroup, &group->tgroups, link) {
+ if (tmp_tgroup->transport == &t1) {
+ tgroup_1 = tmp_tgroup;
+ } else if (tmp_tgroup->transport == &t2) {
+ tgroup_2 = tmp_tgroup;
+ } else {
+ CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
+ }
+ i++;
+ }
+ CU_ASSERT(i == 2);
+ SPDK_CU_ASSERT_FATAL(tgroup_1 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_1->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+ SPDK_CU_ASSERT_FATAL(tgroup_2 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_2->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_2);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+
+ /* Try removing a qpair that belongs to a transport not in our poll group. */
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair4_1) == -ENODEV);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t4, link);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_2) == 0);
+ STAILQ_FOREACH(tmp_tgroup, &group->tgroups, link) {
+ if (tmp_tgroup->transport == &t1) {
+ tgroup_1 = tmp_tgroup;
+ } else if (tmp_tgroup->transport == &t2) {
+ tgroup_2 = tmp_tgroup;
+ } else if (tmp_tgroup->transport == &t4) {
+ tgroup_4 = tmp_tgroup;
+ } else {
+ CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(tgroup_1 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_1->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+ SPDK_CU_ASSERT_FATAL(tgroup_2 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_2->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_2);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+ SPDK_CU_ASSERT_FATAL(tgroup_4 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_4->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair4_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair4_2);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+
+ /* remove all qpairs */
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair1_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair2_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair2_2) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair4_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair4_2) == 0);
+	/* All three transport poll groups (t1, t2, and t4) should exist and now be empty. */
+ i = 0;
+ STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
+ CU_ASSERT(STAILQ_EMPTY(&tgroup->connected_qpairs));
+ STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
+ free(tgroup);
+ i++;
+ }
+ CU_ASSERT(i == 3);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t3, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t4, link);
+}
+
+static void
+test_spdk_nvme_poll_group_process_completions(void)
+{
+ struct spdk_nvme_poll_group *group;
+ struct spdk_nvme_transport_poll_group *tgroup, *tmp_tgroup;
+ struct spdk_nvme_qpair qpair1_1 = {0};
+
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ /* If we don't have any transport poll groups, we shouldn't get any completions. */
+ g_process_completions_return_value = 32;
+ CU_ASSERT(spdk_nvme_poll_group_process_completions(group, 128,
+ unit_test_disconnected_qpair_cb) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+
+ /* try it with three transport poll groups. */
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ qpair1_1.state = NVME_QPAIR_DISCONNECTED;
+ qpair1_1.transport = &t1;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
+ qpair1_1.state = NVME_QPAIR_ENABLED;
+ CU_ASSERT(nvme_poll_group_connect_qpair(&qpair1_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_process_completions(group, 128,
+ unit_test_disconnected_qpair_cb) == 32);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair1_1) == 0);
+ STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
+ CU_ASSERT(STAILQ_EMPTY(&tgroup->connected_qpairs));
+ STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
+ free(tgroup);
+ }
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t3, link);
+}
+
+static void
+test_spdk_nvme_poll_group_destroy(void)
+{
+ struct spdk_nvme_poll_group *group;
+ struct spdk_nvme_transport_poll_group *tgroup, *tgroup_1, *tgroup_2;
+ struct spdk_nvme_qpair qpair1_1 = {0};
+ int num_tgroups = 0;
+
+ /* Simple destruction of empty poll group. */
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ qpair1_1.transport = &t1;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
+
+ /* Don't remove busy poll groups. */
+ g_destroy_return_value = -EBUSY;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == -EBUSY);
+ STAILQ_FOREACH(tgroup, &group->tgroups, link) {
+ num_tgroups++;
+ }
+ CU_ASSERT(num_tgroups == 1);
+
+ /* destroy poll group with internal poll groups. */
+ g_destroy_return_value = 0;
+ tgroup_1 = STAILQ_FIRST(&group->tgroups);
+ tgroup_2 = STAILQ_NEXT(tgroup_1, link);
+	CU_ASSERT(tgroup_2 == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+ free(tgroup_1);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "nvme_poll_group_create_test", test_spdk_nvme_poll_group_create) == NULL ||
+ CU_add_test(suite, "nvme_poll_group_add_remove_test",
+ test_spdk_nvme_poll_group_add_remove) == NULL ||
+ CU_add_test(suite, "nvme_poll_group_process_completions",
+ test_spdk_nvme_poll_group_process_completions) == NULL ||
+ CU_add_test(suite, "nvme_poll_group_destroy_test", test_spdk_nvme_poll_group_destroy) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore
new file mode 100644
index 000000000..1bb18e997
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore
@@ -0,0 +1 @@
+nvme_qpair_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile
new file mode 100644
index 000000000..d7762a384
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_qpair_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
new file mode 100644
index 000000000..e34c70413
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
@@ -0,0 +1,625 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
+pid_t g_spdk_nvme_pid;
+
+bool trace_flag = false;
+#define SPDK_LOG_NVME trace_flag
+
+#include "nvme/nvme_qpair.c"
+
+struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+DEFINE_STUB_V(nvme_transport_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
+DEFINE_STUB(nvme_transport_qpair_submit_request, int,
+ (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
+DEFINE_STUB(spdk_nvme_ctrlr_free_io_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
+DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair));
+DEFINE_STUB_V(nvme_ctrlr_disconnect_qpair, (struct spdk_nvme_qpair *qpair));
+
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+ if (hot_remove) {
+ ctrlr->is_removed = true;
+ }
+ ctrlr->is_failed = true;
+}
+
+static bool g_called_transport_process_completions = false;
+static int32_t g_transport_process_completions_rc = 0;
+int32_t
+nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ g_called_transport_process_completions = true;
+ return g_transport_process_completions_rc;
+}
+
+static void
+prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_ctrlr *ctrlr)
+{
+ memset(ctrlr, 0, sizeof(*ctrlr));
+ ctrlr->free_io_qids = NULL;
+ TAILQ_INIT(&ctrlr->active_io_qpairs);
+ TAILQ_INIT(&ctrlr->active_procs);
+ MOCK_CLEAR(spdk_zmalloc);
+ nvme_qpair_init(qpair, 1, ctrlr, 0, 32);
+}
+
+static void
+cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
+static void
+expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
+}
+
+static void
+expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
+}
+
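+/* Submitting a null (no payload) request on a healthy controller should succeed. */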
+static void
+test3(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
+
+ nvme_free_request(req);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_ctrlr_failed(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ char payload[4096];
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ /* Set the controller to failed.
+ * Set the controller to resetting so that the qpair won't get re-enabled.
+ */
+ ctrlr.is_failed = true;
+ ctrlr.is_resetting = true;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+struct_packing(void)
+{
+ /* ctrlr is the first field in nvme_qpair after the fields
+ * that are used in the I/O path. Make sure the I/O path fields
+ * all fit into two cache lines.
+ */
+ CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
+}
+
+static int g_num_cb_failed = 0;
+static int g_num_cb_passed = 0;
+
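+/* Completion callback that counts successful and failed completions for the tests below. */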
+static void
+dummy_cb_fn(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ if (cpl->status.sc == SPDK_NVME_SC_SUCCESS) {
+ g_num_cb_passed++;
+ } else {
+ g_num_cb_failed++;
+ }
+}
+
+static void
+test_nvme_qpair_process_completions(void)
+{
+ struct spdk_nvme_qpair admin_qp = {0};
+ struct spdk_nvme_qpair qpair = {0};
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct nvme_request dummy_1 = {{0}};
+ struct nvme_request dummy_2 = {{0}};
+ int rc;
+
+ dummy_1.cb_fn = dummy_cb_fn;
+ dummy_2.cb_fn = dummy_cb_fn;
+ dummy_1.qpair = &qpair;
+ dummy_2.qpair = &qpair;
+
+ TAILQ_INIT(&ctrlr.active_io_qpairs);
+ TAILQ_INIT(&ctrlr.active_procs);
+ nvme_qpair_init(&qpair, 1, &ctrlr, 0, 32);
+ nvme_qpair_init(&admin_qp, 0, &ctrlr, 0, 32);
+
+ ctrlr.adminq = &admin_qp;
+
+ STAILQ_INIT(&qpair.queued_req);
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
+
+ /* If the controller is failed, return -ENXIO */
+ ctrlr.is_failed = true;
+ ctrlr.is_removed = false;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* Same if the qpair is failed at the transport layer. */
+ ctrlr.is_failed = false;
+ ctrlr.is_removed = false;
+ qpair.state = NVME_QPAIR_DISCONNECTED;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* If the controller is removed, make sure we abort the requests. */
+ ctrlr.is_failed = true;
+ ctrlr.is_removed = true;
+ qpair.state = NVME_QPAIR_CONNECTED;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 2);
+
+ /* If we are resetting, make sure that we don't call into the transport. */
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
+ dummy_1.queued = true;
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
+ dummy_2.queued = true;
+ g_num_cb_failed = 0;
+ ctrlr.is_failed = false;
+ ctrlr.is_removed = false;
+ ctrlr.is_resetting = true;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == false);
+ /* We also need to make sure we didn't abort the requests. */
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* The case where we aren't resetting, but are enabling the qpair is the same as above. */
+ ctrlr.is_resetting = false;
+ qpair.state = NVME_QPAIR_ENABLING;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == false);
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* For other qpair states, we want to enable the qpair. */
+ qpair.state = NVME_QPAIR_CONNECTED;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ /* These should have been submitted to the lower layer. */
+ CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+ CU_ASSERT(nvme_qpair_get_state(&qpair) == NVME_QPAIR_ENABLED);
+
+ g_called_transport_process_completions = false;
+ g_transport_process_completions_rc = -ENXIO;
+
+ /* Fail the controller if we get an error from the transport on admin qpair. */
+ admin_qp.state = NVME_QPAIR_ENABLED;
+ rc = spdk_nvme_qpair_process_completions(&admin_qp, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ CU_ASSERT(ctrlr.is_failed == true);
+
+ /* Don't fail the controller for regular qpairs. */
+ ctrlr.is_failed = false;
+ g_called_transport_process_completions = false;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ CU_ASSERT(ctrlr.is_failed == false);
+
+ /* Make sure we don't modify the return value from the transport. */
+ ctrlr.is_failed = false;
+ g_called_transport_process_completions = false;
+ g_transport_process_completions_rc = 23;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == 23);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ CU_ASSERT(ctrlr.is_failed == false);
+
+ free(qpair.req_buf);
+ free(admin_qp.req_buf);
+}
+
+static void
+test_nvme_completion_is_retry(void)
+{
+ struct spdk_nvme_cpl cpl = {};
+
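+	/* Only NAMESPACE_NOT_READY, FORMAT_IN_PROGRESS, and INTERNAL_PATH_ERROR with DNR clear
+	 * are expected to be retried; every other status checked below must not be retried. */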
+ cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+ cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
+ cpl.status.dnr = 1;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = 0x70;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_PATH;
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_PATH;
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
+ cpl.status.dnr = 1;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = 0x4;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+}
+
+#ifdef DEBUG
+static void
+test_get_status_string(void)
+{
+ const char *status_string;
+ struct spdk_nvme_status status;
+
+ status.sct = SPDK_NVME_SCT_GENERIC;
+ status.sc = SPDK_NVME_SC_SUCCESS;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);
+
+ status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ status.sc = SPDK_NVME_SC_COMPLETION_QUEUE_INVALID;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);
+
+ status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ status.sc = SPDK_NVME_SC_UNRECOVERED_READ_ERROR;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);
+
+ status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ status.sc = 0;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);
+
+ status.sct = 0x4;
+ status.sc = 0;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
+}
+#endif
+
+static void
+test_nvme_qpair_add_cmd_error_injection(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ int rc;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ ctrlr.adminq = &qpair;
+
+ /* Admin error injection at submission path */
+ MOCK_CLEAR(spdk_zmalloc);
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
+ SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* IO error injection at completion path */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+	/* Provide the same opc again and verify that no new entry is allocated */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
+ CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_COMPARE, true, 0, 5,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_nvme_qpair_submit_request(void)
+{
+ int rc;
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_request *req, *req1, *req2, *req3, *req2_1, *req2_2, *req2_3;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ /*
+ * Build a request chain like the following:
+ * req
+ * |
+ * ---------------
+ * | | |
+ * req1 req2 req3
+ * |
+ * ---------------
+ * | | |
+ * req2_1 req2_2 req2_3
+ */
+ req = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req != NULL);
+ TAILQ_INIT(&req->children);
+
+ req1 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req1 != NULL);
+ req->num_children++;
+ TAILQ_INSERT_TAIL(&req->children, req1, child_tailq);
+ req1->parent = req;
+
+ req2 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2 != NULL);
+ TAILQ_INIT(&req2->children);
+ req->num_children++;
+ TAILQ_INSERT_TAIL(&req->children, req2, child_tailq);
+ req2->parent = req;
+
+ req3 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req3 != NULL);
+ req->num_children++;
+ TAILQ_INSERT_TAIL(&req->children, req3, child_tailq);
+ req3->parent = req;
+
+ req2_1 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2_1 != NULL);
+ req2->num_children++;
+ TAILQ_INSERT_TAIL(&req2->children, req2_1, child_tailq);
+ req2_1->parent = req2;
+
+ req2_2 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2_2 != NULL);
+ req2->num_children++;
+ TAILQ_INSERT_TAIL(&req2->children, req2_2, child_tailq);
+ req2_2->parent = req2;
+
+ req2_3 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2_3 != NULL);
+ req2->num_children++;
+ TAILQ_INSERT_TAIL(&req2->children, req2_3, child_tailq);
+ req2_3->parent = req2;
+
+ ctrlr.is_failed = true;
+ rc = nvme_qpair_submit_request(&qpair, req);
+ SPDK_CU_ASSERT_FATAL(rc == -ENXIO);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_nvme_qpair_resubmit_request_with_transport_failed(void)
+{
+ int rc;
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_request *req;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
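+	/* Queue one request, then force the transport submit path to fail so the resubmitted
+	 * request is aborted and its callback completes with an error. */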
+ req = nvme_allocate_request_null(&qpair, dummy_cb_fn, NULL);
+ CU_ASSERT(req != NULL);
+ TAILQ_INIT(&req->children);
+
+ STAILQ_INSERT_TAIL(&qpair.queued_req, req, stailq);
+ req->queued = true;
+
+ g_transport_process_completions_rc = 1;
+ qpair.state = NVME_QPAIR_ENABLED;
+ g_num_cb_failed = 0;
+ MOCK_SET(nvme_transport_qpair_submit_request, -EINVAL);
+ rc = spdk_nvme_qpair_process_completions(&qpair, g_transport_process_completions_rc);
+ MOCK_CLEAR(nvme_transport_qpair_submit_request);
+ CU_ASSERT(rc == g_transport_process_completions_rc);
+ CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_failed == 1);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_qpair", NULL, NULL);
+
+ CU_ADD_TEST(suite, test3);
+ CU_ADD_TEST(suite, test_ctrlr_failed);
+ CU_ADD_TEST(suite, struct_packing);
+ CU_ADD_TEST(suite, test_nvme_qpair_process_completions);
+ CU_ADD_TEST(suite, test_nvme_completion_is_retry);
+#ifdef DEBUG
+ CU_ADD_TEST(suite, test_get_status_string);
+#endif
+ CU_ADD_TEST(suite, test_nvme_qpair_add_cmd_error_injection);
+ CU_ADD_TEST(suite, test_nvme_qpair_submit_request);
+ CU_ADD_TEST(suite, test_nvme_qpair_resubmit_request_with_transport_failed);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore
new file mode 100644
index 000000000..eca86651b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore
@@ -0,0 +1 @@
+nvme_quirks_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile
new file mode 100644
index 000000000..d86887f0e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_quirks_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c
new file mode 100644
index 000000000..c3e799251
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c
@@ -0,0 +1,92 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_quirks.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+static void
+test_nvme_quirks_striping(void)
+{
+ struct spdk_pci_id pci_id = {};
+ uint64_t quirks = 0;
+
+ /* Non-Intel device should not have striping enabled */
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) == 0);
+
+ /* Set the vendor id to Intel, but no device id. No striping. */
+ pci_id.class_id = SPDK_PCI_CLASS_NVME;
+ pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) == 0);
+
+ /* Device ID 0x0953 should have striping enabled */
+ pci_id.device_id = 0x0953;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+
+ /* Even if specific subvendor/subdevice ids are set,
+ * striping should be enabled.
+ */
+ pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.subdevice_id = 0x3704;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+
+ pci_id.subvendor_id = 1234;
+ pci_id.subdevice_id = 42;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_quirks", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_quirks_striping);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore
new file mode 100644
index 000000000..66265b955
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore
@@ -0,0 +1 @@
+nvme_rdma_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile
new file mode 100644
index 000000000..7ea42632b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_rdma_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c
new file mode 100644
index 000000000..8342e84d3
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c
@@ -0,0 +1,406 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "nvme/nvme_rdma.c"
+#include "common/lib/nvme/common_stubs.h"
+#include "common/lib/test_rdma.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size, uint64_t translation), 0);
+DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size), 0);
+
+DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
+ const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
+DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
+
+DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
+
+DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
+DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
+ uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0)
+
+/* used to mock out having to split an SGL over a memory region */
+uint64_t g_mr_size;
+struct ibv_mr g_nvme_rdma_mr;
+
+uint64_t
+spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
+{
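+ /* When g_mr_size is non-zero, report it as the remaining length of the
+ * translated region so a buffer can be made to appear to span a memory
+ * region boundary.
+ */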
+ if (g_mr_size != 0) {
+ *(uint32_t *)size = g_mr_size;
+ }
+
+ return (uint64_t)&g_nvme_rdma_mr;
+}
+
+struct nvme_rdma_ut_bdev_io {
+ struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
+ int iovpos;
+};
+
+/* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
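+/* reset_sgl repositions iovpos to the iov that starts at the requested payload
+ * offset; next_sge then hands back one iov at a time.
+ */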
+static void nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
+{
+ struct nvme_rdma_ut_bdev_io *bio = cb_arg;
+ struct iovec *iov;
+
+ for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
+ iov = &bio->iovs[bio->iovpos];
+ /* Only provide offsets at the beginning of an iov */
+ if (offset == 0) {
+ break;
+ }
+
+ offset -= iov->iov_len;
+ }
+
+ SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
+}
+
+static int nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+ struct nvme_rdma_ut_bdev_io *bio = cb_arg;
+ struct iovec *iov;
+
+ SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
+
+ iov = &bio->iovs[bio->iovpos];
+
+ *address = iov->iov_base;
+ *length = iov->iov_len;
+ bio->iovpos++;
+
+ return 0;
+}
+
+static void
+test_nvme_rdma_build_sgl_request(void)
+{
+ struct nvme_rdma_qpair rqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvmf_cmd cmd = {{0}};
+ struct spdk_nvme_rdma_req rdma_req = {0};
+ struct nvme_request req = {{0}};
+ struct nvme_rdma_ut_bdev_io bio;
+ struct spdk_nvme_rdma_mr_map rmap = {0};
+ struct spdk_mem_map *map = NULL;
+ uint64_t i;
+ int rc;
+
+ rmap.map = map;
+
+ ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+ ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+ rqpair.mr_map = &rmap;
+ rqpair.qpair.ctrlr = &ctrlr;
+ rqpair.cmds = &cmd;
+ cmd.sgl[0].address = 0x1111;
+ rdma_req.id = 0;
+ rdma_req.req = &req;
+
+ req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
+ req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
+ req.payload.contig_or_cb_arg = &bio;
+ req.qpair = &rqpair.qpair;
+
+ g_nvme_rdma_mr.rkey = 1;
+
+ for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
+ bio.iovs[i].iov_base = (void *)i;
+ bio.iovs[i].iov_len = 0;
+ }
+
+ /* Test case 1: single SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ bio.iovs[0].iov_len = 0x1000;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 1);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == g_nvme_rdma_mr.rkey);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+
+ /* Test case 2: multiple SGL. Expected: PASS */
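+ /* With more than one SGE, sgl1 is expected to become a last-segment
+ * descriptor whose list of keyed data-block descriptors is carried in
+ * cmd.sgl[] and sent along with the command, as asserted below.
+ */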
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x4000;
+ for (i = 0; i < 4; i++) {
+ bio.iovs[i].iov_len = 0x1000;
+ }
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 4);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
+ CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
+ CU_ASSERT(rdma_req.send_sgl[0].length == 4 * sizeof(struct spdk_nvme_sgl_descriptor) + sizeof(
+ struct spdk_nvme_cmd));
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+ CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+ CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
+ CU_ASSERT(cmd.sgl[i].keyed.key == g_nvme_rdma_mr.rkey);
+ CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
+ }
+
+ /* Test case 3: Multiple SGL where each SGE is twice the reported MR size, so it cannot be described by a single keyed SGL. Expected: FAIL */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ g_mr_size = 0x800;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+ CU_ASSERT(bio.iovpos == 1);
+
+ /* Test case 4: Multiple SGL, SGL size smaller than I/O size. Expected: FAIL */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x6000;
+ g_mr_size = 0x0;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+ CU_ASSERT(bio.iovpos == NVME_RDMA_MAX_SGL_DESCRIPTORS);
+
+ /* Test case 5: an SGE length exceeds the 3-byte length field of a keyed SGL descriptor. Expected: FAIL */
+ req.payload_size = 0x1000 + (1 << 24);
+ bio.iovs[0].iov_len = 0x1000;
+ bio.iovs[1].iov_len = 1 << 24;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+}
+
+static void
+test_nvme_rdma_build_sgl_inline_request(void)
+{
+ struct nvme_rdma_qpair rqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvmf_cmd cmd = {{0}};
+ struct spdk_nvme_rdma_req rdma_req = {0};
+ struct nvme_request req = {{0}};
+ struct nvme_rdma_ut_bdev_io bio;
+ struct spdk_nvme_rdma_mr_map rmap = {0};
+ struct spdk_mem_map *map = NULL;
+ int rc;
+
+ rmap.map = map;
+
+ ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+ ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+ rqpair.mr_map = &rmap;
+ rqpair.qpair.ctrlr = &ctrlr;
+ rqpair.cmds = &cmd;
+ cmd.sgl[0].address = 0x1111;
+ rdma_req.id = 0;
+ rdma_req.req = &req;
+
+ req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
+ req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
+ req.payload.contig_or_cb_arg = &bio;
+ req.qpair = &rqpair.qpair;
+
+ g_nvme_rdma_mr.lkey = 2;
+
+ /* Test case 1: single inline SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ bio.iovs[0].iov_base = (void *)0xdeadbeef;
+ bio.iovs[0].iov_len = 0x1000;
+ rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 1);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+ CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+ CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
+ CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+
+ /* Test case 2: SGL length exceeds the 3-byte keyed-SGL limit, which is allowed for inline (unkeyed) data. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 1 << 24;
+ bio.iovs[0].iov_len = 1 << 24;
+ rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 1);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+ CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+ CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
+ CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+}
+
+static void
+test_nvme_rdma_build_contig_request(void)
+{
+ struct nvme_rdma_qpair rqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvmf_cmd cmd = {{0}};
+ struct spdk_nvme_rdma_req rdma_req = {0};
+ struct nvme_request req = {{0}};
+ struct spdk_nvme_rdma_mr_map rmap = {0};
+ struct spdk_mem_map *map = NULL;
+ int rc;
+
+ rmap.map = map;
+
+ ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+ ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+ rqpair.mr_map = &rmap;
+ rqpair.qpair.ctrlr = &ctrlr;
+ rqpair.cmds = &cmd;
+ cmd.sgl[0].address = 0x1111;
+ rdma_req.id = 0;
+ rdma_req.req = &req;
+
+ req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
+ req.qpair = &rqpair.qpair;
+
+ g_nvme_rdma_mr.rkey = 2;
+
+ /* Test case 1: contig request. Expected: PASS */
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == g_nvme_rdma_mr.rkey);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+
+ /* Test case 2: payload length exceeds the 3-byte length field of a keyed SGL descriptor. Expected: FAIL */
+ req.payload_offset = 0;
+ req.payload_size = 1 << 24;
+ rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+}
+
+static void
+test_nvme_rdma_build_contig_inline_request(void)
+{
+ struct nvme_rdma_qpair rqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvmf_cmd cmd = {{0}};
+ struct spdk_nvme_rdma_req rdma_req = {0};
+ struct nvme_request req = {{0}};
+ struct spdk_nvme_rdma_mr_map rmap = {0};
+ struct spdk_mem_map *map = NULL;
+ int rc;
+
+ rmap.map = map;
+
+ ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+ ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+ rqpair.mr_map = &rmap;
+ rqpair.qpair.ctrlr = &ctrlr;
+ rqpair.cmds = &cmd;
+ cmd.sgl[0].address = 0x1111;
+ rdma_req.id = 0;
+ rdma_req.req = &req;
+
+ req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
+ req.qpair = &rqpair.qpair;
+
+ g_nvme_rdma_mr.rkey = 2;
+
+ /* Test case 1: single inline SGL. Expected: PASS */
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+ CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+ CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
+ CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+
+ /* Test case 2: payload length exceeds the 3-byte keyed-SGL limit, which is allowed for inline data. Expected: PASS */
+ req.payload_offset = 0;
+ req.payload_size = 1 << 24;
+ rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+ CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+ CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
+ CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_rdma", NULL, NULL);
+ CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_request);
+ CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_inline_request);
+ CU_ADD_TEST(suite, test_nvme_rdma_build_contig_request);
+ CU_ADD_TEST(suite, test_nvme_rdma_build_contig_inline_request);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore
new file mode 100644
index 000000000..c0cf6e92c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore
@@ -0,0 +1 @@
+nvme_tcp_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile
new file mode 100644
index 000000000..612f2b793
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_tcp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c
new file mode 100644
index 000000000..ed817fe2d
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c
@@ -0,0 +1,459 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_sock.c"
+
+#include "nvme/nvme_tcp.c"
+#include "common/lib/nvme/common_stubs.h"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME);
+
+DEFINE_STUB(nvme_qpair_submit_request,
+ int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
+
+DEFINE_STUB(spdk_sock_set_priority,
+ int, (struct spdk_sock *sock, int priority), 0);
+
+DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
+ struct spdk_nvme_qpair *qpair), 0);
+
+static void
+test_nvme_tcp_pdu_set_data_buf(void)
+{
+ struct nvme_tcp_pdu pdu = {};
+ struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {};
+ uint32_t data_len;
+ uint64_t i;
+
+ /* 1st case: input is a single SGL entry. */
+ iov[0].iov_base = (void *)0xDEADBEEF;
+ iov[0].iov_len = 4096;
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512);
+
+ CU_ASSERT(pdu.data_iovcnt == 1);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 512);
+
+ /* 2nd case: simulate split on multiple SGL entries. */
+ iov[0].iov_base = (void *)0xDEADBEEF;
+ iov[0].iov_len = 4096;
+ iov[1].iov_base = (void *)0xFEEDBEEF;
+ iov[1].iov_len = 512 * 7;
+ iov[2].iov_base = (void *)0xF00DF00D;
+ iov[2].iov_len = 4096 * 2;
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048);
+
+ CU_ASSERT(pdu.data_iovcnt == 1);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3);
+
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
+ CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
+ CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3);
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2);
+
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4);
+ CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D);
+ CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2);
+
+ /* 3rd case: Number of input SGL entries is equal to the number of PDU SGL
+ * entries.
+ */
+ data_len = 0;
+ for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+ iov[i].iov_base = (void *)(0xDEADBEEF + i);
+ iov[i].iov_len = 512 * (i + 1);
+ data_len += 512 * (i + 1);
+ }
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len);
+
+ CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS);
+ for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+ CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i);
+ CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1));
+ }
+}
+
+static void
+test_nvme_tcp_build_iovs(void)
+{
+ const uintptr_t pdu_iov_len = 4096;
+ struct nvme_tcp_pdu pdu = {};
+ struct iovec iovs[5] = {};
+ uint32_t mapped_length = 0;
+ int rc;
+
+ pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
+ pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
+ pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
+ SPDK_NVME_TCP_DIGEST_LEN;
+ pdu.data_len = pdu_iov_len * 2;
+ pdu.padding_len = 0;
+
+ pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
+ pdu.data_iov[0].iov_len = pdu_iov_len;
+ pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
+ pdu.data_iov[1].iov_len = pdu_iov_len;
+ pdu.data_iovcnt = 2;
+
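+ /* Expected layout from nvme_tcp_build_iovs(): iovs[0] carries the PDU header
+ * plus header digest, followed by one entry per data_iov, with the data
+ * digest as the final entry.
+ */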
+ rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
+ CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
+ CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
+ CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
+ CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
+ CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
+ pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);
+
+ /* Add a new data_iov entry, update pdu iov count and data length */
+ pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
+ pdu.data_iov[2].iov_len = 123;
+ pdu.data_iovcnt = 3;
+ pdu.data_len += 123;
+ pdu.hdr.common.plen += 123;
+
+ rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
+ CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
+ CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
+ CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
+ CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
+ CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
+ CU_ASSERT(iovs[3].iov_len == 123);
+ CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
+ pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
+}
+
+struct nvme_tcp_ut_bdev_io {
+ struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];
+ int iovpos;
+};
+
+/* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
+static void
+nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset)
+{
+ struct nvme_tcp_ut_bdev_io *bio = cb_arg;
+ struct iovec *iov;
+
+ for (bio->iovpos = 0; bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
+ iov = &bio->iovs[bio->iovpos];
+ /* Offset must be aligned with the start of any SGL entry */
+ if (offset == 0) {
+ break;
+ }
+
+ SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
+ offset -= iov->iov_len;
+ }
+
+ SPDK_CU_ASSERT_FATAL(offset == 0);
+ SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
+}
+
+static int
+nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+ struct nvme_tcp_ut_bdev_io *bio = cb_arg;
+ struct iovec *iov;
+
+ SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
+
+ iov = &bio->iovs[bio->iovpos];
+
+ *address = iov->iov_base;
+ *length = iov->iov_len;
+ bio->iovpos++;
+
+ return 0;
+}
+
+static void
+test_nvme_tcp_build_sgl_request(void)
+{
+ struct nvme_tcp_qpair tqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct nvme_tcp_req tcp_req = {0};
+ struct nvme_request req = {{0}};
+ struct nvme_tcp_ut_bdev_io bio;
+ uint64_t i;
+ int rc;
+
+ ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
+ tqpair.qpair.ctrlr = &ctrlr;
+ tcp_req.req = &req;
+
+ req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
+ req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
+ req.payload.contig_or_cb_arg = &bio;
+ req.qpair = &tqpair.qpair;
+
+ for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+ bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000);
+ bio.iovs[i].iov_len = 0;
+ }
+
+ /* Test case 1: Single SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ bio.iovs[0].iov_len = 0x1000;
+ rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 1);
+ CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base);
+ CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len);
+ CU_ASSERT(tcp_req.iovcnt == 1);
+
+ /* Test case 2: Multiple SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x4000;
+ for (i = 0; i < 4; i++) {
+ bio.iovs[i].iov_len = 0x1000;
+ }
+ rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 4);
+ CU_ASSERT(tcp_req.iovcnt == 4);
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
+ CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
+ }
+
+ /* Test case 3: Payload is larger than the SGL iovecs can describe. Expected: FAIL */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x17000;
+ for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+ bio.iovs[i].iov_len = 0x1000;
+ }
+ rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+ CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS);
+ for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+ CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
+ CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
+ }
+}
+
+static void
+test_nvme_tcp_pdu_set_data_buf_with_md(void)
+{
+ struct nvme_tcp_pdu pdu = {};
+ struct iovec iovs[7] = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ int rc;
+
+ pdu.dif_ctx = &dif_ctx;
+
+ rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
+ 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
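+ /* dif_ctx describes 520-byte extended blocks: 512 bytes of data followed by
+ * 8 bytes of interleaved metadata, so the buffer offsets and lengths asserted
+ * below include the metadata regions.
+ */
+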
+ /* Single iovec case */
+ iovs[0].iov_base = (void *)0xDEADBEEF;
+ iovs[0].iov_len = 2080;
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);
+
+ CU_ASSERT(dif_ctx.data_offset == 0);
+ CU_ASSERT(pdu.data_len == 500);
+ CU_ASSERT(pdu.data_iovcnt == 1);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 500);
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);
+
+ CU_ASSERT(dif_ctx.data_offset == 500);
+ CU_ASSERT(pdu.data_len == 1000);
+ CU_ASSERT(pdu.data_iovcnt == 1);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
+ CU_ASSERT(pdu.data_iov[0].iov_len == 1016);
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);
+
+ CU_ASSERT(dif_ctx.data_offset == 1500);
+ CU_ASSERT(pdu.data_len == 548);
+ CU_ASSERT(pdu.data_iovcnt == 1);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
+ CU_ASSERT(pdu.data_iov[0].iov_len == 564);
+
+ /* Multiple iovecs case */
+ iovs[0].iov_base = (void *)0xDEADBEEF;
+ iovs[0].iov_len = 256;
+ iovs[1].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x1000));
+ iovs[1].iov_len = 256 + 1;
+ iovs[2].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x2000));
+ iovs[2].iov_len = 4;
+ iovs[3].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x3000));
+ iovs[3].iov_len = 3 + 123;
+ iovs[4].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x4000));
+ iovs[4].iov_len = 389 + 6;
+ iovs[5].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x5000));
+ iovs[5].iov_len = 2 + 512 + 8 + 432;
+ iovs[6].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x6000));
+ iovs[6].iov_len = 80 + 8;
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);
+
+ CU_ASSERT(dif_ctx.data_offset == 0);
+ CU_ASSERT(pdu.data_len == 500);
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 256);
+ CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
+ CU_ASSERT(pdu.data_iov[1].iov_len == 244);
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);
+
+ CU_ASSERT(dif_ctx.data_offset == 500);
+ CU_ASSERT(pdu.data_len == 1000);
+ CU_ASSERT(pdu.data_iovcnt == 5);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
+ CU_ASSERT(pdu.data_iov[0].iov_len == 13);
+ CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
+ CU_ASSERT(pdu.data_iov[1].iov_len == 4);
+ CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
+ CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
+ CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
+ CU_ASSERT(pdu.data_iov[3].iov_len == 395);
+ CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
+ CU_ASSERT(pdu.data_iov[4].iov_len == 478);
+
+ nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);
+
+ CU_ASSERT(dif_ctx.data_offset == 1500);
+ CU_ASSERT(pdu.data_len == 548);
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
+ CU_ASSERT(pdu.data_iov[0].iov_len == 476);
+ CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
+ CU_ASSERT(pdu.data_iov[1].iov_len == 88);
+}
+
+static void
+test_nvme_tcp_build_iovs_with_md(void)
+{
+ struct nvme_tcp_pdu pdu = {};
+ struct iovec iovs[11] = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ uint32_t mapped_length = 0;
+ int rc;
+
+ rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
+ 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ pdu.dif_ctx = &dif_ctx;
+
+ pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
+ pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
+ pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
+ SPDK_NVME_TCP_DIGEST_LEN;
+ pdu.data_len = 512 * 8;
+ pdu.padding_len = 0;
+
+ pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
+ pdu.data_iov[0].iov_len = (512 + 8) * 8;
+ pdu.data_iovcnt = 1;
+
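+ /* The single (512 + 8) * 8 byte extended-LBA buffer should be split into
+ * eight 512-byte data iovecs at 520-byte strides, skipping the 8 bytes of
+ * metadata after each block, as asserted below.
+ */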
+ rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
+ CU_ASSERT(rc == 10);
+ CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
+ CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
+ CU_ASSERT(iovs[1].iov_len == 512);
+ CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
+ CU_ASSERT(iovs[2].iov_len == 512);
+ CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
+ CU_ASSERT(iovs[3].iov_len == 512);
+ CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
+ CU_ASSERT(iovs[4].iov_len == 512);
+ CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
+ CU_ASSERT(iovs[5].iov_len == 512);
+ CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
+ CU_ASSERT(iovs[6].iov_len == 512);
+ CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
+ CU_ASSERT(iovs[7].iov_len == 512);
+ CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
+ CU_ASSERT(iovs[8].iov_len == 512);
+ CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
+ CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
+ CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
+ 512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_tcp", NULL, NULL);
+ CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf);
+ CU_ADD_TEST(suite, test_nvme_tcp_build_iovs);
+ CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request);
+ CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md);
+ CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore
new file mode 100644
index 000000000..1cb0d98ad
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore
@@ -0,0 +1 @@
+nvme_uevent_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile
new file mode 100644
index 000000000..98687efb8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_uevent_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c
new file mode 100644
index 000000000..a9775c983
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c
@@ -0,0 +1,165 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "common/lib/test_env.c"
+
+#include "nvme/nvme_uevent.c"
+
+#ifdef __linux__
+
+enum uevent_parse_event_return_type {
+ uevent_abnormal_exit = -1,
+ uevent_normal_exit = 0,
+ uevent_expected_continue = 1
+};
+
+#define SPDK_NVME_UEVENT_SUBSYSTEM_NULL 0xFF
+
+static void
+test_nvme_uevent_parse_event(void)
+{
+ char *commands;
+ struct spdk_uevent uevent = {};
+ int rc = uevent_normal_exit;
+
+ /* Simulate kernel uevent messages and check the expected parsing results. */
+ /* The kernel delivers a uevent as a series of null-terminated KEY=value strings. */
+
+ /* Case 1: Add event whose SUBSYSTEM is neither uio nor vfio-pci: /devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0 */
+ commands =
+ "ACTION=add\0DEVPATH=/devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0\0SUBSYSTEM= \0DRIVER= \0PCI_SLOT_NAME= \0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+ uevent.action = 0;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_abnormal_exit);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_NULL);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 2: Add uio /devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0 */
+ commands =
+ "ACTION=add \0DEVPATH=/devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0\0SUBSYSTEM=uio\0DRIVER=\0PCI_SLOT_NAME= \0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+ uevent.action = 0;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_UIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 3: Remove uio /devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0 */
+ commands =
+ "ACTION=remove\0DEVPATH=/devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0\0SUBSYSTEM=uio\0DRIVER=\0PCI_SLOT_NAME= \0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_UIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_REMOVE);
+
+ /* Case 4: Add vfio-pci 0000:81:00.0 */
+ commands = "ACTION=bind\0DEVPATH=\0SUBSYSTEM= \0DRIVER=vfio-pci\0PCI_SLOT_NAME=0000:81:00.0\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_VFIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 5: Remove vfio-pci 0000:81:00.0 */
+ commands = "ACTION=remove\0DEVPATH= \0SUBSYSTEM= \0DRIVER=vfio-pci \0PCI_SLOT_NAME=0000:81:00.0\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_VFIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_REMOVE);
+
+ /* Case 6: vfio-pci bind event with a malformed PCI address (000000) */
+ commands = "ACTION=bind\0DEVPATH= \0SUBSYSTEM= \0DRIVER=vfio-pci \0PCI_SLOT_NAME=000000\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_abnormal_exit);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_VFIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 7: bind event for driver "vfio" rather than vfio-pci, which is not recognized */
+ commands = "ACTION=bind\0DEVPATH= \0SUBSYSTEM= \0DRIVER=vfio \0PCI_SLOT_NAME=0000:81:00.0\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+ uevent.action = 0;
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_abnormal_exit);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_NULL);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+}
+
+#else
+
+static void
+test_nvme_uevent_parse_event(void)
+{
+ CU_ASSERT(1);
+}
+
+#endif
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_uevent", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_uevent_parse_event);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/Makefile b/src/spdk/test/unit/lib/nvmf/Makefile
new file mode 100644
index 000000000..94d5dde63
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/Makefile
@@ -0,0 +1,48 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = tcp.c ctrlr.c subsystem.c ctrlr_discovery.c ctrlr_bdev.c
+
+DIRS-$(CONFIG_RDMA) += rdma.c
+
+DIRS-$(CONFIG_FC) += fc.c fc_ls.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore
new file mode 100644
index 000000000..65e849431
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile
new file mode 100644
index 000000000..c68c589ab
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ctrlr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c
new file mode 100644
index 000000000..1da8f9d54
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c
@@ -0,0 +1,1711 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+#include "spdk_internal/thread.h"
+
+#include "common/lib/ut_multithread.c"
+#include "nvmf/ctrlr.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+struct spdk_bdev {
+ int ut_mock;
+ uint64_t blockcnt;
+};
+
+const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
+const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";
+
+DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
+ struct spdk_nvmf_subsystem *,
+ (struct spdk_nvmf_tgt *tgt, const char *subnqn),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_poll_group_create,
+ struct spdk_nvmf_poll_group *,
+ (struct spdk_nvmf_tgt *tgt),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
+ const char *,
+ (const struct spdk_nvmf_subsystem *subsystem),
+ subsystem_default_sn);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
+ const char *,
+ (const struct spdk_nvmf_subsystem *subsystem),
+ subsystem_default_mn);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
+ true);
+
+DEFINE_STUB(nvmf_subsystem_add_ctrlr,
+ int,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
+ 0);
+
+DEFINE_STUB(nvmf_subsystem_get_ctrlr,
+ struct spdk_nvmf_ctrlr *,
+ (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
+ NULL);
+
+DEFINE_STUB(nvmf_ctrlr_dsm_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB_V(nvmf_get_discovery_log_page,
+ (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
+ uint32_t iovcnt, uint64_t offset, uint32_t length));
+
+DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
+ int,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
+ 0);
+
+DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
+ true);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_transport_req_complete,
+ int,
+ (struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
+
+DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
+ (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
+ struct spdk_dif_ctx *dif_ctx),
+ true);
+
+DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
+
+DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
+
+int
+spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
+{
+ return 0;
+}
+
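+/* Minimal stub: report the bdev's block count and a single 512-byte LBA format. */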
+void
+nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
+ bool dif_insert_or_strip)
+{
+ uint64_t num_blocks;
+
+ SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
+ num_blocks = ns->bdev->blockcnt;
+ nsdata->nsze = num_blocks;
+ nsdata->ncap = num_blocks;
+ nsdata->nuse = num_blocks;
+ nsdata->nlbaf = 0;
+ nsdata->flbas.format = 0;
+ nsdata->lbaf[0].lbads = spdk_u32log2(512);
+}
+
+static void
+test_get_log_page(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_request req = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ char data[4096];
+
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ ctrlr.subsys = &subsystem;
+
+ qpair.ctrlr = &ctrlr;
+
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.data = &data;
+ req.length = sizeof(data);
+
+ /* Get Log Page - all valid */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+
+ /* Get Log Page with invalid log ID */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10 = 0;
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Get Log Page with invalid offset (not dword aligned) */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
+ cmd.nvme_cmd.cdw12 = 2;
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Get Log Page without data buffer */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ req.data = NULL;
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+ req.data = data;
+}
+
+static void
+test_process_fabrics_cmd(void)
+{
+ struct spdk_nvmf_request req = {};
+ int ret;
+ struct spdk_nvmf_qpair req_qpair = {};
+ union nvmf_h2c_msg req_cmd = {};
+ union nvmf_c2h_msg req_rsp = {};
+
+ req.qpair = &req_qpair;
+ req.cmd = &req_cmd;
+ req.rsp = &req_rsp;
+ req.qpair->ctrlr = NULL;
+
+ /* No ctrlr and invalid command check */
+ req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
+ ret = nvmf_ctrlr_process_fabrics_cmd(&req);
+ CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
+ CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+}
+
+static bool
+nvme_status_success(const struct spdk_nvme_status *status)
+{
+ return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
+}
+
+static void
+test_connect(void)
+{
+ struct spdk_nvmf_fabric_connect_data connect_data;
+ struct spdk_nvmf_poll_group group;
+ struct spdk_nvmf_subsystem_poll_group *sgroups;
+ struct spdk_nvmf_transport transport;
+ struct spdk_nvmf_transport_ops tops = {};
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_qpair admin_qpair;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_qpair qpair2;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_tgt tgt;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ const uint8_t hostid[16] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ };
+ const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
+ const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
+ int rc;
+
+ memset(&group, 0, sizeof(group));
+ group.thread = spdk_get_thread();
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.qpair_mask = spdk_bit_array_create(3);
+ SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
+ ctrlr.vcprop.cc.bits.en = 1;
+ ctrlr.vcprop.cc.bits.iosqes = 6;
+ ctrlr.vcprop.cc.bits.iocqes = 4;
+
+ memset(&admin_qpair, 0, sizeof(admin_qpair));
+ admin_qpair.group = &group;
+
+ memset(&tgt, 0, sizeof(tgt));
+ memset(&transport, 0, sizeof(transport));
+ transport.ops = &tops;
+ transport.opts.max_aq_depth = 32;
+ transport.opts.max_queue_depth = 64;
+ transport.opts.max_qpairs_per_ctrlr = 3;
+ transport.tgt = &tgt;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.transport = &transport;
+ qpair.group = &group;
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ TAILQ_INIT(&qpair.outstanding);
+
+ memset(&connect_data, 0, sizeof(connect_data));
+ memcpy(connect_data.hostid, hostid, sizeof(hostid));
+ connect_data.cntlid = 0xFFFF;
+ snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
+ snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ subsystem.thread = spdk_get_thread();
+ subsystem.id = 1;
+ TAILQ_INIT(&subsystem.ctrlrs);
+ subsystem.tgt = &tgt;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
+
+ sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
+ group.sgroups = sgroups;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
+ cmd.connect_cmd.cid = 1;
+ cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
+ cmd.connect_cmd.recfmt = 0;
+ cmd.connect_cmd.qid = 0;
+ cmd.connect_cmd.sqsize = 31;
+ cmd.connect_cmd.cattr = 0;
+ cmd.connect_cmd.kato = 120000;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.length = sizeof(connect_data);
+ req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
+ req.data = &connect_data;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
+ MOCK_SET(spdk_nvmf_poll_group_create, &group);
+
+ /* Valid admin connect command */
+ memset(&rsp, 0, sizeof(rsp));
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+
+ /* Valid admin connect command with kato = 0 */
+ cmd.connect_cmd.kato = 0;
+ memset(&rsp, 0, sizeof(rsp));
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+ cmd.connect_cmd.kato = 120000;
+
+ /* Invalid data length */
+ memset(&rsp, 0, sizeof(rsp));
+ req.length = sizeof(connect_data) - 1;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ req.length = sizeof(connect_data);
+
+ /* Invalid recfmt */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.recfmt = 1234;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.recfmt = 0;
+
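+ /* In the invalid-parameter completions below, iattr 0 identifies an offset
+ * (ipo) within the Connect command itself (e.g. 42 for QID, 44 for SQSIZE),
+ * while iattr 1 identifies an offset within the Connect data (16 for CNTLID,
+ * 256 for SUBNQN, 512 for HOSTNQN).
+ */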
+ /* Subsystem not found */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
+
+ /* Unterminated hostnqn */
+ memset(&rsp, 0, sizeof(rsp));
+ memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
+
+ /* Host not allowed */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);
+
+ /* Invalid sqsize == 0 */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.sqsize = 0;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid admin sqsize > max_aq_depth */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.sqsize = 32;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid I/O sqsize > max_queue_depth */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.qid = 1;
+ cmd.connect_cmd.sqsize = 64;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.qid = 0;
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid cntlid for admin queue */
+ memset(&rsp, 0, sizeof(rsp));
+ connect_data.cntlid = 0x1234;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ connect_data.cntlid = 0xFFFF;
+
+ ctrlr.admin_qpair = &admin_qpair;
+ ctrlr.subsys = &subsystem;
+
+ /* Valid I/O queue connect command */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
+ cmd.connect_cmd.qid = 1;
+ cmd.connect_cmd.sqsize = 63;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr == &ctrlr);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ qpair.ctrlr = NULL;
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Non-existent controller */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
+
+ /* I/O connect to discovery controller */
+ memset(&rsp, 0, sizeof(rsp));
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+
+ /* Admin connect to discovery controller with keep-alive-timeout != 0 */
+ cmd.connect_cmd.qid = 0;
+ cmd.connect_cmd.kato = 120000;
+ memset(&rsp, 0, sizeof(rsp));
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL);
+ CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+
+ /* Admin connect to discovery controller with keep-alive-timeout == 0.
+ * In this case, a fixed default value is applied as the keep-alive timeout.
+ */
+ cmd.connect_cmd.kato = 0;
+ memset(&rsp, 0, sizeof(rsp));
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL);
+ CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+ cmd.connect_cmd.qid = 1;
+ cmd.connect_cmd.kato = 120000;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ /* I/O connect to disabled controller */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.en = 0;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ /* I/O connect with invalid IOSQES */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.iosqes = 3;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ ctrlr.vcprop.cc.bits.iosqes = 6;
+
+ /* I/O connect with invalid IOCQES */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.iocqes = 3;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ ctrlr.vcprop.cc.bits.iocqes = 4;
+
+ /* I/O connect with too many existing qpairs */
+ memset(&rsp, 0, sizeof(rsp));
+ spdk_bit_array_set(ctrlr.qpair_mask, 0);
+ spdk_bit_array_set(ctrlr.qpair_mask, 1);
+ spdk_bit_array_set(ctrlr.qpair_mask, 2);
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 0);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 1);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 2);
+
+ /* I/O connect with duplicate queue ID */
+ memset(&rsp, 0, sizeof(rsp));
+ memset(&qpair2, 0, sizeof(qpair2));
+ qpair2.group = &group;
+ qpair2.qid = 1;
+ spdk_bit_array_set(ctrlr.qpair_mask, 1);
+ cmd.connect_cmd.qid = 1;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+
+ /* Clean up globals */
+ MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
+ MOCK_CLEAR(spdk_nvmf_poll_group_create);
+
+ spdk_bit_array_free(&ctrlr.qpair_mask);
+ free(sgroups);
+}
+
+static void
+test_get_ns_id_desc_list(void)
+{
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_ns *ns_ptrs[1];
+ struct spdk_nvmf_ns ns;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ struct spdk_bdev bdev;
+ uint8_t buf[4096];
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ ns_ptrs[0] = &ns;
+ subsystem.ns = ns_ptrs;
+ subsystem.max_nsid = 1;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ memset(&ns, 0, sizeof(ns));
+ ns.opts.nsid = 1;
+ ns.bdev = &bdev;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.ctrlr = &ctrlr;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+ req.data = buf;
+ req.length = sizeof(buf);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
+ cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
+
+ /* Invalid NSID */
+ cmd.nvme_cmd.nsid = 0;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+
+ /* Valid NSID, but ns has no IDs defined */
+ cmd.nvme_cmd.nsid = 1;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));
+
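+ /* Each returned NS ID descriptor is laid out as NIDT (1 byte), NIDL (1 byte),
+ * two reserved bytes and then NIDL bytes of identifier, so the identifier data
+ * starts at offset 4 of each descriptor and a zero NIDT byte terminates the list.
+ */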
+ /* Valid NSID, only EUI64 defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[13] == 0);
+
+ /* Valid NSID, only NGUID defined */
+ memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[1] == 16);
+ CU_ASSERT(buf[4] == 0x22);
+ CU_ASSERT(buf[19] == 0xEE);
+ CU_ASSERT(buf[21] == 0);
+
+ /* Valid NSID, both EUI64 and NGUID defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[13] == 16);
+ CU_ASSERT(buf[16] == 0x22);
+ CU_ASSERT(buf[31] == 0xEE);
+ CU_ASSERT(buf[33] == 0);
+
+ /* Valid NSID, EUI64, NGUID, and UUID defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ ns.opts.uuid.u.raw[0] = 0x33;
+ ns.opts.uuid.u.raw[15] = 0xDD;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[13] == 16);
+ CU_ASSERT(buf[16] == 0x22);
+ CU_ASSERT(buf[31] == 0xEE);
+ CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
+ CU_ASSERT(buf[33] == 16);
+ CU_ASSERT(buf[36] == 0x33);
+ CU_ASSERT(buf[51] == 0xDD);
+ CU_ASSERT(buf[53] == 0);
+}
+
+static void
+test_identify_ns(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_transport transport = {};
+ struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
+ struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
+ struct spdk_nvme_cmd cmd = {};
+ struct spdk_nvme_cpl rsp = {};
+ struct spdk_nvme_ns_data nsdata = {};
+ struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
+ struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
+ struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
+
+ subsystem.ns = ns_arr;
+ subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
+
+ /* Invalid NSID 0 */
+ cmd.nsid = 0;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Valid NSID 1 */
+ cmd.nsid = 1;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(nsdata.nsze == 1234);
+
+ /* Valid but inactive NSID 2 */
+ cmd.nsid = 2;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Valid NSID 3 */
+ cmd.nsid = 3;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(nsdata.nsze == 5678);
+
+ /* Invalid NSID 4 */
+ cmd.nsid = 4;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Invalid NSID 0xFFFFFFFF (NS management not supported) */
+ cmd.nsid = 0xFFFFFFFF;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+}
+
+static void
+test_set_get_features(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_qpair admin_qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ struct spdk_nvmf_ns ns[3];
+ struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
+ struct spdk_nvmf_request req;
+ int rc;
+
+ subsystem.ns = ns_arr;
+ subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
+ admin_qpair.ctrlr = &ctrlr;
+ req.qpair = &admin_qpair;
+ cmd.nvme_cmd.nsid = 1;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
+ ns[0].ptpl_file = "testcfg";
+ rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
+ CU_ASSERT(ns[0].ptpl_activated == true);
+
+ /* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
+ rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
+
+
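+ /* For the temperature threshold feature, cdw11 carries TMPTH in bits 15:0,
+ * TMPSEL in bits 19:16 and THSEL in bits 21:20; reserved selector values are
+ * expected to fail with Invalid Field.
+ */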
+ /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_get_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+
+ /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_get_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+
+ /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+
+ /* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
+
+ rc = nvmf_ctrlr_get_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+
+ /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+}
+
+/*
+ * Reservation Unit Test Configuration
+ * -------- -------- --------
+ * | Host A | | Host B | | Host C |
+ * -------- -------- --------
+ * / \ | |
+ * -------- -------- ------- -------
+ * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C|
+ * -------- -------- ------- -------
+ * \ \ / /
+ * \ \ / /
+ * \ \ / /
+ * --------------------------------------
+ * | NAMESPACE 1 |
+ * --------------------------------------
+ */
+
+static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
+struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
+
+static void
+ut_reservation_init(enum spdk_nvme_reservation_type rtype)
+{
+ /* Host A has two controllers */
+ spdk_uuid_generate(&g_ctrlr1_A.hostid);
+ spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
+
+ /* Host B has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_B.hostid);
+
+ /* Host C has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_C.hostid);
+
+ memset(&g_ns_info, 0, sizeof(g_ns_info));
+ g_ns_info.rtype = rtype;
+ g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
+ g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
+ g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
+}
+
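+/* In the reservation checks below, nvmf_ns_reservation_request_check() returns 0
+ * when a command is allowed and a negative value (with the completion status set
+ * to SPDK_NVME_SC_RESERVATION_CONFLICT) when it must be rejected.
+ */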
+static void
+test_reservation_write_exclusive(void)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
+ ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Read command from Host A and Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Test Case: Issue a DSM Write command from Host A and Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* Test Case: Issue a Write command from Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* Test Case: Issue a Read command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Unregister Host C */
+ memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
+
+ /* Test Case: Read and Write commands from non-registrant Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+}
+
+static void
+test_reservation_exclusive_access(void)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
+ ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Read command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* Test Case: Issue a Reservation Release command from a valid Registrant */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+}
+
+static void
+_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
+ ut_reservation_init(rtype);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Read command from Host A and Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Test Case: Issue a DSM Write command from Host A and Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Unregister Host C */
+ memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
+
+ /* Test Case: Read and Write commands from non-registrant Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+}
+
+static void
+test_reservation_write_exclusive_regs_only_and_all_regs(void)
+{
+ _test_reservation_write_exclusive_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+ _test_reservation_write_exclusive_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+}
+
+static void
+_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
+ ut_reservation_init(rtype);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Write command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Unregister Host B */
+ memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));
+
+ /* Test Case: Issue a Read command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+}
+
+static void
+test_reservation_exclusive_access_regs_only_and_all_regs(void)
+{
+ _test_reservation_exclusive_access_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
+ _test_reservation_exclusive_access_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
+}
+
+static void
+test_reservation_notification_log_page(void)
+{
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_ns ns;
+ struct spdk_nvmf_request req;
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ union spdk_nvme_async_event_completion event = {};
+ struct spdk_nvme_reservation_notification_log logs[3];
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.thread = spdk_get_thread();
+ TAILQ_INIT(&ctrlr.log_head);
+ ns.nsid = 1;
+
+ /* Test Case: Mask all the reservation notifications */
+ ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
+ SPDK_NVME_RESERVATION_RELEASED_MASK |
+ SPDK_NVME_RESERVATION_PREEMPTED_MASK;
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_REGISTRATION_PREEMPTED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_RELEASED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_PREEMPTED);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
+
+ /* Test Case: Unmask all the reservation notifications;
+ * 3 log pages are generated and an AER is triggered.
+ */
+ ns.mask = 0;
+ ctrlr.num_avail_log_pages = 0;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ ctrlr.aer_req[0] = &req;
+ ctrlr.nr_aer_reqs = 1;
+ req.qpair = &qpair;
+ TAILQ_INIT(&qpair.outstanding);
+ qpair.ctrlr = NULL;
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_REGISTRATION_PREEMPTED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_RELEASED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_PREEMPTED);
+ poll_threads();
+ event.raw = rsp.nvme_cpl.cdw0;
+ SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
+ SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
+ SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
+ SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
+
+ /* Test Case: Get Log Page to clear the log pages */
+ nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
+ SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
+}
+
+static void
+test_get_dif_ctx(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_request req = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_ns ns = {};
+ struct spdk_nvmf_ns *_ns = NULL;
+ struct spdk_bdev bdev = {};
+ union nvmf_h2c_msg cmd = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ bool ret;
+
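+ /* spdk_nvmf_request_get_dif_ctx() should yield a DIF context only for NVMe
+ * read/write commands on an I/O queue (qid != 0) of an active qpair whose
+ * controller has dif_insert_or_strip enabled and that target a valid namespace;
+ * each step below satisfies one more of these conditions.
+ */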
+ ctrlr.subsys = &subsystem;
+
+ qpair.ctrlr = &ctrlr;
+
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+
+ ns.bdev = &bdev;
+
+ ctrlr.dif_insert_or_strip = false;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ ctrlr.dif_insert_or_strip = true;
+ qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ qpair.qid = 1;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ cmd.nvme_cmd.nsid = 1;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ subsystem.max_nsid = 1;
+ subsystem.ns = &_ns;
+ subsystem.ns[0] = &ns;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == true);
+}
+
+static void
+test_identify_ctrlr(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {
+ .subtype = SPDK_NVMF_SUBTYPE_NVME
+ };
+ struct spdk_nvmf_transport_ops tops = {};
+ struct spdk_nvmf_transport transport = {
+ .ops = &tops,
+ .opts = {
+ .in_capsule_data_size = 4096,
+ },
+ };
+ struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
+ struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
+ struct spdk_nvme_ctrlr_data cdata = {};
+ uint32_t expected_ioccsz;
+
+ nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
+
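+ /* IOCCSZ is reported in 16-byte units: the 64-byte SQE plus whatever
+ * in-capsule data size the transport advertises.
+ */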
+ /* Check ioccsz, TCP transport */
+ tops.type = SPDK_NVME_TRANSPORT_TCP;
+ expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
+
+ /* Check ioccsz, RDMA transport */
+ tops.type = SPDK_NVME_TRANSPORT_RDMA;
+ expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
+
+ /* Check ioccsz, TCP transport with dif_insert_or_strip */
+ tops.type = SPDK_NVME_TRANSPORT_TCP;
+ ctrlr.dif_insert_or_strip = true;
+ expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
+}
+
+static int
+custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
+{
+ req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
+
+ return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+}
+
+static void
+test_custom_admin_cmd(void)
+{
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_ns *ns_ptrs[1];
+ struct spdk_nvmf_ns ns;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ struct spdk_bdev bdev;
+ uint8_t buf[4096];
+ int rc;
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ ns_ptrs[0] = &ns;
+ subsystem.ns = ns_ptrs;
+ subsystem.max_nsid = 1;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ memset(&ns, 0, sizeof(ns));
+ ns.opts.nsid = 1;
+ ns.bdev = &bdev;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.ctrlr = &ctrlr;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+ req.data = buf;
+ req.length = sizeof(buf);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.nvme_cmd.opc = 0xc1;
+ cmd.nvme_cmd.nsid = 0;
+ memset(&rsp, 0, sizeof(rsp));
+
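+ /* Register a handler for the vendor-specific admin opcode 0xc1; the admin
+ * command dispatcher should route the command to it rather than rejecting
+ * the unknown opcode.
+ */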
+ spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
+
+ /* Ensure that our custom handler is called */
+ rc = nvmf_ctrlr_process_admin_cmd(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+}
+
+static void
+test_fused_compare_and_write(void)
+{
+ struct spdk_nvmf_request req = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_ns ns = {};
+ struct spdk_nvmf_ns *subsys_ns[1] = {};
+ struct spdk_bdev bdev = {};
+
+ struct spdk_nvmf_poll_group group = {};
+ struct spdk_nvmf_subsystem_poll_group sgroups = {};
+ struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
+
+ ns.bdev = &bdev;
+
+ subsystem.id = 0;
+ subsystem.max_nsid = 1;
+ subsys_ns[0] = &ns;
+ subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
+
+ /* Enable controller */
+ ctrlr.vcprop.cc.bits.en = 1;
+ ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
+
+ group.num_sgroups = 1;
+ sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups.num_ns = 1;
+ sgroups.ns_info = &ns_info;
+ TAILQ_INIT(&sgroups.queued);
+ group.sgroups = &sgroups;
+ TAILQ_INIT(&qpair.outstanding);
+
+ qpair.ctrlr = &ctrlr;
+ qpair.group = &group;
+ qpair.qid = 1;
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+
+ cmd.nsid = 1;
+
+ req.qpair = &qpair;
+ req.cmd = (union nvmf_h2c_msg *)&cmd;
+ req.rsp = &rsp;
+
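+ /* A fused compare-and-write is a COMPARE carrying FUSE_FIRST immediately
+ * followed by a WRITE carrying FUSE_SECOND; the first half is held in
+ * qpair->first_fused_req until its partner arrives.
+ */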
+ /* SUCCESS/SUCCESS */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(qpair.first_fused_req != NULL);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+
+ cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(qpair.first_fused_req == NULL);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+
+ /* Wrong sequence */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.first_fused_req == NULL);
+
+ /* Write as FUSE_FIRST (Wrong op code) */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
+ CU_ASSERT(qpair.first_fused_req == NULL);
+
+ /* Compare as FUSE_SECOND (Wrong op code) */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(qpair.first_fused_req != NULL);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+
+ cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
+ CU_ASSERT(qpair.first_fused_req == NULL);
+}
+
+static void
+test_multi_async_event_reqs(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_request req[5] = {};
+ struct spdk_nvmf_ns *ns_ptrs[1] = {};
+ struct spdk_nvmf_ns ns = {};
+ union nvmf_h2c_msg cmd[5] = {};
+ union nvmf_c2h_msg rsp[5] = {};
+
+ struct spdk_nvmf_poll_group group = {};
+ struct spdk_nvmf_subsystem_poll_group sgroups = {};
+
+ int i;
+
+ ns_ptrs[0] = &ns;
+ subsystem.ns = ns_ptrs;
+ subsystem.max_nsid = 1;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ ns.opts.nsid = 1;
+ group.sgroups = &sgroups;
+
+ qpair.ctrlr = &ctrlr;
+ qpair.group = &group;
+ TAILQ_INIT(&qpair.outstanding);
+
+ ctrlr.subsys = &subsystem;
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ for (i = 0; i < 5; i++) {
+ cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
+ cmd[i].nvme_cmd.nsid = 1;
+ cmd[i].nvme_cmd.cid = i;
+
+ req[i].qpair = &qpair;
+ req[i].cmd = &cmd[i];
+ req[i].rsp = &rsp[i];
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
+ }
+
+ /* Target can store NVMF_MAX_ASYNC_EVENTS reqs */
+ sgroups.io_outstanding = NVMF_MAX_ASYNC_EVENTS;
+ for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
+ }
+ CU_ASSERT(sgroups.io_outstanding == 0);
+
+ /* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
+ CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
+
+ /* Test that the aer_req array stays packed when a request in the middle is
+ * aborted: the last entry is moved into the vacated slot.
+ */
+ CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
+ CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
+ CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
+ CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
+
+ CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
+ CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
+ CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
+ CU_ASSERT(ctrlr.aer_req[2] == NULL);
+ CU_ASSERT(ctrlr.nr_aer_reqs == 2);
+
+ TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
+ TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+ CU_ADD_TEST(suite, test_get_log_page);
+ CU_ADD_TEST(suite, test_process_fabrics_cmd);
+ CU_ADD_TEST(suite, test_connect);
+ CU_ADD_TEST(suite, test_get_ns_id_desc_list);
+ CU_ADD_TEST(suite, test_identify_ns);
+ CU_ADD_TEST(suite, test_reservation_write_exclusive);
+ CU_ADD_TEST(suite, test_reservation_exclusive_access);
+ CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
+ CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
+ CU_ADD_TEST(suite, test_reservation_notification_log_page);
+ CU_ADD_TEST(suite, test_get_dif_ctx);
+ CU_ADD_TEST(suite, test_set_get_features);
+ CU_ADD_TEST(suite, test_identify_ctrlr);
+ CU_ADD_TEST(suite, test_custom_admin_cmd);
+ CU_ADD_TEST(suite, test_fused_compare_and_write);
+ CU_ADD_TEST(suite, test_multi_async_event_reqs);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore
new file mode 100644
index 000000000..78fca1017
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_bdev_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile
new file mode 100644
index 000000000..1d22f14be
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ctrlr_bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
new file mode 100644
index 000000000..0df9c983b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
@@ -0,0 +1,415 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/mock.h"
+
+#include "nvmf/ctrlr_bdev.c"
+
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);
+
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
+
+struct spdk_bdev {
+ uint32_t blocklen;
+ uint64_t num_blocks;
+ uint32_t md_len;
+};
+
+uint32_t
+spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
+{
+ return bdev->blocklen;
+}
+
+uint64_t
+spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
+{
+ return bdev->num_blocks;
+}
+
+uint32_t
+spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
+{
+ abort();
+ return 0;
+}
+
+uint32_t
+spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
+{
+ return bdev->md_len;
+}
+
+DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *compare_iov, int compare_iovcnt,
+ struct iovec *write_iov, int write_iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);
+
+DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
+ uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));
+
+DEFINE_STUB(spdk_bdev_is_md_interleaved, bool, (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type,
+ (const struct spdk_bdev *bdev), SPDK_DIF_DISABLE);
+
+DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
+ (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);
+
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
+ (struct spdk_bdev_desc *desc), NULL);
+
+DEFINE_STUB(spdk_bdev_flush_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_unmap_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_io_type_supported, bool,
+ (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);
+
+DEFINE_STUB(spdk_bdev_queue_io_wait, int,
+ (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry),
+ 0);
+
+DEFINE_STUB(spdk_bdev_write_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_writev_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_read_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_readv_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_nvme_io_passthru, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
+ (const struct spdk_nvmf_subsystem *subsystem), NULL);
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
+{
+ abort();
+ return NULL;
+}
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
+{
+ abort();
+ return NULL;
+}
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
+{
+ abort();
+ return NULL;
+}
+
+DEFINE_STUB_V(spdk_bdev_io_get_nvme_status,
+ (const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc));
+
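+/* Record the DIF parameters passed in so test_get_dif_ctx() can verify them. */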
+int
+spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
+ bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
+ uint32_t data_offset, uint16_t guard_seed)
+{
+ ctx->block_size = block_size;
+ ctx->md_size = md_size;
+ ctx->init_ref_tag = init_ref_tag;
+
+ return 0;
+}
+
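+/* nvmf_bdev_ctrlr_get_rw_params() should read the 64-bit SLBA from CDW10/11 and the 0's based NLB from CDW12. */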
+static void
+test_get_rw_params(void)
+{
+ struct spdk_nvme_cmd cmd = {0};
+ uint64_t lba;
+ uint64_t count;
+
+ lba = 0;
+ count = 0;
+ to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
+ to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+ nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
+ CU_ASSERT(lba == 0x1234567890ABCDEF);
+ CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
+}
+
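+/* Range check with arguments (bdev size in blocks, start LBA, block count), including wrap-around past UINT64_MAX. */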
+static void
+test_lba_in_range(void)
+{
+ /* Trivial cases (no overflow) */
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);
+
+ /* Overflow edge cases */
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
+}
+
+static void
+test_get_dif_ctx(void)
+{
+ struct spdk_bdev bdev = {};
+ struct spdk_nvme_cmd cmd = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ bool ret;
+
+ bdev.md_len = 0;
+
+ ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
+ bdev.blocklen = 520;
+ bdev.md_len = 8;
+
+ ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
+ CU_ASSERT(ret == true);
+ CU_ASSERT(dif_ctx.block_size == 520);
+ CU_ASSERT(dif_ctx.md_size == 8);
+ CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
+}
+
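+/* Exercise fused compare-and-write: success, LBA/NLB mismatch within the fused pair, out-of-range LBA, and SGL length mismatch. */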
+static void
+test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
+{
+ int rc;
+ struct spdk_bdev bdev = {};
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel ch = {};
+
+ struct spdk_nvmf_request cmp_req = {};
+ union nvmf_c2h_msg cmp_rsp = {};
+
+ struct spdk_nvmf_request write_req = {};
+ union nvmf_c2h_msg write_rsp = {};
+
+ struct spdk_nvmf_qpair qpair = {};
+
+ struct spdk_nvme_cmd cmp_cmd = {};
+ struct spdk_nvme_cmd write_cmd = {};
+
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_ns ns = {};
+ struct spdk_nvmf_ns *subsys_ns[1] = {};
+
+ struct spdk_nvmf_poll_group group = {};
+ struct spdk_nvmf_subsystem_poll_group sgroups = {};
+ struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
+
+ bdev.blocklen = 512;
+ bdev.num_blocks = 10;
+ ns.bdev = &bdev;
+
+ subsystem.id = 0;
+ subsystem.max_nsid = 1;
+ subsys_ns[0] = &ns;
+ subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
+
+ /* Enable controller */
+ ctrlr.vcprop.cc.bits.en = 1;
+ ctrlr.subsys = &subsystem;
+
+ group.num_sgroups = 1;
+ sgroups.num_ns = 1;
+ sgroups.ns_info = &ns_info;
+ group.sgroups = &sgroups;
+
+ qpair.ctrlr = &ctrlr;
+ qpair.group = &group;
+
+ cmp_req.qpair = &qpair;
+ cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
+ cmp_req.rsp = &cmp_rsp;
+
+ cmp_cmd.nsid = 1;
+ cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ write_req.qpair = &qpair;
+ write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
+ write_req.rsp = &write_rsp;
+
+ write_cmd.nsid = 1;
+ write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ write_cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ /* 1. SUCCESS */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);
+
+ /* 2. Fused command start lba / num blocks mismatch */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 2; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);
+
+ /* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_get_rw_params);
+ CU_ADD_TEST(suite, test_lba_in_range);
+ CU_ADD_TEST(suite, test_get_dif_ctx);
+
+ CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore
new file mode 100644
index 000000000..a975a97ec
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_discovery_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile
new file mode 100644
index 000000000..d289bc3e8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = json
+TEST_FILE = ctrlr_discovery_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c
new file mode 100644
index 000000000..29e923de8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c
@@ -0,0 +1,303 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+#include "nvmf/ctrlr_discovery.c"
+#include "nvmf/subsystem.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB_V(spdk_bdev_module_release_bdev,
+ (struct spdk_bdev *bdev));
+
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
+ (const struct spdk_bdev *bdev), 512);
+
+DEFINE_STUB(spdk_nvmf_transport_stop_listen,
+ int,
+ (struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid), 0);
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc)
+{
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+int
+spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return 0;
+}
+
+static struct spdk_nvmf_listener g_listener = {};
+
+struct spdk_nvmf_listener *
+nvmf_transport_find_listener(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return &g_listener;
+}
+
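+/* The discover stub tags each log page entry with trtype 42 so the tests can recognize entries it produced. */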
+void
+nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
+ struct spdk_nvme_transport_id *trid,
+ struct spdk_nvmf_discovery_log_page_entry *entry)
+{
+ entry->trtype = 42;
+}
+
+struct spdk_nvmf_transport_ops g_transport_ops = {};
+
+static struct spdk_nvmf_transport g_transport = {
+ .ops = &g_transport_ops
+};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_transport_create(const char *transport_name,
+ struct spdk_nvmf_transport_opts *tprt_opts)
+{
+ if (strcasecmp(transport_name, spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA))) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ return NULL;
+}
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
+{
+ return &g_transport;
+}
+
+int
+spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
+{
+ if (trtype == NULL || str == NULL) {
+ return -EINVAL;
+ }
+
+ if (strcasecmp(str, "PCIe") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_PCIE;
+ } else if (strcasecmp(str, "RDMA") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_RDMA;
+ } else {
+ return -ENOENT;
+ }
+ return 0;
+}
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+void
+nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
+{
+}
+
+void
+nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+int
+nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem)
+{
+ return 0;
+}
+
+int
+nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+void
+nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+static void
+_subsystem_add_listen_done(void *cb_arg, int status)
+{
+ SPDK_CU_ASSERT_FATAL(status == 0);
+}
+
+static void
+test_discovery_log(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem *subsystem;
+ uint8_t buffer[8192];
+ struct iovec iov;
+ struct spdk_nvmf_discovery_log_page *disc_log;
+ struct spdk_nvmf_discovery_log_page_entry *entry;
+ struct spdk_nvme_transport_id trid = {};
+
+ iov.iov_base = buffer;
+ iov.iov_len = 8192;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Add one subsystem and verify that the discovery log contains it */
+ subsystem = spdk_nvmf_subsystem_create(&tgt, "nqn.2016-06.io.spdk:subsystem1",
+ SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ subsystem->allow_any_host = true;
+
+ trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
+ snprintf(trid.traddr, sizeof(trid.traddr), "1234");
+ snprintf(trid.trsvcid, sizeof(trid.trsvcid), "5678");
+ spdk_nvmf_subsystem_add_listener(subsystem, &trid, _subsystem_add_listen_done, NULL);
+ subsystem->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+
+ /* Get only genctr (first field in the header) */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0,
+ sizeof(disc_log->genctr));
+ CU_ASSERT(disc_log->genctr == 2); /* one added subsystem and listener */
+
+ /* Get only the header, no entries */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0, sizeof(*disc_log));
+ CU_ASSERT(disc_log->genctr == 2);
+ CU_ASSERT(disc_log->numrec == 1);
+
+ /* Offset 0, exact size match */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0,
+ sizeof(*disc_log) + sizeof(disc_log->entries[0]));
+ CU_ASSERT(disc_log->genctr != 0);
+ CU_ASSERT(disc_log->numrec == 1);
+ CU_ASSERT(disc_log->entries[0].trtype == 42);
+
+ /* Offset 0, oversize buffer */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0, sizeof(buffer));
+ CU_ASSERT(disc_log->genctr != 0);
+ CU_ASSERT(disc_log->numrec == 1);
+ CU_ASSERT(disc_log->entries[0].trtype == 42);
+ CU_ASSERT(spdk_mem_all_zero(buffer + sizeof(*disc_log) + sizeof(disc_log->entries[0]),
+ sizeof(buffer) - (sizeof(*disc_log) + sizeof(disc_log->entries[0]))));
+
+ /* Get just the first entry, no header */
+ memset(buffer, 0xCC, sizeof(buffer));
+ entry = (struct spdk_nvmf_discovery_log_page_entry *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov,
+ 1,
+ offsetof(struct spdk_nvmf_discovery_log_page, entries[0]),
+ sizeof(*entry));
+ CU_ASSERT(entry->trtype == 42);
+ subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
+ spdk_nvmf_subsystem_destroy(subsystem);
+ free(tgt.subsystems);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_discovery_log);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/fc.c/.gitignore b/src/spdk/test/unit/lib/nvmf/fc.c/.gitignore
new file mode 100644
index 000000000..3895b84ab
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc.c/.gitignore
@@ -0,0 +1 @@
+fc_ut
diff --git a/src/spdk/test/unit/lib/nvmf/fc.c/Makefile b/src/spdk/test/unit/lib/nvmf/fc.c/Makefile
new file mode 100644
index 000000000..7f54f1520
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc.c/Makefile
@@ -0,0 +1,58 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2018 Broadcom. All Rights Reserved.
+# The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../)
+include $(SPDK_ROOT_DIR)/mk/config.mk
+
+CFLAGS += -I$(SPDK_ROOT_DIR)/test/common/lib -I$(SPDK_ROOT_DIR)/lib \
+-I$(SPDK_ROOT_DIR)/lib/nvmf
+
+ifneq ($(strip $(CONFIG_FC_PATH)),)
+CFLAGS += -I$(CONFIG_FC_PATH)
+endif
+
+TEST_FILE = fc_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
+
+# Disable clang warning: taking address of packed member of class or structure may result in an unaligned pointer value [-Werror,-Waddress-of-packed-member]
+ifeq ($(CC),clang)
+ CLANG_VERSION := $(shell $(CC) -v 2>&1 | \
+ sed -n "s/.*version \([0-9]*\.[0-9]*\).*/\1/p")
+
+CLANG_MAJOR_VERSION := $(shell echo $(CLANG_VERSION) | cut -f1 -d.)
+
+ifeq ($(shell test $(CLANG_MAJOR_VERSION) -ge 4 && echo 1), 1)
+ CFLAGS += -Wno-address-of-packed-member
+endif
+endif
diff --git a/src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c b/src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c
new file mode 100644
index 000000000..a8d4b3b96
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c
@@ -0,0 +1,505 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2018-2019 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* NVMF FC Transport Unit Test */
+
+#include "spdk/env.h"
+#include "spdk_cunit.h"
+#include "spdk/nvmf.h"
+#include "spdk/endian.h"
+#include "spdk/trace.h"
+#include "spdk_internal/log.h"
+
+#include "ut_multithread.c"
+
+#include "transport.h"
+#include "nvmf_internal.h"
+
+#include "nvmf_fc.h"
+
+#include "json/json_util.c"
+#include "json/json_write.c"
+#include "nvmf/nvmf.c"
+#include "nvmf/transport.c"
+#include "nvmf/subsystem.c"
+#include "nvmf/fc.c"
+#include "nvmf/fc_ls.c"
+
+/*
+ * SPDK Stuff
+ */
+
+#ifdef SPDK_CONFIG_RDMA
+const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
+ .type = SPDK_NVME_TRANSPORT_RDMA,
+ .opts_init = NULL,
+ .create = NULL,
+ .destroy = NULL,
+
+ .listen = NULL,
+ .stop_listen = NULL,
+ .accept = NULL,
+
+ .listener_discover = NULL,
+
+ .poll_group_create = NULL,
+ .poll_group_destroy = NULL,
+ .poll_group_add = NULL,
+ .poll_group_poll = NULL,
+
+ .req_free = NULL,
+ .req_complete = NULL,
+
+ .qpair_fini = NULL,
+ .qpair_get_peer_trid = NULL,
+ .qpair_get_local_trid = NULL,
+ .qpair_get_listen_trid = NULL,
+};
+#endif
+
+const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
+ .type = SPDK_NVME_TRANSPORT_TCP,
+};
+
+struct spdk_trace_histories *g_trace_histories;
+
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+DEFINE_STUB(spdk_nvme_transport_id_compare, int,
+ (const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2), 0);
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description,
+ (const char *name, uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object, uint8_t arg1_type,
+ const char *arg1_name));
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
+DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
+ NULL);
+DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
+DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
+DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
+ spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc), 0);
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
+DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);
+
+DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
+ struct spdk_nvmf_ctrlr_data *cdata));
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
+ -ENOSPC);
+
+const char *
+spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
+{
+ switch (trtype) {
+ case SPDK_NVME_TRANSPORT_PCIE:
+ return "PCIe";
+ case SPDK_NVME_TRANSPORT_RDMA:
+ return "RDMA";
+ case SPDK_NVME_TRANSPORT_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+const char *
+spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
+{
+ switch (adrfam) {
+ case SPDK_NVMF_ADRFAM_IPV4:
+ return "IPv4";
+ case SPDK_NVMF_ADRFAM_IPV6:
+ return "IPv6";
+ case SPDK_NVMF_ADRFAM_IB:
+ return "IB";
+ case SPDK_NVMF_ADRFAM_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+static bool g_lld_init_called = false;
+
+int
+nvmf_fc_lld_init(void)
+{
+ g_lld_init_called = true;
+ return 0;
+}
+
+static bool g_lld_fini_called = false;
+
+void
+nvmf_fc_lld_fini(void)
+{
+ g_lld_fini_called = true;
+}
+
+DEFINE_STUB_V(nvmf_fc_lld_start, (void));
+DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
+DEFINE_STUB_V(nvmf_fc_reinit_q, (void *queues_prev, void *queues_curr));
+DEFINE_STUB(nvmf_fc_init_rqpair_buffers, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
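+/* Link-time stubs for the bdev and nvmf symbols referenced by ctrlr_bdev.c. */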
+DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
+DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
+ 0);
+DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
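+/* Minimal stand-in for struct spdk_bdev so the tests can set block size, block count and metadata size directly. */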
+DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
+DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
+DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
+ uint32_t ersp_len), 0);
+DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
+ struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
+DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_xchg *xri,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
+DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
+ uint16_t ox_id, uint16_t rx_id,
+ uint16_t rpi, bool rjt, uint8_t rjt_exp,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
+DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
+ size_t rsp_len), NULL);
+DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
+DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
+DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
+DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
+ uint16_t skip_rq), 0);
+DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
+ uint64_t *conn_id, uint32_t sq_size), true);
+DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
+ (struct spdk_nvmf_fc_hwqp *queues,
+ uint32_t num_queues, uint64_t conn_id), NULL);
+DEFINE_STUB_V(nvmf_fc_release_conn, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id,
+ uint32_t sq_size));
+DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
+ struct spdk_nvmf_fc_hwqp *io_queues,
+ uint32_t num_io_queues,
+ struct spdk_nvmf_fc_queue_dump_info *dump_info));
+DEFINE_STUB_V(nvmf_fc_get_xri_info, (struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_xchg_info *info));
+DEFINE_STUB(nvmf_fc_get_rsvd_thread, struct spdk_thread *, (void), NULL);
+
+uint32_t
+nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
+{
+ hwqp->lcore_id++;
+ return 0; /* always return 0 or else it will poll forever */
+}
+
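+/* Hand out a static exchange so the transport code always gets a usable XRI without real hardware. */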
+struct spdk_nvmf_fc_xchg *
+nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
+{
+ static struct spdk_nvmf_fc_xchg xchg;
+
+ xchg.xchg_id = 1;
+ return &xchg;
+}
+
+#define MAX_FC_UT_POLL_THREADS 8
+static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
+#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
+static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
+static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
+uint8_t g_fc_port_handle = 0xff;
+struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];
+
+static void
+_add_transport_done(void *arg, int status)
+{
+ CU_ASSERT(status == 0);
+}
+
+static void
+_add_transport_done_dup_err(void *arg, int status)
+{
+ CU_ASSERT(status == -EEXIST);
+}
+
+static void
+create_transport_test(void)
+{
+ const struct spdk_nvmf_transport_ops *ops = NULL;
+ struct spdk_nvmf_transport_opts opts = { 0 };
+ struct spdk_nvmf_target_opts tgt_opts = {
+ .name = "nvmf_test_tgt",
+ .max_subsystems = 0
+ };
+
+ allocate_threads(8);
+ set_thread(0);
+
+ g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
+
+ ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
+ SPDK_CU_ASSERT_FATAL(ops != NULL);
+
+ ops->opts_init(&opts);
+
+ g_lld_init_called = false;
+ g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ CU_ASSERT(g_lld_init_called == true);
+ CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
+ CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
+ CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
+ CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
+ CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
+ CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);
+
+ set_thread(0);
+
+ spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
+ _add_transport_done, 0);
+ poll_thread(0);
+
+ /* Add transport again - should get error */
+ spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
+ _add_transport_done_dup_err, 0);
+ poll_thread(0);
+
+ /* create transport with bad args/options */
+#ifndef SPDK_CONFIG_RDMA
+ CU_ASSERT(spdk_nvmf_transport_create("RDMA", &opts) == NULL);
+#endif
+ CU_ASSERT(spdk_nvmf_transport_create("Bogus Transport", &opts) == NULL);
+ opts.max_io_size = 1024 ^ 3;
+ CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
+ opts.max_io_size = 999;
+ opts.io_unit_size = 1024;
+ CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
+}
+
+static void
+port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
+{
+ CU_ASSERT(err == 0);
+ CU_ASSERT(port_handle == 2);
+ g_fc_port_handle = port_handle;
+}
+
+static void
+create_fc_port_test(void)
+{
+ struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
+ struct spdk_nvmf_fc_port *fc_port = NULL;
+ int err;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ init_args.port_handle = 2;
+ init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
+ init_args.ls_queue_size = 100;
+ init_args.io_queue_size = 100;
+ init_args.io_queues = (void *)lld_q;
+
+ set_thread(0);
+ err = nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
+ CU_ASSERT(err == 0);
+ poll_thread(0);
+
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ CU_ASSERT(fc_port != NULL);
+}
+
+static void
+online_fc_port_test(void)
+{
+ struct spdk_nvmf_fc_port *fc_port;
+ struct spdk_nvmf_fc_hw_port_online_args args;
+ int err;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ SPDK_CU_ASSERT_FATAL(fc_port != NULL);
+
+ set_thread(0);
+ args.port_handle = g_fc_port_handle;
+ err = nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
+ CU_ASSERT(err == 0);
+ poll_threads();
+ set_thread(0);
+ if (err == 0) {
+ uint32_t i;
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
+ CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
+ }
+ }
+}
+
+static void
+create_poll_groups_test(void)
+{
+ unsigned i;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
+ set_thread(i);
+ g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
+ poll_thread(i);
+ CU_ASSERT(g_poll_groups[i] != NULL);
+ }
+ set_thread(0);
+}
+
+static void
+poll_group_poll_test(void)
+{
+ unsigned i;
+ unsigned poll_cnt = 10;
+ struct spdk_nvmf_fc_port *fc_port = NULL;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ set_thread(0);
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ SPDK_CU_ASSERT_FATAL(fc_port != NULL);
+
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ fc_port->io_queues[i].lcore_id = 0;
+ }
+
+ for (i = 0; i < poll_cnt; i++) {
+ /* this should cause spdk_nvmf_fc_poll_group_poll to be called */
+ poll_threads();
+ }
+
+ /* check if hwqp's lcore_id has been updated */
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
+ }
+}
+
+static void
+remove_hwqps_from_poll_groups_test(void)
+{
+ unsigned i;
+ struct spdk_nvmf_fc_port *fc_port = NULL;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ SPDK_CU_ASSERT_FATAL(fc_port != NULL);
+
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i]);
+ poll_threads();
+ CU_ASSERT(fc_port->io_queues[i].fgroup == 0);
+ }
+}
+
+static void
+destroy_transport_test(void)
+{
+ unsigned i;
+
+ set_thread(0);
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
+ set_thread(i);
+ spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
+ poll_thread(0);
+ }
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
+ g_lld_fini_called = false;
+ spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
+ poll_threads();
+ CU_ASSERT(g_lld_fini_called == true);
+}
+
+static int
+nvmf_fc_tests_init(void)
+{
+ return 0;
+}
+
+static int
+nvmf_fc_tests_fini(void)
+{
+ free_threads();
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int num_failures = 0;
+ CU_pSuite suite = NULL;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);
+
+ CU_ADD_TEST(suite, create_transport_test);
+ CU_ADD_TEST(suite, create_poll_groups_test);
+ CU_ADD_TEST(suite, create_fc_port_test);
+ CU_ADD_TEST(suite, online_fc_port_test);
+ CU_ADD_TEST(suite, poll_group_poll_test);
+ CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
+ CU_ADD_TEST(suite, destroy_transport_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore b/src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore
new file mode 100644
index 000000000..ac5b0c40e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore
@@ -0,0 +1 @@
+fc_ls_ut
diff --git a/src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile b/src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile
new file mode 100644
index 000000000..d9143e627
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile
@@ -0,0 +1,45 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2018 Broadcom. All Rights Reserved.
+# The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../)
+include $(SPDK_ROOT_DIR)/mk/config.mk
+
+CFLAGS += -I$(SPDK_ROOT_DIR)/test/common/lib -I$(SPDK_ROOT_DIR)/lib/nvmf
+
+ifneq ($(strip $(CONFIG_FC_PATH)),)
+CFLAGS += -I$(CONFIG_FC_PATH)
+endif
+
+TEST_FILE = fc_ls_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c b/src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c
new file mode 100644
index 000000000..68eb81960
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c
@@ -0,0 +1,1070 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2018-2019 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* NVMF FC LS Command Processor Unit Test */
+
+#include "spdk/env.h"
+#include "spdk_cunit.h"
+#include "spdk/nvmf.h"
+#include "spdk/endian.h"
+#include "spdk/trace.h"
+#include "spdk_internal/log.h"
+
+#include "ut_multithread.c"
+
+#include "transport.h"
+#include "nvmf_internal.h"
+#include "nvmf_fc.h"
+
+#include "fc_ls.c"
+
+#define LAST_RSLT_STOP_TEST 999
+
+void spdk_set_thread(struct spdk_thread *thread);
+
+/*
+ * SPDK Stuff
+ */
+
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -ENOSPC);
+DEFINE_STUB(spdk_nvmf_subsystem_host_allowed, bool,
+ (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn), true);
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+
+static const char *fc_ut_subsystem_nqn =
+ "nqn.2017-11.io.spdk:sn.390c0dc7c87011e786b300a0989adc53:subsystem.good";
+static struct spdk_nvmf_host fc_ut_initiator = {
+ .nqn = "nqn.2017-11.fc_host",
+};
+static struct spdk_nvmf_host *fc_ut_host = &fc_ut_initiator;
+static struct spdk_nvmf_tgt g_nvmf_tgt;
+static struct spdk_nvmf_transport_opts g_nvmf_transport_opts = {
+ .max_queue_depth = 128,
+ .max_qpairs_per_ctrlr = 4,
+ .max_aq_depth = 32,
+};
+static uint32_t g_hw_queue_depth = 1024;
+static struct spdk_nvmf_subsystem g_nvmf_subsystem;
+
+void nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args);
+void spdk_bdev_io_abort(struct spdk_bdev_io *bdev_io, void *ctx);
+void nvmf_fc_request_abort_complete(void *arg1);
+bool nvmf_fc_req_in_xfer(struct spdk_nvmf_fc_request *fc_req);
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ if (!strcmp(subnqn, g_nvmf_subsystem.subnqn)) {
+ return &g_nvmf_subsystem;
+ }
+ return NULL;
+}
+
+int
+spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_qpair *qpair)
+{
+ qpair->state = SPDK_NVMF_QPAIR_ACTIVE;
+ return 0;
+}
+
+const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
+ .type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
+ .create = NULL,
+ .destroy = NULL,
+
+ .listen = NULL,
+ .stop_listen = NULL,
+ .accept = NULL,
+
+ .listener_discover = NULL,
+
+ .poll_group_create = NULL,
+ .poll_group_destroy = NULL,
+ .poll_group_add = NULL,
+ .poll_group_poll = NULL,
+
+ .req_complete = NULL,
+
+ .qpair_fini = NULL,
+
+};
+
+struct spdk_nvmf_transport g_nvmf_transport = {
+ .ops = &spdk_nvmf_transport_fc,
+ .tgt = &g_nvmf_tgt,
+};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
+{
+ return &g_nvmf_transport;
+}
+
+int
+spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
+{
+ cb_fn(ctx);
+ return 0;
+}
+
+void
+spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair)
+{
+ uint32_t i;
+ struct spdk_nvmf_fc_conn *fc_conn;
+ struct spdk_nvmf_fc_hwqp *hwqp = NULL, *sel_hwqp = NULL;
+ struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
+ struct spdk_nvmf_fc_port *fc_port;
+
+ fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
+ api_data = &fc_conn->create_opd->u.add_conn;
+
+ /* Pick a hwqp with least load */
+ fc_port = fc_conn->fc_assoc->tgtport->fc_port;
+ for (i = 0; i < fc_port->num_io_queues; i ++) {
+ hwqp = &fc_port->io_queues[i];
+ if (!sel_hwqp || (hwqp->rq_size > sel_hwqp->rq_size)) {
+ sel_hwqp = hwqp;
+ }
+ }
+
+ if (!nvmf_fc_assign_conn_to_hwqp(sel_hwqp,
+ &fc_conn->conn_id,
+ fc_conn->max_queue_depth)) {
+ goto err;
+ }
+
+ fc_conn->hwqp = sel_hwqp;
+
+ /* If this is for ADMIN connection, then update assoc ID. */
+ if (fc_conn->qpair.qid == 0) {
+ fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
+ }
+
+ nvmf_fc_poller_api_func(sel_hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
+
+ return;
+err:
+ nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
+ api_data->args.fc_conn, api_data->aq_conn);
+}
+
+struct spdk_nvmf_fc_conn *
+nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
+{
+ struct spdk_nvmf_fc_conn *fc_conn;
+
+ TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
+ if (fc_conn->conn_id == conn_id) {
+ return fc_conn;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * LLD functions
+ */
+
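+/* Connection IDs pack the hwqp number into the low byte; nvmf_fc_get_hwqp_from_conn_id() below decodes it the same way. */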
+static inline uint64_t
+nvmf_fc_gen_conn_id(uint32_t qnum, struct spdk_nvmf_fc_hwqp *hwqp)
+{
+ static uint16_t conn_cnt = 0;
+ return ((uint64_t) qnum | (conn_cnt++ << 8));
+}
+
+bool
+nvmf_fc_assign_conn_to_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
+ uint64_t *conn_id, uint32_t sq_size)
+{
+ SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_LS, "Assign connection to HWQP\n");
+
+
+ if (hwqp->rq_size < sq_size) {
+ return false; /* queue has no space for this connection */
+ }
+
+ hwqp->rq_size -= sq_size;
+ hwqp->num_conns++;
+
+ /* create connection ID */
+ *conn_id = nvmf_fc_gen_conn_id(hwqp->hwqp_id, hwqp);
+
+ SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_LS,
+ "New connection assigned to HWQP%d (free %d), conn_id 0x%lx\n",
+ hwqp->hwqp_id, hwqp->rq_size, *conn_id);
+ return true;
+}
+
+struct spdk_nvmf_fc_hwqp *
+nvmf_fc_get_hwqp_from_conn_id(struct spdk_nvmf_fc_hwqp *queues,
+ uint32_t num_queues, uint64_t conn_id)
+{
+ return &queues[(conn_id & 0xff) % num_queues];
+}
+
+void
+nvmf_fc_release_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id,
+ uint32_t sq_size)
+{
+ hwqp->rq_size += sq_size;
+}
+
+struct spdk_nvmf_fc_srsr_bufs *
+nvmf_fc_alloc_srsr_bufs(size_t rqst_len, size_t rsp_len)
+{
+ struct spdk_nvmf_fc_srsr_bufs *srsr_bufs;
+
+ srsr_bufs = calloc(1, sizeof(struct spdk_nvmf_fc_srsr_bufs));
+ if (!srsr_bufs) {
+ return NULL;
+ }
+
+ srsr_bufs->rqst = calloc(1, rqst_len + rsp_len);
+ if (srsr_bufs->rqst) {
+ srsr_bufs->rqst_len = rqst_len;
+ srsr_bufs->rsp = srsr_bufs->rqst + rqst_len;
+ srsr_bufs->rsp_len = rsp_len;
+ } else {
+ free(srsr_bufs);
+ srsr_bufs = NULL;
+ }
+
+ return srsr_bufs;
+}
+
+void
+nvmf_fc_free_srsr_bufs(struct spdk_nvmf_fc_srsr_bufs *srsr_bufs)
+{
+ if (srsr_bufs) {
+ free(srsr_bufs->rqst);
+ free(srsr_bufs);
+ }
+}
+
+/*
+ * The Tests
+ */
+
+enum _test_run_type {
+ TEST_RUN_TYPE_CREATE_ASSOC = 1,
+ TEST_RUN_TYPE_CREATE_CONN,
+ TEST_RUN_TYPE_DISCONNECT,
+ TEST_RUN_TYPE_CONN_BAD_ASSOC,
+ TEST_RUN_TYPE_FAIL_LS_RSP,
+ TEST_RUN_TYPE_DISCONNECT_BAD_ASSOC,
+ TEST_RUN_TYPE_CREATE_MAX_ASSOC,
+};
+
+static uint32_t g_test_run_type = 0;
+static uint64_t g_curr_assoc_id = 0;
+static uint16_t g_create_conn_test_cnt = 0;
+static uint16_t g_max_assoc_conn_test = 0;
+static int g_last_rslt = 0;
+static bool g_spdk_nvmf_fc_xmt_srsr_req = false;
+static struct spdk_nvmf_fc_remote_port_info g_rem_port;
+
+static void
+run_create_assoc_test(const char *subnqn,
+ struct spdk_nvmf_host *host,
+ struct spdk_nvmf_fc_nport *tgt_port)
+{
+ struct spdk_nvmf_fc_ls_rqst ls_rqst;
+ struct spdk_nvmf_fc_ls_cr_assoc_rqst ca_rqst;
+ uint8_t respbuf[128];
+
+ memset(&ca_rqst, 0, sizeof(struct spdk_nvmf_fc_ls_cr_assoc_rqst));
+
+ ca_rqst.w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
+ to_be32(&ca_rqst.desc_list_len,
+ sizeof(struct spdk_nvmf_fc_ls_cr_assoc_rqst) -
+ (2 * sizeof(uint32_t)));
+ to_be32(&ca_rqst.assoc_cmd.desc_tag, FCNVME_LSDESC_CREATE_ASSOC_CMD);
+ to_be32(&ca_rqst.assoc_cmd.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_cr_assoc_cmd) -
+ (2 * sizeof(uint32_t)));
+ to_be16(&ca_rqst.assoc_cmd.ersp_ratio, (g_nvmf_transport.opts.max_aq_depth / 2));
+ to_be16(&ca_rqst.assoc_cmd.sqsize, g_nvmf_transport.opts.max_aq_depth - 1);
+ snprintf(&ca_rqst.assoc_cmd.subnqn[0], strlen(subnqn) + 1, "%s", subnqn);
+ snprintf(&ca_rqst.assoc_cmd.hostnqn[0], strlen(host->nqn) + 1, "%s", host->nqn);
+ ls_rqst.rqstbuf.virt = &ca_rqst;
+ ls_rqst.rspbuf.virt = respbuf;
+ ls_rqst.rqst_len = sizeof(struct spdk_nvmf_fc_ls_cr_assoc_rqst);
+ ls_rqst.rsp_len = 0;
+ ls_rqst.rpi = 5000;
+ ls_rqst.private_data = NULL;
+ ls_rqst.s_id = 0;
+ ls_rqst.nport = tgt_port;
+ ls_rqst.rport = &g_rem_port;
+ ls_rqst.nvmf_tgt = &g_nvmf_tgt;
+
+ nvmf_fc_handle_ls_rqst(&ls_rqst);
+ poll_thread(0);
+}
+
+static void
+run_create_conn_test(struct spdk_nvmf_host *host,
+ struct spdk_nvmf_fc_nport *tgt_port,
+ uint64_t assoc_id,
+ uint16_t qid)
+{
+ struct spdk_nvmf_fc_ls_rqst ls_rqst;
+ struct spdk_nvmf_fc_ls_cr_conn_rqst cc_rqst;
+ uint8_t respbuf[128];
+
+ memset(&cc_rqst, 0, sizeof(struct spdk_nvmf_fc_ls_cr_conn_rqst));
+
+ /* fill in request descriptor */
+ cc_rqst.w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
+ to_be32(&cc_rqst.desc_list_len,
+ sizeof(struct spdk_nvmf_fc_ls_cr_conn_rqst) -
+ (2 * sizeof(uint32_t)));
+
+ /* fill in connect command descriptor */
+ to_be32(&cc_rqst.connect_cmd.desc_tag, FCNVME_LSDESC_CREATE_CONN_CMD);
+ to_be32(&cc_rqst.connect_cmd.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_cr_conn_cmd) -
+ (2 * sizeof(uint32_t)));
+
+ to_be16(&cc_rqst.connect_cmd.ersp_ratio, (g_nvmf_transport.opts.max_queue_depth / 2));
+ to_be16(&cc_rqst.connect_cmd.sqsize, g_nvmf_transport.opts.max_queue_depth - 1);
+ to_be16(&cc_rqst.connect_cmd.qid, qid);
+
+ /* fill in association id descriptor */
+ to_be32(&cc_rqst.assoc_id.desc_tag, FCNVME_LSDESC_ASSOC_ID);
+ to_be32(&cc_rqst.assoc_id.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
+ (2 * sizeof(uint32_t)));
+ cc_rqst.assoc_id.association_id = assoc_id; /* already big-endian 64-bit */
+
+ ls_rqst.rqstbuf.virt = &cc_rqst;
+ ls_rqst.rspbuf.virt = respbuf;
+ ls_rqst.rqst_len = sizeof(struct spdk_nvmf_fc_ls_cr_conn_rqst);
+ ls_rqst.rsp_len = 0;
+ ls_rqst.rpi = 5000;
+ ls_rqst.private_data = NULL;
+ ls_rqst.s_id = 0;
+ ls_rqst.nport = tgt_port;
+ ls_rqst.rport = &g_rem_port;
+ ls_rqst.nvmf_tgt = &g_nvmf_tgt;
+
+ nvmf_fc_handle_ls_rqst(&ls_rqst);
+ poll_thread(0);
+}
+
+static void
+run_disconn_test(struct spdk_nvmf_fc_nport *tgt_port,
+ uint64_t assoc_id)
+{
+ struct spdk_nvmf_fc_ls_rqst ls_rqst;
+ struct spdk_nvmf_fc_ls_disconnect_rqst dc_rqst;
+ uint8_t respbuf[128];
+
+ memset(&dc_rqst, 0, sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst));
+
+ /* fill in request descriptor */
+ dc_rqst.w0.ls_cmd = FCNVME_LS_DISCONNECT;
+ to_be32(&dc_rqst.desc_list_len,
+ sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst) -
+ (2 * sizeof(uint32_t)));
+
+ /* fill in disconnect command descriptor */
+ to_be32(&dc_rqst.disconn_cmd.desc_tag, FCNVME_LSDESC_DISCONN_CMD);
+ to_be32(&dc_rqst.disconn_cmd.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_disconn_cmd) -
+ (2 * sizeof(uint32_t)));
+
+ /* fill in association id descriptor */
+ to_be32(&dc_rqst.assoc_id.desc_tag, FCNVME_LSDESC_ASSOC_ID);
+ to_be32(&dc_rqst.assoc_id.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
+ (2 * sizeof(uint32_t)));
+ dc_rqst.assoc_id.association_id = assoc_id; /* already big-endian 64-bit */
+
+ ls_rqst.rqstbuf.virt = &dc_rqst;
+ ls_rqst.rspbuf.virt = respbuf;
+ ls_rqst.rqst_len = sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst);
+ ls_rqst.rsp_len = 0;
+ ls_rqst.rpi = 5000;
+ ls_rqst.private_data = NULL;
+ ls_rqst.s_id = 0;
+ ls_rqst.nport = tgt_port;
+ ls_rqst.rport = &g_rem_port;
+ ls_rqst.nvmf_tgt = &g_nvmf_tgt;
+
+ nvmf_fc_handle_ls_rqst(&ls_rqst);
+ poll_thread(0);
+}
+
+static void
+disconnect_assoc_cb(void *cb_data, uint32_t err)
+{
+ CU_ASSERT(err == 0);
+}
+
+static int
+handle_ca_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst, bool max_assoc_test)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_CREATE_ASSOCIATION) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_ACC) {
+ struct spdk_nvmf_fc_ls_cr_assoc_acc *acc =
+ (struct spdk_nvmf_fc_ls_cr_assoc_acc *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&acc_hdr->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_cr_assoc_acc) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rqst) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&acc->assoc_id.desc_tag) ==
+ FCNVME_LSDESC_ASSOC_ID);
+ CU_ASSERT(from_be32(&acc->assoc_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) - 8);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_tag) ==
+ FCNVME_LSDESC_CONN_ID);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_conn_id) - 8);
+
+ g_curr_assoc_id = acc->assoc_id.association_id;
+ g_create_conn_test_cnt++;
+ return 0;
+ } else if (max_assoc_test) {
+ /* reject reason code should be insufficient resources */
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+ if (rjt->rjt.reason_code == FCNVME_RJT_RC_INSUFF_RES) {
+ return LAST_RSLT_STOP_TEST;
+ }
+ }
+ CU_FAIL("Unexpected reject response for create association");
+ } else {
+ CU_FAIL("Response not for create association");
+ }
+
+ return -EINVAL;
+}
+
+static int
+handle_cc_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_CREATE_CONNECTION) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_ACC) {
+ struct spdk_nvmf_fc_ls_cr_conn_acc *acc =
+ (struct spdk_nvmf_fc_ls_cr_conn_acc *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&acc_hdr->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_cr_conn_acc) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rqst) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_tag) ==
+ FCNVME_LSDESC_CONN_ID);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_conn_id) - 8);
+ g_create_conn_test_cnt++;
+ return 0;
+ }
+
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_RJT) {
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+ if (g_create_conn_test_cnt == g_nvmf_transport.opts.max_qpairs_per_ctrlr) {
+ /* expected to get reject for too many connections */
+ CU_ASSERT(rjt->rjt.reason_code ==
+ FCNVME_RJT_RC_INV_PARAM);
+ CU_ASSERT(rjt->rjt.reason_explanation ==
+ FCNVME_RJT_EXP_INV_Q_ID);
+ } else if (!g_max_assoc_conn_test) {
+ CU_FAIL("Unexpected reject response create connection");
+ }
+ } else {
+ CU_FAIL("Unexpected response code for create connection");
+ }
+ } else {
+ CU_FAIL("Response not for create connection");
+ }
+
+ return -EINVAL;
+}
+
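+/* Validate that a Disconnect request was answered with a well-formed LS accept. */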
+static int
+handle_disconn_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_DISCONNECT) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_ACC) {
+ CU_ASSERT(from_be32(&acc_hdr->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_disconnect_acc) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rqst) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ return 0;
+ } else {
+ CU_FAIL("Unexpected reject response for disconnect");
+ }
+ } else {
+		CU_FAIL("Response not for disconnect");
+ }
+
+ return -EINVAL;
+}
+
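+/*
+ * Validate the LS reject returned for a Create IO Connection that references
+ * an invalid association id, including reason code, explanation, and
+ * reserved fields.
+ */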
+static int
+handle_conn_bad_assoc_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_CREATE_CONNECTION) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_RJT) {
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&rjt->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_tag) ==
+ FCNVME_LSDESC_RJT);
+ CU_ASSERT(rjt->rjt.reason_code ==
+ FCNVME_RJT_RC_INV_ASSOC);
+ CU_ASSERT(rjt->rjt.reason_explanation ==
+ FCNVME_RJT_EXP_NONE);
+ /* make sure reserved fields are 0 */
+ CU_ASSERT(rjt->rjt.rsvd8 == 0);
+ CU_ASSERT(rjt->rjt.rsvd12 == 0);
+ return 0;
+ } else {
+ CU_FAIL("Unexpected accept response for create conn. on bad assoc_id");
+ }
+ } else {
+ CU_FAIL("Response not for create connection on bad assoc_id");
+ }
+
+ return -EINVAL;
+}
+
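+/* Validate the LS reject returned for a Disconnect that references an invalid association id. */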
+static int
+handle_disconn_bad_assoc_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_DISCONNECT) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_RJT) {
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&rjt->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_tag) ==
+ FCNVME_LSDESC_RJT);
+ CU_ASSERT(rjt->rjt.reason_code ==
+ FCNVME_RJT_RC_INV_ASSOC);
+ CU_ASSERT(rjt->rjt.reason_explanation ==
+ FCNVME_RJT_EXP_NONE);
+ return 0;
+ } else {
+ CU_FAIL("Unexpected accept response for disconnect on bad assoc_id");
+ }
+ } else {
+		CU_FAIL("Response not for disconnect on bad assoc_id");
+ }
+
+ return -EINVAL;
+}
+
+static struct spdk_nvmf_fc_port g_fc_port = {
+ .num_io_queues = 16,
+};
+
+static struct spdk_nvmf_fc_nport g_tgt_port;
+
+static uint64_t assoc_id[1024];
+
+#define FC_LS_UT_MAX_IO_QUEUES 16
+struct spdk_nvmf_fc_hwqp g_fc_hwqp[FC_LS_UT_MAX_IO_QUEUES];
+struct spdk_nvmf_fc_poll_group g_fgroup[FC_LS_UT_MAX_IO_QUEUES];
+struct spdk_nvmf_poll_group g_poll_group[FC_LS_UT_MAX_IO_QUEUES];
+static bool threads_allocated = false;
+
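+/* Assign the current unit-test thread to any hardware queue pair that does not have one yet. */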
+static void
+ls_assign_hwqp_threads(void)
+{
+ uint32_t i;
+
+ for (i = 0; i < g_fc_port.num_io_queues; i++) {
+ struct spdk_nvmf_fc_hwqp *hwqp = &g_fc_port.io_queues[i];
+ if (hwqp->thread == NULL) {
+ hwqp->thread = spdk_get_thread();
+ }
+ }
+}
+
+static void
+ls_prepare_threads(void)
+{
+ if (threads_allocated == false) {
+ allocate_threads(8);
+ set_thread(0);
+ }
+ threads_allocated = true;
+}
+
+static void
+setup_polling_threads(void)
+{
+ ls_prepare_threads();
+ set_thread(0);
+ ls_assign_hwqp_threads();
+}
+
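+/*
+ * Suite init: set up a fake FC port with num_io_queues hardware queue pairs,
+ * give each one a poll group, initialize the LS layer, and create a target
+ * port with a single remote port.
+ */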
+static int
+ls_tests_init(void)
+{
+ uint16_t i;
+
+ bzero(&g_nvmf_tgt, sizeof(g_nvmf_tgt));
+
+ g_nvmf_transport.opts = g_nvmf_transport_opts;
+
+ snprintf(g_nvmf_subsystem.subnqn, sizeof(g_nvmf_subsystem.subnqn), "%s", fc_ut_subsystem_nqn);
+ g_fc_port.hw_port_status = SPDK_FC_PORT_ONLINE;
+ g_fc_port.io_queues = g_fc_hwqp;
+ for (i = 0; i < g_fc_port.num_io_queues; i++) {
+ struct spdk_nvmf_fc_hwqp *hwqp = &g_fc_port.io_queues[i];
+ hwqp->lcore_id = i;
+ hwqp->hwqp_id = i;
+ hwqp->thread = NULL;
+ hwqp->fc_port = &g_fc_port;
+ hwqp->num_conns = 0;
+ hwqp->rq_size = g_hw_queue_depth;
+ TAILQ_INIT(&hwqp->connection_list);
+ TAILQ_INIT(&hwqp->in_use_reqs);
+
+ bzero(&g_poll_group[i], sizeof(struct spdk_nvmf_poll_group));
+ bzero(&g_fgroup[i], sizeof(struct spdk_nvmf_fc_poll_group));
+ TAILQ_INIT(&g_poll_group[i].tgroups);
+ TAILQ_INIT(&g_poll_group[i].qpairs);
+ g_fgroup[i].group.transport = &g_nvmf_transport;
+ g_fgroup[i].group.group = &g_poll_group[i];
+ hwqp->fgroup = &g_fgroup[i];
+ }
+
+ nvmf_fc_ls_init(&g_fc_port);
+ bzero(&g_tgt_port, sizeof(struct spdk_nvmf_fc_nport));
+ g_tgt_port.fc_port = &g_fc_port;
+ TAILQ_INIT(&g_tgt_port.rem_port_list);
+ TAILQ_INIT(&g_tgt_port.fc_associations);
+
+ bzero(&g_rem_port, sizeof(struct spdk_nvmf_fc_remote_port_info));
+ TAILQ_INSERT_TAIL(&g_tgt_port.rem_port_list, &g_rem_port, link);
+
+ return 0;
+}
+
+static int
+ls_tests_fini(void)
+{
+ nvmf_fc_ls_fini(&g_fc_port);
+ free_threads();
+ return 0;
+}
+
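+/* Create a single association and then disconnect it. */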
+static void
+create_single_assoc_test(void)
+{
+ setup_polling_threads();
+ /* main test driver */
+ g_test_run_type = TEST_RUN_TYPE_CREATE_ASSOC;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+
+ if (g_last_rslt == 0) {
+ /* disconnect the association */
+ g_test_run_type = TEST_RUN_TYPE_DISCONNECT;
+ run_disconn_test(&g_tgt_port, g_curr_assoc_id);
+ g_create_conn_test_cnt = 0;
+ }
+}
+
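+/*
+ * Create one association and keep adding IO connections until the target
+ * rejects the request for exceeding max_qpairs_per_ctrlr, then disconnect.
+ */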
+static void
+create_max_conns_test(void)
+{
+ uint16_t qid = 1;
+
+ setup_polling_threads();
+ /* main test driver */
+ g_test_run_type = TEST_RUN_TYPE_CREATE_ASSOC;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+
+ if (g_last_rslt == 0) {
+ g_test_run_type = TEST_RUN_TYPE_CREATE_CONN;
+ /* create connections until we get too many connections error */
+ while (g_last_rslt == 0) {
+ if (g_create_conn_test_cnt > g_nvmf_transport.opts.max_qpairs_per_ctrlr) {
+ CU_FAIL("Did not get CIOC failure for too many connections");
+ break;
+ }
+ run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, qid++);
+ }
+
+ /* disconnect the association */
+ g_last_rslt = 0;
+ g_test_run_type = TEST_RUN_TYPE_DISCONNECT;
+ run_disconn_test(&g_tgt_port, g_curr_assoc_id);
+ g_create_conn_test_cnt = 0;
+ }
+}
+
+static void
+invalid_connection_test(void)
+{
+ setup_polling_threads();
+ /* run test to create connection to invalid association */
+ g_test_run_type = TEST_RUN_TYPE_CONN_BAD_ASSOC;
+ run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, 1);
+}
+
+static void
+create_max_aq_conns_test(void)
+{
+ /* run test to create max. associations with max. connections */
+ uint32_t i, j;
+ uint32_t create_assoc_test_cnt = 0;
+
+ setup_polling_threads();
+ g_max_assoc_conn_test = 1;
+ g_last_rslt = 0;
+ while (1) {
+ g_test_run_type = TEST_RUN_TYPE_CREATE_MAX_ASSOC;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+ if (g_last_rslt == 0) {
+ assoc_id[create_assoc_test_cnt++] = g_curr_assoc_id;
+ g_test_run_type = TEST_RUN_TYPE_CREATE_CONN;
+ for (j = 1; j < g_nvmf_transport.opts.max_qpairs_per_ctrlr; j++) {
+ if (g_last_rslt == 0) {
+ run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, (uint16_t) j);
+ }
+ }
+ } else {
+ break;
+ }
+ }
+
+ if (g_last_rslt == LAST_RSLT_STOP_TEST) {
+ uint32_t ma = (((g_hw_queue_depth / g_nvmf_transport.opts.max_queue_depth) *
+ (g_fc_port.num_io_queues - 1))) /
+ (g_nvmf_transport.opts.max_qpairs_per_ctrlr - 1);
+ if (create_assoc_test_cnt < ma) {
+ printf("(%d assocs - should be %d) ", create_assoc_test_cnt, ma);
+ CU_FAIL("Didn't create max. associations");
+ } else {
+ printf("(%d assocs.) ", create_assoc_test_cnt);
+ }
+ g_last_rslt = 0;
+ }
+
+ for (i = 0; i < create_assoc_test_cnt; i++) {
+ int ret;
+ g_spdk_nvmf_fc_xmt_srsr_req = false;
+ ret = nvmf_fc_delete_association(&g_tgt_port, from_be64(&assoc_id[i]), true, false,
+ disconnect_assoc_cb, 0);
+ CU_ASSERT(ret == 0);
+ poll_thread(0);
+
+#if (NVMF_FC_LS_SEND_LS_DISCONNECT == 1)
+ if (ret == 0) {
+ /* check that LS disconnect was sent */
+ CU_ASSERT(g_spdk_nvmf_fc_xmt_srsr_req);
+ }
+#endif
+ }
+ g_max_assoc_conn_test = 0;
+}
+
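+/*
+ * Make the LS response transmit fail (the nvmf_fc_xmt_ls_rsp stub returns an
+ * error for this run type) and verify that no association is left behind.
+ */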
+static void
+xmt_ls_rsp_failure_test(void)
+{
+ setup_polling_threads();
+ g_test_run_type = TEST_RUN_TYPE_FAIL_LS_RSP;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+ if (g_last_rslt == 0) {
+ /* check target port for associations */
+ CU_ASSERT(g_tgt_port.assoc_count == 0);
+ }
+}
+
+static void
+disconnect_bad_assoc_test(void)
+{
+ setup_polling_threads();
+ g_test_run_type = TEST_RUN_TYPE_DISCONNECT_BAD_ASSOC;
+ run_disconn_test(&g_tgt_port, 0xffff);
+}
+
+/*
+ * SPDK functions that are called by LS processing
+ */
+
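+/*
+ * Stub of the LS response transmit path: dispatch the response buffer to the
+ * handler for the current test run type. Returning non-zero for the
+ * FAIL_LS_RSP run type simulates a transmit failure.
+ */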
+int
+nvmf_fc_xmt_ls_rsp(struct spdk_nvmf_fc_nport *g_tgt_port,
+ struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ switch (g_test_run_type) {
+ case TEST_RUN_TYPE_CREATE_ASSOC:
+ g_last_rslt = handle_ca_rsp(ls_rqst, false);
+ break;
+ case TEST_RUN_TYPE_CREATE_CONN:
+ g_last_rslt = handle_cc_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_DISCONNECT:
+ g_last_rslt = handle_disconn_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_CONN_BAD_ASSOC:
+ g_last_rslt = handle_conn_bad_assoc_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_FAIL_LS_RSP:
+ g_last_rslt = handle_ca_rsp(ls_rqst, false);
+ return 1;
+ case TEST_RUN_TYPE_DISCONNECT_BAD_ASSOC:
+ g_last_rslt = handle_disconn_bad_assoc_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_CREATE_MAX_ASSOC:
+ g_last_rslt = handle_ca_rsp(ls_rqst, true);
+ break;
+
+ default:
+ CU_FAIL("LS Response for Invalid Test Type");
+ g_last_rslt = 1;
+ }
+
+ return 0;
+}
+
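+/*
+ * Stub of the send-request path used to transmit an LS Disconnect to the
+ * initiator: verify the request contents, record that it was sent, and call
+ * the caller's completion callback.
+ */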
+int
+nvmf_fc_xmt_srsr_req(struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_srsr_bufs *srsr_bufs,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args)
+{
+ struct spdk_nvmf_fc_ls_disconnect_rqst *dc_rqst =
+ (struct spdk_nvmf_fc_ls_disconnect_rqst *)
+ srsr_bufs->rqst;
+
+ CU_ASSERT(dc_rqst->w0.ls_cmd == FCNVME_LS_DISCONNECT);
+ CU_ASSERT(from_be32(&dc_rqst->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst) -
+ (2 * sizeof(uint32_t)));
+ CU_ASSERT(from_be32(&dc_rqst->assoc_id.desc_tag) ==
+ FCNVME_LSDESC_ASSOC_ID);
+ CU_ASSERT(from_be32(&dc_rqst->assoc_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
+ (2 * sizeof(uint32_t)));
+
+ g_spdk_nvmf_fc_xmt_srsr_req = true;
+
+ if (cb) {
+ cb(hwqp, 0, cb_args);
+ }
+
+ return 0;
+}
+
+DEFINE_STUB_V(nvmf_fc_request_abort, (struct spdk_nvmf_fc_request *fc_req,
+ bool send_abts, spdk_nvmf_fc_caller_cb cb, void *cb_args));
+DEFINE_STUB_V(spdk_bdev_io_abort, (struct spdk_bdev_io *bdev_io, void *ctx));
+DEFINE_STUB_V(nvmf_fc_request_abort_complete, (void *arg1));
+
+static void
+usage(const char *program_name)
+{
+ printf("%s [options]\n", program_name);
+ printf("options:\n");
+ spdk_log_usage(stdout, "-t");
+ printf(" -i value - Number of IO Queues (default: %u)\n",
+ g_fc_port.num_io_queues);
+ printf(" -d value - HW queue depth (default: %u)\n",
+ g_hw_queue_depth);
+ printf(" -q value - SQ size (default: %u)\n",
+ g_nvmf_transport_opts.max_queue_depth);
+ printf(" -c value - Connection count (default: %u)\n",
+ g_nvmf_transport_opts.max_qpairs_per_ctrlr);
+ printf(" -u test# - Unit test# to run\n");
+ printf(" 0 : Run all tests (default)\n");
+ printf(" 1 : CASS/DISC create single assoc test\n");
+ printf(" 2 : Max. conns. test\n");
+ printf(" 3 : CIOC to invalid assoc_id connection test\n");
+ printf(" 4 : Create/delete max assoc conns test\n");
+ printf(" 5 : LS response failure test\n");
+ printf(" 6 : Disconnect bad assoc_id test\n");
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int num_failures = 0;
+ CU_pSuite suite = NULL;
+ int test = 0;
+ long int val;
+ int op;
+
+ while ((op = getopt(argc, argv, "a:q:c:t:u:d:i:")) != -1) {
+ switch (op) {
+ case 'q':
+ val = spdk_strtol(optarg, 10);
+ if (val < 16) {
+ fprintf(stderr, "SQ size must be at least 16\n");
+ return -EINVAL;
+ }
+ g_nvmf_transport_opts.max_queue_depth = (uint16_t)val;
+ break;
+ case 'c':
+ val = spdk_strtol(optarg, 10);
+ if (val < 2) {
+ fprintf(stderr, "Connection count must be at least 2\n");
+ return -EINVAL;
+ }
+ g_nvmf_transport_opts.max_qpairs_per_ctrlr = (uint16_t)val;
+ break;
+ case 't':
+ if (spdk_log_set_flag(optarg) < 0) {
+ fprintf(stderr, "Unknown trace flag '%s'\n", optarg);
+ usage(argv[0]);
+ return -EINVAL;
+ }
+ break;
+ case 'u':
+ test = (int)spdk_strtol(optarg, 10);
+ break;
+ case 'd':
+ val = spdk_strtol(optarg, 10);
+ if (val < 16) {
+ fprintf(stderr, "HW queue depth must be at least 16\n");
+ return -EINVAL;
+ }
+ g_hw_queue_depth = (uint32_t)val;
+ break;
+ case 'i':
+ val = spdk_strtol(optarg, 10);
+ if (val < 2) {
+ fprintf(stderr, "Number of io queues must be at least 2\n");
+ return -EINVAL;
+ }
+ if (val > FC_LS_UT_MAX_IO_QUEUES) {
+ fprintf(stderr, "Number of io queues can't be greater than %d\n",
+ FC_LS_UT_MAX_IO_QUEUES);
+ return -EINVAL;
+ }
+ g_fc_port.num_io_queues = (uint32_t)val;
+ break;
+
+ default:
+ usage(argv[0]);
+ return -EINVAL;
+ }
+ }
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("FC-NVMe LS", ls_tests_init, ls_tests_fini);
+
+ if (test == 0) {
+
+ CU_ADD_TEST(suite, create_single_assoc_test);
+
+ CU_ADD_TEST(suite, create_max_conns_test);
+ CU_ADD_TEST(suite, invalid_connection_test);
+ CU_ADD_TEST(suite, disconnect_bad_assoc_test);
+
+ CU_ADD_TEST(suite, create_max_aq_conns_test);
+ CU_ADD_TEST(suite, xmt_ls_rsp_failure_test);
+
+ } else {
+
+ switch (test) {
+ case 1:
+ CU_ADD_TEST(suite, create_single_assoc_test);
+ break;
+ case 2:
+ CU_ADD_TEST(suite, create_max_conns_test);
+ break;
+ case 3:
+ CU_ADD_TEST(suite, invalid_connection_test);
+ break;
+ case 4:
+ CU_ADD_TEST(suite, create_max_aq_conns_test);
+ break;
+ case 5:
+ CU_ADD_TEST(suite, xmt_ls_rsp_failure_test);
+ break;
+ case 6:
+ CU_ADD_TEST(suite, disconnect_bad_assoc_test);
+ break;
+
+ default:
+ fprintf(stderr, "Invalid test number\n");
+ usage(argv[0]);
+ CU_cleanup_registry();
+ return -EINVAL;
+ }
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore b/src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore
new file mode 100644
index 000000000..0adb59d10
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore
@@ -0,0 +1 @@
+rdma_ut
diff --git a/src/spdk/test/unit/lib/nvmf/rdma.c/Makefile b/src/spdk/test/unit/lib/nvmf/rdma.c/Makefile
new file mode 100644
index 000000000..ad4998663
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/rdma.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = rdma_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c b/src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c
new file mode 100644
index 000000000..b0af58d18
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c
@@ -0,0 +1,1283 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "common/lib/test_rdma.c"
+#include "nvmf/rdma.c"
+#include "nvmf/transport.c"
+
+uint64_t g_mr_size;
+uint64_t g_mr_next_size;
+struct ibv_mr g_rdma_mr;
+
+#define RDMA_UT_UNITS_IN_MAX_IO 16
+
+struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
+ .max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
+ .max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
+ .in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
+ .max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
+ .io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
+ .max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
+ .num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
+};
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size, uint64_t translation), 0);
+DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size), 0);
+DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
+ const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
+DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
+ nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
+DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
+ struct spdk_nvmf_ctrlr_data *cdata));
+DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
+DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2), 0);
+DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
+ struct spdk_dif_ctx *dif_ctx), false);
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
+DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
+
+const char *
+spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
+{
+ switch (trtype) {
+ case SPDK_NVME_TRANSPORT_PCIE:
+ return "PCIe";
+ case SPDK_NVME_TRANSPORT_RDMA:
+ return "RDMA";
+ case SPDK_NVME_TRANSPORT_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+int
+spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
+{
+ int len, i;
+
+ if (trstring == NULL) {
+ return -EINVAL;
+ }
+
+ len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
+ if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
+ return -EINVAL;
+ }
+
+	/* Set the official trstring to the uppercase version of the input. */
+ for (i = 0; i < len; i++) {
+ trid->trstring[i] = toupper(trstring[i]);
+ }
+ return 0;
+}
+
+uint64_t
+spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
+{
+ if (g_mr_size != 0) {
+		*size = g_mr_size;
+ if (g_mr_next_size != 0) {
+ g_mr_size = g_mr_next_size;
+ }
+ }
+
+ return (uint64_t)&g_rdma_mr;
+}
+
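+/* Clear the request fields that parse_sgl fills in so each sub-test starts from a clean state. */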
+static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
+{
+ int i;
+
+ rdma_req->req.length = 0;
+ rdma_req->req.data_from_pool = false;
+ rdma_req->req.data = NULL;
+ rdma_req->data.wr.num_sge = 0;
+ rdma_req->data.wr.wr.rdma.remote_addr = 0;
+ rdma_req->data.wr.wr.rdma.rkey = 0;
+ memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
+
+ for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
+ rdma_req->req.iov[i].iov_base = 0;
+ rdma_req->req.iov[i].iov_len = 0;
+ rdma_req->req.buffers[i] = 0;
+ rdma_req->data.wr.sg_list[i].addr = 0;
+ rdma_req->data.wr.sg_list[i].length = 0;
+ rdma_req->data.wr.sg_list[i].lkey = 0;
+ }
+ rdma_req->req.iovcnt = 0;
+}
+
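+/*
+ * Exercise nvmf_rdma_request_parse_sgl() with keyed SGLs, in-capsule data,
+ * multi-SGL segments, the poll-group buffer cache, and a buffer that spans
+ * two memory regions.
+ */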
+static void
+test_spdk_nvmf_rdma_request_parse_sgl(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport;
+ struct spdk_nvmf_rdma_device device;
+ struct spdk_nvmf_rdma_request rdma_req = {};
+ struct spdk_nvmf_rdma_recv recv;
+ struct spdk_nvmf_rdma_poll_group group;
+ struct spdk_nvmf_rdma_qpair rqpair;
+ struct spdk_nvmf_rdma_poller poller;
+ union nvmf_c2h_msg cpl;
+ union nvmf_h2c_msg cmd;
+ struct spdk_nvme_sgl_descriptor *sgl;
+ struct spdk_nvmf_transport_pg_cache_buf bufs[4];
+ struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
+ struct spdk_nvmf_rdma_request_data data;
+ struct spdk_nvmf_transport_pg_cache_buf buffer;
+ struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
+ int rc, i;
+
+ data.wr.sg_list = data.sgl;
+ STAILQ_INIT(&group.group.buf_cache);
+ group.group.buf_cache_size = 0;
+ group.group.buf_cache_count = 0;
+ group.group.transport = &rtransport.transport;
+ STAILQ_INIT(&group.retired_bufs);
+ poller.group = &group;
+ rqpair.poller = &poller;
+ rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
+
+ sgl = &cmd.nvme_cmd.dptr.sgl1;
+ rdma_req.recv = &recv;
+ rdma_req.req.cmd = &cmd;
+ rdma_req.req.rsp = &cpl;
+ rdma_req.data.wr.sg_list = rdma_req.data.sgl;
+ rdma_req.req.qpair = &rqpair.qpair;
+ rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+
+ rtransport.transport.opts = g_rdma_ut_transport_opts;
+ rtransport.data_wr_pool = NULL;
+ rtransport.transport.data_buf_pool = NULL;
+
+ device.attr.device_cap_flags = 0;
+ g_rdma_mr.lkey = 0xABCD;
+ sgl->keyed.key = 0xEEEE;
+ sgl->address = 0xFFFF;
+ rdma_req.recv->buf = (void *)0xDDDD;
+
+ /* Test 1: sgl type: keyed data block subtype: address */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+
+ /* Part 1: simple I/O, one SGL smaller than the transport io unit size */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
+
+ device.map = (void *)0x0;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+
+ /* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
+ CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+
+	/* Part 3: simple I/O, one SGL larger than the transport max io size */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == -1);
+
+ /* Part 4: Pretend there are no buffer pools */
+ MOCK_SET(spdk_mempool_get, NULL);
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == false);
+ CU_ASSERT(rdma_req.req.data == NULL);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 0);
+ CU_ASSERT(rdma_req.req.buffers[0] == NULL);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);
+
+ rdma_req.recv->buf = (void *)0xDDDD;
+	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
+
+ /* Part 1: Normal I/O smaller than in capsule data size no offset */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->address = 0;
+ sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
+ CU_ASSERT(rdma_req.req.data_from_pool == false);
+
+ /* Part 2: I/O offset + length too large */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->address = rtransport.transport.opts.in_capsule_data_size;
+ sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == -1);
+
+ /* Part 3: I/O too large */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->address = 0;
+ sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == -1);
+
+ /* Test 3: Multi SGL */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
+ sgl->address = 0;
+ rdma_req.recv->buf = (void *)&sgl_desc;
+ MOCK_SET(spdk_mempool_get, &data);
+
+ /* part 1: 2 segments each with 1 wr. */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
+ sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
+ CU_ASSERT(data.wr.num_sge == 1);
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+
+ /* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
+ sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
+ CU_ASSERT(rdma_req.req.iovcnt == 16);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 8);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
+ CU_ASSERT(data.wr.num_sge == 8);
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+
+ /* part 3: 2 segments, one very large, one very small */
+ reset_nvmf_rdma_request(&rdma_req);
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
+ rtransport.transport.opts.io_unit_size / 2;
+ sgl_desc[0].address = 0x4000;
+ sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
+ sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
+ rtransport.transport.opts.io_unit_size / 2;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
+ CU_ASSERT(rdma_req.req.iovcnt == 17);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 16);
+ for (i = 0; i < 15; i++) {
+ CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
+ }
+ CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
+ rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(data.wr.num_sge == 1);
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+
+ /* Test 4: use PG buffer cache */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl->address = 0xFFFF;
+ rdma_req.recv->buf = (void *)0xDDDD;
+ g_rdma_mr.lkey = 0xABCD;
+ sgl->keyed.key = 0xEEEE;
+
+ for (i = 0; i < 4; i++) {
+ STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
+ }
+
+ /* part 1: use the four buffers from the pg cache */
+ group.group.buf_cache_size = 4;
+ group.group.buf_cache_count = 4;
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ }
+
+ /* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
+ reset_nvmf_rdma_request(&rdma_req);
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ }
+
+ /* part 3: half and half */
+ group.group.buf_cache_count = 2;
+
+ for (i = 0; i < 2; i++) {
+ STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
+ }
+ reset_nvmf_rdma_request(&rdma_req);
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ for (i = 0; i < 2; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ }
+ for (i = 2; i < 4; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ }
+
+ reset_nvmf_rdma_request(&rdma_req);
+	/* Test 5: dealing with a buffer split over two memory regions */
+ MOCK_SET(spdk_mempool_get, (void *)&buffer);
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
+ g_mr_size = rtransport.transport.opts.io_unit_size / 4;
+ g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+ buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
+ CU_ASSERT(buffer_ptr == &buffer);
+ STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
+ CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
+ g_mr_size = 0;
+ g_mr_next_size = 0;
+
+ reset_nvmf_rdma_request(&rdma_req);
+}
+
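+/* Helpers that build and tear down minimal recv/request objects for the state machine tests. */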
+static struct spdk_nvmf_rdma_recv *
+create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
+{
+ struct spdk_nvmf_rdma_recv *rdma_recv;
+ union nvmf_h2c_msg *cmd;
+ struct spdk_nvme_sgl_descriptor *sgl;
+
+ rdma_recv = calloc(1, sizeof(*rdma_recv));
+ rdma_recv->qpair = rqpair;
+ cmd = calloc(1, sizeof(*cmd));
+ rdma_recv->sgl[0].addr = (uintptr_t)cmd;
+ cmd->nvme_cmd.opc = opc;
+ sgl = &cmd->nvme_cmd.dptr.sgl1;
+ sgl->keyed.key = 0xEEEE;
+ sgl->address = 0xFFFF;
+ sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl->keyed.length = 1;
+
+ return rdma_recv;
+}
+
+static void
+free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
+{
+ free((void *)rdma_recv->sgl[0].addr);
+ free(rdma_recv);
+}
+
+static struct spdk_nvmf_rdma_request *
+create_req(struct spdk_nvmf_rdma_qpair *rqpair,
+ struct spdk_nvmf_rdma_recv *rdma_recv)
+{
+ struct spdk_nvmf_rdma_request *rdma_req;
+ union nvmf_c2h_msg *cpl;
+
+ rdma_req = calloc(1, sizeof(*rdma_req));
+ rdma_req->recv = rdma_recv;
+ rdma_req->req.qpair = &rqpair->qpair;
+ rdma_req->state = RDMA_REQUEST_STATE_NEW;
+ rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
+ rdma_req->data.wr.sg_list = rdma_req->data.sgl;
+ cpl = calloc(1, sizeof(*cpl));
+ rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
+ rdma_req->req.rsp = cpl;
+
+ return rdma_req;
+}
+
+static void
+free_req(struct spdk_nvmf_rdma_request *rdma_req)
+{
+ free((void *)rdma_req->rsp.sgl[0].addr);
+ free(rdma_req);
+}
+
+static void
+qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
+ struct spdk_nvmf_rdma_poller *poller,
+ struct spdk_nvmf_rdma_device *device,
+ struct spdk_nvmf_rdma_resources *resources)
+{
+ memset(rqpair, 0, sizeof(*rqpair));
+ STAILQ_INIT(&rqpair->pending_rdma_write_queue);
+ STAILQ_INIT(&rqpair->pending_rdma_read_queue);
+ rqpair->poller = poller;
+ rqpair->device = device;
+ rqpair->resources = resources;
+ rqpair->qpair.qid = 1;
+ rqpair->ibv_state = IBV_QPS_RTS;
+ rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
+ rqpair->max_send_depth = 16;
+ rqpair->max_read_depth = 16;
+ resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
+}
+
+static void
+poller_reset(struct spdk_nvmf_rdma_poller *poller,
+ struct spdk_nvmf_rdma_poll_group *group)
+{
+ memset(poller, 0, sizeof(*poller));
+ STAILQ_INIT(&poller->qpairs_pending_recv);
+ STAILQ_INIT(&poller->qpairs_pending_send);
+ poller->group = group;
+}
+
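+/*
+ * Drive nvmf_rdma_request_process() through the READ, WRITE, and batched
+ * WRITE+WRITE state transitions and check the resulting request states.
+ */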
+static void
+test_spdk_nvmf_rdma_request_process(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport = {};
+ struct spdk_nvmf_rdma_poll_group group = {};
+ struct spdk_nvmf_rdma_poller poller = {};
+ struct spdk_nvmf_rdma_device device = {};
+ struct spdk_nvmf_rdma_resources resources = {};
+ struct spdk_nvmf_rdma_qpair rqpair = {};
+ struct spdk_nvmf_rdma_recv *rdma_recv;
+ struct spdk_nvmf_rdma_request *rdma_req;
+ bool progress;
+
+ STAILQ_INIT(&group.group.buf_cache);
+ STAILQ_INIT(&group.group.pending_buf_queue);
+ group.group.buf_cache_size = 0;
+ group.group.buf_cache_count = 0;
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+
+ rtransport.transport.opts = g_rdma_ut_transport_opts;
+ rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
+ rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
+ sizeof(struct spdk_nvmf_rdma_request_data),
+ 0, 0);
+ MOCK_CLEAR(spdk_mempool_get);
+
+ device.attr.device_cap_flags = 0;
+ device.map = (void *)0x0;
+ g_rdma_mr.lkey = 0xABCD;
+
+ /* Test 1: single SGL READ request */
+ rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
+ rdma_req = create_req(&rqpair, rdma_recv);
+ rqpair.current_recv_depth = 1;
+ /* NEW -> EXECUTING */
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
+ CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+ /* EXECUTED -> TRANSFERRING_C2H */
+ rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
+ CU_ASSERT(rdma_req->recv == NULL);
+ CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
+ CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
+ /* COMPLETED -> FREE */
+ rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
+
+ free_recv(rdma_recv);
+ free_req(rdma_req);
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+
+ /* Test 2: single SGL WRITE request */
+ rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
+ rdma_req = create_req(&rqpair, rdma_recv);
+ rqpair.current_recv_depth = 1;
+ /* NEW -> TRANSFERRING_H2C */
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+ CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
+ STAILQ_INIT(&poller.qpairs_pending_send);
+ /* READY_TO_EXECUTE -> EXECUTING */
+ rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
+ /* EXECUTED -> COMPLETING */
+ rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
+ CU_ASSERT(rdma_req->recv == NULL);
+ CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
+ CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
+ /* COMPLETED -> FREE */
+ rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
+
+ free_recv(rdma_recv);
+ free_req(rdma_req);
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+
+ /* Test 3: WRITE+WRITE ibv_send batching */
+ {
+ struct spdk_nvmf_rdma_recv *recv1, *recv2;
+ struct spdk_nvmf_rdma_request *req1, *req2;
+ recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
+ req1 = create_req(&rqpair, recv1);
+ recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
+ req2 = create_req(&rqpair, recv2);
+
+ /* WRITE 1: NEW -> TRANSFERRING_H2C */
+ rqpair.current_recv_depth = 1;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+
+ /* WRITE 2: NEW -> TRANSFERRING_H2C */
+ rqpair.current_recv_depth = 2;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+
+ STAILQ_INIT(&poller.qpairs_pending_send);
+
+ /* WRITE 1 completes before WRITE 2 has finished RDMA reading */
+ /* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
+ req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
+ /* WRITE 1: EXECUTED -> COMPLETING */
+ req1->state = RDMA_REQUEST_STATE_EXECUTED;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
+ STAILQ_INIT(&poller.qpairs_pending_send);
+ /* WRITE 1: COMPLETED -> FREE */
+ req1->state = RDMA_REQUEST_STATE_COMPLETED;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);
+
+		/* Now WRITE 2 has finished reading and completes */
+		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
+ req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
+		/* WRITE 2: EXECUTED -> COMPLETING */
+ req2->state = RDMA_REQUEST_STATE_EXECUTED;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
+ STAILQ_INIT(&poller.qpairs_pending_send);
+		/* WRITE 2: COMPLETED -> FREE */
+ req2->state = RDMA_REQUEST_STATE_COMPLETED;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);
+
+ free_recv(recv1);
+ free_req(req1);
+ free_recv(recv2);
+ free_req(req2);
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+ }
+
+ spdk_mempool_free(rtransport.transport.data_buf_pool);
+ spdk_mempool_free(rtransport.data_wr_pool);
+}
+
+#define TEST_GROUPS_COUNT 5
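+/*
+ * Verify that admin and IO qpairs are spread across poll groups round-robin
+ * and that the next-group pointers stay consistent as groups are destroyed.
+ */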
+static void
+test_nvmf_rdma_get_optimal_poll_group(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport = {};
+ struct spdk_nvmf_transport *transport = &rtransport.transport;
+ struct spdk_nvmf_rdma_qpair rqpair = {};
+ struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
+ struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
+ struct spdk_nvmf_transport_poll_group *result;
+ uint32_t i;
+
+ rqpair.qpair.transport = transport;
+ pthread_mutex_init(&rtransport.lock, NULL);
+ TAILQ_INIT(&rtransport.poll_groups);
+
+ for (i = 0; i < TEST_GROUPS_COUNT; i++) {
+ groups[i] = nvmf_rdma_poll_group_create(transport);
+ CU_ASSERT(groups[i] != NULL);
+ rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
+ groups[i]->transport = transport;
+ }
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);
+
+ /* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
+ for (i = 0; i < TEST_GROUPS_COUNT; i++) {
+ rqpair.qpair.qid = 0;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[i]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);
+
+ rqpair.qpair.qid = 1;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[i]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
+ }
+	/* Wrap around: the next admin/io pg pointers now point back to the first pg.
+	 * Destroy all poll groups except the last one. */
+ for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
+ nvmf_rdma_poll_group_destroy(groups[i]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
+ }
+
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+
+ /* Check that pointers to the next admin/io poll groups are not changed */
+ rqpair.qpair.qid = 0;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+
+ rqpair.qpair.qid = 1;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+
+ /* Remove the last poll group, check that pointers are NULL */
+ nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);
+
+ /* Request optimal poll group, result must be NULL */
+ rqpair.qpair.qid = 0;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == NULL);
+
+ rqpair.qpair.qid = 1;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == NULL);
+
+ pthread_mutex_destroy(&rtransport.lock);
+}
+#undef TEST_GROUPS_COUNT
+
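+/*
+ * Exercise nvmf_rdma_request_parse_sgl() with DIF insert/strip enabled so the
+ * extended LBA (data + metadata) layout drives the SGE splitting.
+ */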
+static void
+test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport;
+ struct spdk_nvmf_rdma_device device;
+ struct spdk_nvmf_rdma_request rdma_req = {};
+ struct spdk_nvmf_rdma_recv recv;
+ struct spdk_nvmf_rdma_poll_group group;
+ struct spdk_nvmf_rdma_qpair rqpair;
+ struct spdk_nvmf_rdma_poller poller;
+ union nvmf_c2h_msg cpl;
+ union nvmf_h2c_msg cmd;
+ struct spdk_nvme_sgl_descriptor *sgl;
+ struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
+ struct spdk_nvmf_rdma_request_data data;
+ struct spdk_nvmf_transport_pg_cache_buf buffer;
+ struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
+ const uint32_t data_bs = 512;
+ const uint32_t md_size = 8;
+ int rc, i;
+ void *aligned_buffer;
+
+ data.wr.sg_list = data.sgl;
+ STAILQ_INIT(&group.group.buf_cache);
+ group.group.buf_cache_size = 0;
+ group.group.buf_cache_count = 0;
+ group.group.transport = &rtransport.transport;
+ STAILQ_INIT(&group.retired_bufs);
+ poller.group = &group;
+ rqpair.poller = &poller;
+ rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
+
+ sgl = &cmd.nvme_cmd.dptr.sgl1;
+ rdma_req.recv = &recv;
+ rdma_req.req.cmd = &cmd;
+ rdma_req.req.rsp = &cpl;
+ rdma_req.data.wr.sg_list = rdma_req.data.sgl;
+ rdma_req.req.qpair = &rqpair.qpair;
+ rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+
+ rtransport.transport.opts = g_rdma_ut_transport_opts;
+ rtransport.data_wr_pool = NULL;
+ rtransport.transport.data_buf_pool = NULL;
+
+ device.attr.device_cap_flags = 0;
+ device.map = NULL;
+ g_rdma_mr.lkey = 0xABCD;
+ sgl->keyed.key = 0xEEEE;
+ sgl->address = 0xFFFF;
+ rdma_req.recv->buf = (void *)0xDDDD;
+
+ /* Test 1: sgl type: keyed data block subtype: address */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+
+ /* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 8;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+
+ /* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
+ block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 4;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 5);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 3; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);
+
+ /* 2nd buffer consumed */
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);
+
+ /* Part 3: simple I/O, one SGL equal to io unit size, io_unit_size is equal to block size 512 bytes */
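+ /* With io_unit_size equal to the bare block size the extended block does not fit into one buffer:
+ the data fills the first buffer and the metadata alone occupies a second one (iovcnt == 2), while
+ the data WR still carries a single SGE */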
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs;
+ sgl->keyed.length = data_bs;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+
+ CU_ASSERT(rdma_req.req.iovcnt == 2);
+ CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
+ CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
+ /* 2nd buffer consumed for metadata */
+ CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
+ CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);
+
+ /* Part 4: simple I/O, one SGL equal to io unit size, io_unit_size is aligned with md_size,
+ block size 512 */
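+ /* Here io_unit_size matches four extended blocks exactly, so all four data SGEs come from a
+ single buffer and no block is split */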
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+
+ /* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
+ block size 512 */
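+ /* Each io_unit holds exactly two extended blocks, so the four data blocks are spread over two
+ buffers (the mocked mempool returns 0x2000 for both), two SGEs per buffer */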
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 2; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ }
+ for (i = 0; i < 2; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
+ }
+
+ /* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
+ block size 512 */
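+ /* Six data blocks need two buffers: the first holds three full blocks plus 488 bytes of the
+ fourth, the second starts with the remaining 24 bytes and the fourth block's metadata before
+ blocks five and six, for 7 SGEs in total */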
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 4;
+ sgl->keyed.length = data_bs * 6;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 6);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 7);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 3; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);
+
+ /* 2nd IO buffer consumed */
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
+ CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == g_rdma_mr.lkey);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
+ CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == g_rdma_mr.lkey);
+
+ /* Part 7: simple I/O, the number of SGL entries exceeds the number of entries
+ one WR can hold. An additional WR is chained */
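+ /* Sixteen extended blocks do not fit into an io_unit of data_bs * 16 bytes, so the split of the
+ last block requires a 17th SGE: the first WR is filled with 16 SGEs and the remaining SGE goes
+ into an additional WR taken from the data WR pool, chained ahead of the response WR */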
+ MOCK_SET(spdk_mempool_get, &data);
+ aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 16;
+ sgl->keyed.length = data_bs * 16;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 16);
+ CU_ASSERT(rdma_req.req.iovcnt == 2);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
+ CU_ASSERT(rdma_req.req.data == aligned_buffer);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 16);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ /* additional wr from pool */
+ CU_ASSERT(rdma_req.data.wr.next == (void *)&data.wr);
+ CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);
+
+ /* Part 8: simple I/O, data with metadata does not fit into one io_buffer */
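+ /* An io_unit of 516 bytes holds one 512-byte data block plus 4 bytes of its metadata; the rest of
+ that block's metadata starts the second buffer, so the second data SGE begins at offset 4 */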
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = 516;
+ sgl->keyed.length = data_bs * 2;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 2);
+ CU_ASSERT(rdma_req.req.iovcnt == 3);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
+ CU_ASSERT(rdma_req.req.data == (void *)0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 2);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+
+ /* 2nd IO buffer consumed; the data starts at an offset of 4 bytes because part of the metadata
+ is located at the beginning of that buffer */
+ CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
+ CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == g_rdma_mr.lkey);
+
+ /* Part 9: a buffer split over two Memory Regions */
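+ /* g_mr_size and g_mr_next_size simulate a data buffer whose memory registration covers only
+ data_bs bytes; the parser retires that buffer to the poll group's retired_bufs list and pulls a
+ replacement from the pool (the mocked mempool returns the same buffer again) */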
+ MOCK_SET(spdk_mempool_get, (void *)&buffer);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 4;
+ sgl->keyed.length = data_bs * 2;
+ g_mr_size = data_bs;
+ g_mr_next_size = rtransport.transport.opts.io_unit_size;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 2);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
+ for (i = 0; i < 2; i++) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uint64_t)rdma_req.req.data + i *
+ (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+ buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
+ CU_ASSERT(buffer_ptr == &buffer);
+ STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
+ CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
+ g_mr_size = 0;
+ g_mr_next_size = 0;
+
+ /* Test 2: Multi SGL */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
+ sgl->address = 0;
+ rdma_req.recv->buf = (void *)&sgl_desc;
+ MOCK_SET(spdk_mempool_get, &data);
+ aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK);
+
+ /* Part 1: 2 segments, each with 1 WR. io_unit_size is aligned with data_bs + md_size */
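+ /* Each keyed descriptor becomes its own RDMA WR with 4 SGEs laid out over extended
+ (data_bs + md_size) blocks; the second WR comes from the data WR pool and the response WR is
+ chained last */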
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
+ sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
+
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.length = data_bs * 4;
+ sgl_desc[i].address = 0x4000 + i * data_bs * 4;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
+ (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ }
+
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
+ CU_ASSERT(data.wr.num_sge == 4);
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
+ (data_bs + md_size));
+ CU_ASSERT(data.wr.sg_list[i].length == data_bs);
+ }
+
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
+ CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
+ CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
+ CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore b/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore
new file mode 100644
index 000000000..76ca0d330
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore
@@ -0,0 +1 @@
+subsystem_ut
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile b/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile
new file mode 100644
index 000000000..3d5fa6c8e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = json
+TEST_FILE = subsystem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c b/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c
new file mode 100644
index 000000000..149c22da1
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c
@@ -0,0 +1,1342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "common/lib/ut_multithread.c"
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+#include "spdk_internal/thread.h"
+
+#include "nvmf/subsystem.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB(spdk_bdev_module_claim_bdev,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+
+DEFINE_STUB_V(spdk_bdev_module_release_bdev,
+ (struct spdk_bdev *bdev));
+
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
+ (const struct spdk_bdev *bdev), 512);
+
+DEFINE_STUB(spdk_bdev_get_md_size, uint32_t,
+ (const struct spdk_bdev *bdev), 0);
+
+DEFINE_STUB(spdk_bdev_is_md_interleaved, bool,
+ (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_nvmf_transport_stop_listen,
+ int,
+ (struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid), 0);
+
+int
+spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return 0;
+}
+
+void
+nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
+ struct spdk_nvme_transport_id *trid,
+ struct spdk_nvmf_discovery_log_page_entry *entry)
+{
+ entry->trtype = 42;
+}
+
+static struct spdk_nvmf_transport g_transport = {};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_transport_create(const char *transport_name,
+ struct spdk_nvmf_transport_opts *tprt_opts)
+{
+ if (strcasecmp(transport_name, spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA))) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ return NULL;
+}
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
+{
+ if (strncmp(transport_name, SPDK_NVME_TRANSPORT_NAME_RDMA, SPDK_NVMF_TRSTRING_MAX_LEN)) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+int
+nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem)
+{
+ return 0;
+}
+
+int
+nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+void
+nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+int
+spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
+{
+ if (trtype == NULL || str == NULL) {
+ return -EINVAL;
+ }
+
+ if (strcasecmp(str, "PCIe") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_PCIE;
+ } else if (strcasecmp(str, "RDMA") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_RDMA;
+ } else {
+ return -ENOENT;
+ }
+ return 0;
+}
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+int32_t
+spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return -1;
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ return -1;
+}
+
+int
+spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return -1;
+}
+
+void
+nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+static struct spdk_nvmf_ctrlr *g_ns_changed_ctrlr = NULL;
+static uint32_t g_ns_changed_nsid = 0;
+void
+nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
+{
+ g_ns_changed_ctrlr = ctrlr;
+ g_ns_changed_nsid = nsid;
+}
+
+int
+spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
+ void *event_ctx, struct spdk_bdev_desc **_desc)
+{
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+static void
+test_spdk_nvmf_subsystem_add_ns(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem subsystem = {
+ .max_nsid = 0,
+ .ns = NULL,
+ .tgt = &tgt
+ };
+ struct spdk_bdev bdev1 = {}, bdev2 = {};
+ struct spdk_nvmf_ns_opts ns_opts;
+ uint32_t nsid;
+ int rc;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Allow NSID to be assigned automatically */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev1, &ns_opts, sizeof(ns_opts), NULL);
+ /* NSID 1 is the first unused ID */
+ CU_ASSERT(nsid == 1);
+ CU_ASSERT(subsystem.max_nsid == 1);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
+ CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &bdev1);
+
+ /* Request a specific NSID */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 5;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 5);
+ CU_ASSERT(subsystem.max_nsid == 5);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
+ CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &bdev2);
+
+ /* Request an NSID that is already in use */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 5;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 0);
+ CU_ASSERT(subsystem.max_nsid == 5);
+
+ /* Request 0xFFFFFFFF (invalid NSID, reserved for broadcast) */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 0xFFFFFFFF;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 0);
+ CU_ASSERT(subsystem.max_nsid == 5);
+
+ rc = spdk_nvmf_subsystem_remove_ns(&subsystem, 1);
+ CU_ASSERT(rc == 0);
+ rc = spdk_nvmf_subsystem_remove_ns(&subsystem, 5);
+ CU_ASSERT(rc == 0);
+
+ free(subsystem.ns);
+ free(tgt.subsystems);
+}
+
+static void
+nvmf_test_create_subsystem(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ char nqn[256];
+ struct spdk_nvmf_subsystem *subsystem;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* valid name with complex reverse domain */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-full--rev-domain.name:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Valid name discovery controller */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+
+ /* Invalid name, no user supplied string */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Valid name, only contains top-level domain name */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid name, domain label > 63 characters */
+ snprintf(nqn, sizeof(nqn),
+ "nqn.2016-06.io.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz:sub");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label starts with digit */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.3spdk:sub");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label starts with - */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.-spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label ends with - */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label with multiple consecutive periods */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io..spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Longest valid name */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ memset(nqn + strlen(nqn), 'a', 223 - strlen(nqn));
+ nqn[223] = '\0';
+ CU_ASSERT(strlen(nqn) == 223);
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid name, too long */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ memset(nqn + strlen(nqn), 'a', 224 - strlen(nqn));
+ nqn[224] = '\0';
+ CU_ASSERT(strlen(nqn) == 224);
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ CU_ASSERT(subsystem == NULL);
+
+ /* Valid name using uuid format */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:11111111-aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid name user string contains an invalid utf-8 character */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xFFsubsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Valid name with non-ascii but valid utf-8 characters */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xe1\x8a\x88subsystem1\xca\x80");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid uuid (too long) */
+ snprintf(nqn, sizeof(nqn),
+ "nqn.2014-08.org.nvmexpress:uuid:11111111-aaaa-bbdd-FFEE-123456789abcdef");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid uuid (dashes placed incorrectly) */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:111111-11aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid uuid (invalid characters in uuid) */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:111hg111-aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ free(tgt.subsystems);
+}
+
+static void
+test_spdk_nvmf_subsystem_set_sn(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+
+ /* Basic valid serial number */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd xyz") == 0);
+ CU_ASSERT(strcmp(subsystem.sn, "abcd xyz") == 0);
+
+ /* Exactly 20 characters (valid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "12345678901234567890") == 0);
+ CU_ASSERT(strcmp(subsystem.sn, "12345678901234567890") == 0);
+
+ /* 21 characters (too long, invalid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "123456789012345678901") < 0);
+
+ /* Non-ASCII characters (invalid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd\txyz") < 0);
+}
+
+/*
+ * Reservation Unit Test Configuration
+ * -------- -------- --------
+ * | Host A | | Host B | | Host C |
+ * -------- -------- --------
+ * / \ | |
+ * -------- -------- ------- -------
+ * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C|
+ * -------- -------- ------- -------
+ * \ \ / /
+ * \ \ / /
+ * \ \ / /
+ * --------------------------------------
+ * | NAMESPACE 1 |
+ * --------------------------------------
+ */
+static struct spdk_nvmf_subsystem g_subsystem;
+static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
+static struct spdk_nvmf_ns g_ns;
+static struct spdk_bdev g_bdev;
+struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
+
+void
+nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+static void
+ut_reservation_init(void)
+{
+
+ TAILQ_INIT(&g_subsystem.ctrlrs);
+
+ memset(&g_ns, 0, sizeof(g_ns));
+ TAILQ_INIT(&g_ns.registrants);
+ g_ns.subsystem = &g_subsystem;
+ g_ns.ptpl_file = NULL;
+ g_ns.ptpl_activated = false;
+ spdk_uuid_generate(&g_bdev.uuid);
+ g_ns.bdev = &g_bdev;
+
+ /* Host A has two controllers */
+ spdk_uuid_generate(&g_ctrlr1_A.hostid);
+ TAILQ_INIT(&g_ctrlr1_A.log_head);
+ g_ctrlr1_A.subsys = &g_subsystem;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr1_A, link);
+ spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
+ TAILQ_INIT(&g_ctrlr2_A.log_head);
+ g_ctrlr2_A.subsys = &g_subsystem;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr2_A, link);
+
+ /* Host B has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_B.hostid);
+ TAILQ_INIT(&g_ctrlr_B.log_head);
+ g_ctrlr_B.subsys = &g_subsystem;
+ g_ctrlr_B.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_B, link);
+
+ /* Host C has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_C.hostid);
+ TAILQ_INIT(&g_ctrlr_C.log_head);
+ g_ctrlr_C.subsys = &g_subsystem;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_C, link);
+}
+
+static void
+ut_reservation_deinit(void)
+{
+ struct spdk_nvmf_registrant *reg, *tmp;
+ struct spdk_nvmf_reservation_log *log, *log_tmp;
+ struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp;
+
+ TAILQ_FOREACH_SAFE(reg, &g_ns.registrants, link, tmp) {
+ TAILQ_REMOVE(&g_ns.registrants, reg, link);
+ free(reg);
+ }
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr1_A.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr1_A.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr2_A.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr2_A.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr_B.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr_B.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr_B.num_avail_log_pages = 0;
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr_C.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr_C.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr_C.num_avail_log_pages = 0;
+
+ TAILQ_FOREACH_SAFE(ctrlr, &g_subsystem.ctrlrs, link, ctrlr_tmp) {
+ TAILQ_REMOVE(&g_subsystem.ctrlrs, ctrlr, link);
+ }
+}
+
+static struct spdk_nvmf_request *
+ut_reservation_build_req(uint32_t length)
+{
+ struct spdk_nvmf_request *req;
+
+ req = calloc(1, sizeof(*req));
+ assert(req != NULL);
+
+ req->data = calloc(1, length);
+ assert(req->data != NULL);
+ req->length = length;
+
+ req->cmd = (union nvmf_h2c_msg *)calloc(1, sizeof(union nvmf_h2c_msg));
+ assert(req->cmd != NULL);
+
+ req->rsp = (union nvmf_c2h_msg *)calloc(1, sizeof(union nvmf_c2h_msg));
+ assert(req->rsp != NULL);
+
+ return req;
+}
+
+static void
+ut_reservation_free_req(struct spdk_nvmf_request *req)
+{
+ free(req->cmd);
+ free(req->rsp);
+ free(req->data);
+ free(req);
+}
+
+static void
+ut_reservation_build_register_request(struct spdk_nvmf_request *req,
+ uint8_t rrega, uint8_t iekey,
+ uint8_t cptpl, uint64_t crkey,
+ uint64_t nrkey)
+{
+ struct spdk_nvme_reservation_register_data key;
+ struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+
+ key.crkey = crkey;
+ key.nrkey = nrkey;
+ cmd->cdw10 = 0;
+ cmd->cdw10_bits.resv_register.rrega = rrega;
+ cmd->cdw10_bits.resv_register.iekey = iekey;
+ cmd->cdw10_bits.resv_register.cptpl = cptpl;
+ memcpy(req->data, &key, sizeof(key));
+}
+
+static void
+ut_reservation_build_acquire_request(struct spdk_nvmf_request *req,
+ uint8_t racqa, uint8_t iekey,
+ uint8_t rtype, uint64_t crkey,
+ uint64_t prkey)
+{
+ struct spdk_nvme_reservation_acquire_data key;
+ struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+
+ key.crkey = crkey;
+ key.prkey = prkey;
+ cmd->cdw10 = 0;
+ cmd->cdw10_bits.resv_acquire.racqa = racqa;
+ cmd->cdw10_bits.resv_acquire.iekey = iekey;
+ cmd->cdw10_bits.resv_acquire.rtype = rtype;
+ memcpy(req->data, &key, sizeof(key));
+}
+
+static void
+ut_reservation_build_release_request(struct spdk_nvmf_request *req,
+ uint8_t rrela, uint8_t iekey,
+ uint8_t rtype, uint64_t crkey)
+{
+ struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+
+ cmd->cdw10 = 0;
+ cmd->cdw10_bits.resv_release.rrela = rrela;
+ cmd->cdw10_bits.resv_release.iekey = iekey;
+ cmd->cdw10_bits.resv_release.rtype = rtype;
+ memcpy(req->data, &crkey, sizeof(crkey));
+}
+
+/*
+ * Construct four registrants used by the other test cases.
+ *
+ * g_ctrlr1_A registers with key 0xa1.
+ * g_ctrlr2_A registers with key 0xa1 (same host as g_ctrlr1_A).
+ * g_ctrlr_B registers with key 0xb1.
+ * g_ctrlr_C registers with key 0xc1.
+ */
+static void
+ut_reservation_build_registrants(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ uint32_t gen;
+
+ req = ut_reservation_build_req(16);
+ rsp = &req->rsp->nvme_cpl;
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ gen = g_ns.gen;
+
+ /* TEST CASE: g_ctrlr1_A registers with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xa1);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa1);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 1);
+
+ /* TEST CASE: g_ctrlr2_A registers with a new key. Because it has the same
+ * Host Identifier as g_ctrlr1_A, registering a different key must fail.
+ */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xa2);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr2_A, req);
+ /* Registering a key other than 0xa1 results in a reservation conflict */
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* g_ctrlr_B registers with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xb1);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb1);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 2);
+
+ /* g_ctrlr_C registers with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xc1);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xc1);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 3);
+
+ ut_reservation_free_req(req);
+}
+
+static void
+test_reservation_register(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ uint32_t gen;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ rsp = &req->rsp->nvme_cpl;
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ ut_reservation_build_registrants();
+
+ /* TEST CASE: Replace g_ctrlr1_A with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
+ 0, 0, 0xa1, 0xa11);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa11);
+
+ /* TEST CASE: Host A with g_ctrlr1_A gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE
+ */
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xa11, 0x0);
+ gen = g_ns.gen;
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa11);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
+
+ /* TEST CASE: g_ctrlr_C unregisters with IEKEY enabled */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 1, 0, 0, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ /* TEST CASE: g_ctrlr_B unregisters with the correct key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 0, 0, 0xb1, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ /* TEST CASE: g_ctrlr1_A unregisters with the correct key;
+ * the reservation should be removed as well.
+ */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 0, 0, 0xa11, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_register_with_ptpl(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ bool update_sgroup = false;
+ int rc;
+ struct spdk_nvmf_reservation_info info;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ rsp = &req->rsp->nvme_cpl;
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ /* TEST CASE: No persistent file, so registering with PTPL enabled will fail */
+ g_ns.ptpl_file = NULL;
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == false);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc != SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ /* TEST CASE: Enable PTPL */
+ g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
+ /* Load reservation information from configuration file */
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+
+ /* TEST CASE: Disable PTPL */
+ rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == false);
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ unlink(g_ns.ptpl_file);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_acquire_preempt_1(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ uint32_t gen;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ rsp = &req->rsp->nvme_cpl;
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ ut_reservation_build_registrants();
+
+ gen = g_ns.gen;
+ /* ACQUIRE: Host A with g_ctrlr1_A acquires a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY.
+ */
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa1);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
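+ /* A plain acquire does not bump the namespace generation counter, unlike the preempt cases below */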
+
+ /* TEST CASE: g_ctrlr1_A holds the reservation, g_ctrlr_B preempts g_ctrlr1_A,
+ * and the g_ctrlr1_A registrant is unregistered.
+ */
+ gen = g_ns.gen;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1, 0xa1);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);
+
+ /* TEST CASE: g_ctrlr_B holds the reservation, g_ctrlr_C preempts g_ctrlr_B
+ * with a valid key and PRKEY set to 0; all registrants other than the host that
+ * issued the command are unregistered.
+ */
+ gen = g_ns.gen;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_acquire_release_with_ptpl(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ bool update_sgroup = false;
+ struct spdk_uuid holder_uuid;
+ int rc;
+ struct spdk_nvmf_reservation_info info;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ rsp = &req->rsp->nvme_cpl;
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ /* TEST CASE: Enable PTPL */
+ g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
+ /* Load reservation information from configuration file */
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+
+ /* TEST CASE: Acquire the reservation */
+ rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
+ update_sgroup = nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+ SPDK_CU_ASSERT_FATAL(info.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+ SPDK_CU_ASSERT_FATAL(info.crkey == 0xa1);
+ spdk_uuid_parse(&holder_uuid, info.holder_uuid);
+ SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &holder_uuid));
+
+ /* TEST CASE: Release the reservation */
+ rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1);
+ update_sgroup = nvmf_ns_reservation_release(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(info.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+ unlink(g_ns.ptpl_file);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_release(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ rsp = &req->rsp->nvme_cpl;
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ ut_reservation_build_registrants();
+
+ /* ACQUIRE: Host A with g_ctrlr1_A gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS
+ */
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xa1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+
+ /* Test Case: Host B releases the reservation */
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL);
+
+ /* Test Case: Host C clears the registrants */
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0,
+ 0, 0xc1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+void
+nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
+ struct spdk_nvmf_ns *ns,
+ enum spdk_nvme_reservation_notification_log_page_type type)
+{
+ ctrlr->num_avail_log_pages++;
+}
+
+static void
+test_reservation_unregister_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+ /* ACQUIRE: Host B with g_ctrlr_B gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+ /* Test Case: g_ctrlr_B holds the reservation and unregisters its registration.
+ * A reservation released notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C, but only for
+ * the SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY or SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY
+ * reservation types.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 0, 0, 0xb1, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_release_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+ /* ACQUIRE: Host B with g_ctrlr_B gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+ /* Test Case: g_ctrlr_B holds the reservation and then releases it.
+ * A reservation released notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_release_notification_write_exclusive(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+ /* ACQUIRE: Host B with g_ctrlr_B gets a reservation of
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
+
+ /* Test Case: g_ctrlr_B holds the reservation and then releases it.
+ * Because the reservation type is SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ * no reservation released notification occurs.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 5;
+ g_ctrlr2_A.num_avail_log_pages = 5;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 5;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_clear_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B, via g_ctrlr_B, acquires a reservation of
+	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY.
+	 */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+	/* Test Case: g_ctrlr_B holds the reservation and then clears it.
+	 * A Reservation Preempted notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C.
+	 */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0,
+ 0, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_preempt_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B, via g_ctrlr_B, acquires a reservation of
+	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY.
+	 */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+	/* Test Case: g_ctrlr_B holds the reservation and g_ctrlr_C preempts it:
+	 * g_ctrlr_B's registrant is unregistered and the reservation is preempted.
+	 * A Registration Preempted notification is sent to g_ctrlr_B.
+	 * A Reservation Preempted notification is sent to g_ctrlr1_A/g_ctrlr2_A.
+	 */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 0;
+ g_ctrlr_C.num_avail_log_pages = 5;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0xb1);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
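+/*
+ * Verify that bdev resize and remove events pause the subsystem, report the
+ * namespace change to the attached controller once the pause completes, and
+ * return the subsystem to the ACTIVE state.
+ */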
+static void
+test_spdk_nvmf_ns_event(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem subsystem = {
+ .max_nsid = 0,
+ .ns = NULL,
+ .tgt = &tgt
+ };
+ struct spdk_nvmf_ctrlr ctrlr = {
+ .subsys = &subsystem
+ };
+ struct spdk_bdev bdev1 = {};
+ struct spdk_nvmf_ns_opts ns_opts;
+ uint32_t nsid;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Add one namespace */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev1, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 1);
+ CU_ASSERT(NULL != subsystem.ns[0]);
+
+ /* Add one controller */
+ TAILQ_INIT(&subsystem.ctrlrs);
+ TAILQ_INSERT_TAIL(&subsystem.ctrlrs, &ctrlr, link);
+
+ /* Namespace resize event */
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ g_ns_changed_nsid = 0xFFFFFFFF;
+ g_ns_changed_ctrlr = NULL;
+ nvmf_ns_event(SPDK_BDEV_EVENT_RESIZE, &bdev1, subsystem.ns[0]);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state);
+
+ poll_threads();
+ CU_ASSERT(1 == g_ns_changed_nsid);
+ CU_ASSERT(&ctrlr == g_ns_changed_ctrlr);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);
+
+ /* Namespace remove event */
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ g_ns_changed_nsid = 0xFFFFFFFF;
+ g_ns_changed_ctrlr = NULL;
+ nvmf_ns_event(SPDK_BDEV_EVENT_REMOVE, &bdev1, subsystem.ns[0]);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state);
+ CU_ASSERT(0xFFFFFFFF == g_ns_changed_nsid);
+ CU_ASSERT(NULL == g_ns_changed_ctrlr);
+
+ poll_threads();
+ CU_ASSERT(1 == g_ns_changed_nsid);
+ CU_ASSERT(&ctrlr == g_ns_changed_ctrlr);
+ CU_ASSERT(NULL == subsystem.ns[0]);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);
+
+ free(subsystem.ns);
+ free(tgt.subsystems);
+}
+
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, nvmf_test_create_subsystem);
+ CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_add_ns);
+ CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_set_sn);
+ CU_ADD_TEST(suite, test_reservation_register);
+ CU_ADD_TEST(suite, test_reservation_register_with_ptpl);
+ CU_ADD_TEST(suite, test_reservation_acquire_preempt_1);
+ CU_ADD_TEST(suite, test_reservation_acquire_release_with_ptpl);
+ CU_ADD_TEST(suite, test_reservation_release);
+ CU_ADD_TEST(suite, test_reservation_unregister_notification);
+ CU_ADD_TEST(suite, test_reservation_release_notification);
+ CU_ADD_TEST(suite, test_reservation_release_notification_write_exclusive);
+ CU_ADD_TEST(suite, test_reservation_clear_notification);
+ CU_ADD_TEST(suite, test_reservation_preempt_notification);
+ CU_ADD_TEST(suite, test_spdk_nvmf_ns_event);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore b/src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore
new file mode 100644
index 000000000..ea821fbfa
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore
@@ -0,0 +1 @@
+tcp_ut
diff --git a/src/spdk/test/unit/lib/nvmf/tcp.c/Makefile b/src/spdk/test/unit/lib/nvmf/tcp.c/Makefile
new file mode 100644
index 000000000..2f6dc9b85
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/tcp.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = tcp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c b/src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c
new file mode 100644
index 000000000..a6d6d9da3
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c
@@ -0,0 +1,722 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/nvmf_spec.h"
+#include "spdk_cunit.h"
+
+#include "spdk_internal/mock.h"
+#include "spdk_internal/thread.h"
+
+#include "common/lib/test_env.c"
+#include "common/lib/test_sock.c"
+
+#include "nvmf/ctrlr.c"
+#include "nvmf/tcp.c"
+
+#define UT_IPV4_ADDR "192.168.0.1"
+#define UT_PORT "4420"
+#define UT_NVMF_ADRFAM_INVALID 0xf
+#define UT_MAX_QUEUE_DEPTH 128
+#define UT_MAX_QPAIRS_PER_CTRLR 128
+#define UT_IN_CAPSULE_DATA_SIZE 1024
+#define UT_MAX_IO_SIZE 4096
+#define UT_IO_UNIT_SIZE 1024
+#define UT_MAX_AQ_DEPTH 64
+#define UT_SQ_HEAD_MAX 128
+#define UT_NUM_SHARED_BUFFERS 128
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
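+/*
+ * Stub out the ctrlr/subsystem/bdev/sock dependencies pulled in by compiling
+ * nvmf/ctrlr.c and nvmf/tcp.c directly into this unit test.
+ */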
+DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
+ int,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
+ 0);
+
+DEFINE_STUB(nvmf_subsystem_add_ctrlr,
+ int,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
+ 0);
+
+DEFINE_STUB(nvmf_subsystem_get_ctrlr,
+ struct spdk_nvmf_ctrlr *,
+ (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
+ struct spdk_nvmf_subsystem *,
+ (struct spdk_nvmf_tgt *tgt, const char *subnqn),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
+ true);
+
+DEFINE_STUB_V(nvmf_get_discovery_log_page,
+ (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
+ uint32_t iovcnt, uint64_t offset, uint32_t length));
+
+DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
+ true);
+
+DEFINE_STUB(nvmf_ctrlr_dsm_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
+ bool,
+ (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
+ false);
+
+DEFINE_STUB(nvmf_transport_req_complete,
+ int,
+ (struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
+ (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
+ struct spdk_nvmf_transport *transport));
+
+DEFINE_STUB(spdk_sock_get_optimal_sock_group,
+ int,
+ (struct spdk_sock *sock, struct spdk_sock_group **group),
+ 0);
+
+DEFINE_STUB(spdk_sock_group_get_ctx,
+ void *,
+ (struct spdk_sock_group *group),
+ NULL);
+
+DEFINE_STUB(spdk_sock_set_priority,
+ int,
+ (struct spdk_sock *sock, int priority),
+ 0);
+
+DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
+
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));
+
+DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
+
+DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
+
+DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
+
+struct spdk_trace_histories *g_trace_histories;
+
+struct spdk_bdev {
+ int ut_mock;
+ uint64_t blockcnt;
+};
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+void
+spdk_trace_register_object(uint8_t type, char id_prefix)
+{
+}
+
+void
+spdk_trace_register_description(const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name)
+{
+}
+
+void
+_spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1)
+{
+}
+
+const char *
+spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
+{
+ switch (trtype) {
+ case SPDK_NVME_TRANSPORT_PCIE:
+ return "PCIe";
+ case SPDK_NVME_TRANSPORT_RDMA:
+ return "RDMA";
+ case SPDK_NVME_TRANSPORT_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+int
+spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
+{
+ int len, i;
+
+ if (trstring == NULL) {
+ return -EINVAL;
+ }
+
+ len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
+ if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
+ return -EINVAL;
+ }
+
+	/* Store the official trstring as the uppercase version of the input. */
+ for (i = 0; i < len; i++) {
+ trid->trstring[i] = toupper(trstring[i]);
+ }
+ return 0;
+}
+
+int
+spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
+{
+ return 0;
+}
+
+int
+spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
+ struct spdk_nvmf_transport_poll_group *group,
+ struct spdk_nvmf_transport *transport,
+ uint32_t length)
+{
+	/* Lengths of one I/O unit or more will fail. */
+ if (length >= transport->opts.io_unit_size) {
+ return -EINVAL;
+ }
+
+ req->iovcnt = 1;
+ req->iov[0].iov_base = (void *)0xDEADBEEF;
+
+ return 0;
+}
+
+
+void
+nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
+ bool dif_insert_or_strip)
+{
+ uint64_t num_blocks;
+
+ SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
+ num_blocks = ns->bdev->blockcnt;
+ nsdata->nsze = num_blocks;
+ nsdata->ncap = num_blocks;
+ nsdata->nuse = num_blocks;
+ nsdata->nlbaf = 0;
+ nsdata->flbas.format = 0;
+ nsdata->lbaf[0].lbads = spdk_u32log2(512);
+}
+
+const char *
+spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
+{
+ return subsystem->sn;
+}
+
+const char *
+spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
+{
+ return subsystem->mn;
+}
+
+void
+spdk_trace_add_register_fn(struct spdk_trace_register_fn *reg_fn)
+{
+}
+
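+/*
+ * Exercise nvmf_tcp_create() with valid options, with an io_unit_size larger
+ * than max_io_size (the test expects it to be capped to max_io_size), and
+ * with an io_unit_size that is too small (expected to fail).
+ */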
+static void
+test_nvmf_tcp_create(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_nvmf_transport *transport;
+ struct spdk_nvmf_tcp_transport *ttransport;
+ struct spdk_nvmf_transport_opts opts;
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ /* case 1 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_IO_UNIT_SIZE;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+ /* expect success */
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
+ SPDK_CU_ASSERT_FATAL(ttransport != NULL);
+ transport->opts = opts;
+ CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
+ CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
+ CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
+ CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
+ /* destroy transport */
+ spdk_mempool_free(ttransport->transport.data_buf_pool);
+ free(ttransport);
+
+ /* case 2 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_MAX_IO_SIZE + 1;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+ /* expect success */
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
+ SPDK_CU_ASSERT_FATAL(ttransport != NULL);
+ transport->opts = opts;
+ CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
+ CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
+ CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
+ CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
+ /* destroy transport */
+ spdk_mempool_free(ttransport->transport.data_buf_pool);
+ free(ttransport);
+
+ /* case 3 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = 16;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+	/* expect failure */
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NULL(transport);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+static void
+test_nvmf_tcp_destroy(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_nvmf_transport *transport;
+ struct spdk_nvmf_transport_opts opts;
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ /* case 1 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_IO_UNIT_SIZE;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ transport->opts = opts;
+ /* destroy transport */
+ CU_ASSERT(nvmf_tcp_destroy(transport) == 0);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+static void
+test_nvmf_tcp_poll_group_create(void)
+{
+ struct spdk_nvmf_transport *transport;
+ struct spdk_nvmf_transport_poll_group *group;
+ struct spdk_thread *thread;
+ struct spdk_nvmf_transport_opts opts;
+ struct spdk_sock_group grp = {};
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_IO_UNIT_SIZE;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ transport->opts = opts;
+ MOCK_SET(spdk_sock_group_create, &grp);
+ group = nvmf_tcp_poll_group_create(transport);
+ MOCK_CLEAR_P(spdk_sock_group_create);
+ SPDK_CU_ASSERT_FATAL(group);
+ group->transport = transport;
+ nvmf_tcp_poll_group_destroy(group);
+ nvmf_tcp_destroy(transport);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
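+/*
+ * Build a request with three data iovecs totalling 300 bytes and verify that
+ * nvmf_tcp_send_c2h_data() queues a single C2H data PDU covering the whole
+ * payload.
+ */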
+static void
+test_nvmf_tcp_send_c2h_data(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_nvmf_tcp_transport ttransport = {};
+ struct spdk_nvmf_tcp_qpair tqpair = {};
+ struct spdk_nvmf_tcp_req tcp_req = {};
+ struct nvme_tcp_pdu pdu = {};
+ struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ tcp_req.pdu = &pdu;
+ tcp_req.req.length = 300;
+
+ tqpair.qpair.transport = &ttransport.transport;
+ TAILQ_INIT(&tqpair.send_queue);
+
+ /* Set qpair state to make unrelated operations NOP */
+ tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
+ tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
+
+ tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
+
+ tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
+ tcp_req.req.iov[0].iov_len = 101;
+ tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
+ tcp_req.req.iov[1].iov_len = 100;
+ tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
+ tcp_req.req.iov[2].iov_len = 99;
+ tcp_req.req.iovcnt = 3;
+ tcp_req.req.length = 300;
+
+ nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
+
+ CU_ASSERT(TAILQ_FIRST(&tqpair.send_queue) == &pdu);
+ TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
+
+ c2h_data = &pdu.hdr.c2h_data;
+ CU_ASSERT(c2h_data->datao == 0);
+	CU_ASSERT(c2h_data->datal == 300);
+ CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
+ CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
+
+ CU_ASSERT(pdu.data_iovcnt == 3);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 101);
+ CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
+ CU_ASSERT(pdu.data_iov[1].iov_len == 100);
+ CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
+ CU_ASSERT(pdu.data_iov[2].iov_len == 99);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)
+
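+/*
+ * Queue a request in the host-to-controller transfer state and verify that an
+ * H2C data header with a matching cccid/ttag maps the PDU data iovecs onto
+ * the request's buffers.
+ */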
+static void
+test_nvmf_tcp_h2c_data_hdr_handle(void)
+{
+ struct spdk_nvmf_tcp_transport ttransport = {};
+ struct spdk_nvmf_tcp_qpair tqpair = {};
+ struct nvme_tcp_pdu pdu = {};
+ struct spdk_nvmf_tcp_req tcp_req = {};
+ struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
+
+ TAILQ_INIT(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]);
+
+ /* Set qpair state to make unrelated operations NOP */
+ tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
+ tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
+
+ tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
+ tcp_req.req.iov[0].iov_len = 101;
+ tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
+ tcp_req.req.iov[1].iov_len = 99;
+ tcp_req.req.iovcnt = 2;
+ tcp_req.req.length = 200;
+
+ tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
+ tcp_req.req.cmd->nvme_cmd.cid = 1;
+ tcp_req.ttag = 2;
+
+ TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
+ &tcp_req, state_link);
+
+ h2c_data = &pdu.hdr.h2c_data;
+ h2c_data->cccid = 1;
+ h2c_data->ttag = 2;
+ h2c_data->datao = 0;
+ h2c_data->datal = 200;
+
+ nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);
+
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 101);
+ CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
+ CU_ASSERT(pdu.data_iov[1].iov_len == 99);
+
+ CU_ASSERT(TAILQ_FIRST(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]) ==
+ &tcp_req);
+ TAILQ_REMOVE(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
+ &tcp_req, state_link);
+}
+
+
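+/*
+ * Verify that a new in-capsule command is matched to a free tcp_req (and the
+ * qpair moves to AWAIT_PDU_PAYLOAD) while an earlier request is still waiting
+ * for buffers in the poll group's pending_buf_queue.
+ */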
+static void
+test_nvmf_tcp_incapsule_data_handle(void)
+{
+ struct spdk_nvmf_tcp_transport ttransport = {};
+ struct spdk_nvmf_tcp_qpair tqpair = {};
+ struct nvme_tcp_pdu *pdu;
+ union nvmf_c2h_msg rsp0 = {};
+ union nvmf_c2h_msg rsp = {};
+
+ struct spdk_nvmf_request *req_temp = NULL;
+ struct spdk_nvmf_tcp_req tcp_req2 = {};
+ struct spdk_nvmf_tcp_req tcp_req1 = {};
+
+ struct spdk_nvme_tcp_cmd *capsule_data;
+ struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
+ struct spdk_nvme_sgl_descriptor *sgl;
+
+ struct spdk_nvmf_transport_poll_group *group;
+ struct spdk_nvmf_tcp_poll_group tcp_group = {};
+ struct spdk_sock_group grp = {};
+ int i = 0;
+
+ ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
+ ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
+
+ tcp_group.sock_group = &grp;
+ TAILQ_INIT(&tcp_group.qpairs);
+ group = &tcp_group.group;
+ group->transport = &ttransport.transport;
+ STAILQ_INIT(&group->pending_buf_queue);
+ tqpair.group = &tcp_group;
+
+	/* Init the tqpair; the PDU in pdu_in_progress will wait for a buffer. */
+ for (i = TCP_REQUEST_STATE_FREE; i < TCP_REQUEST_NUM_STATES; i++) {
+ TAILQ_INIT(&tqpair.state_queue[i]);
+ }
+
+ TAILQ_INIT(&tqpair.send_queue);
+
+ TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_FREE], &tcp_req2, state_link);
+ tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
+ tqpair.qpair.transport = &ttransport.transport;
+ tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
+ tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
+ tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+
+	/* Init the empty tcp_req placed on the tqpair's TCP_REQUEST_STATE_FREE queue. */
+ tcp_req2.req.qpair = &tqpair.qpair;
+ tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
+ tcp_req2.req.rsp = &rsp;
+
+ /* init tcp_req1 */
+ tcp_req1.req.qpair = &tqpair.qpair;
+ tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
+ tcp_req1.req.rsp = &rsp0;
+ tcp_req1.state = TCP_REQUEST_STATE_NEW;
+
+ TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_NEW], &tcp_req1, state_link);
+ tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;
+
+	/* Init the PDU so that it requires an SGL buffer. */
+ pdu = &tqpair.pdu_in_progress;
+ capsule_data = &pdu->hdr.capsule_cmd;
+ nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
+ sgl = &capsule_data->ccsqe.dptr.sgl1;
+
+ capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
+ capsule_data->common.hlen = sizeof(*capsule_data);
+ capsule_data->common.plen = 1096;
+ capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;
+
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
+ sgl->unkeyed.length = UT_IO_UNIT_SIZE;
+
+ nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
+
+	/* Insert tcp_req1 into pending_buf_queue; this request takes precedence over the next one. */
+ nvmf_tcp_req_process(&ttransport, &tcp_req1);
+ CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
+
+ sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;
+
+	/* Process the tqpair's capsule command; tcp_req1 remains in pending_buf_queue. */
+ nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, &tqpair.pdu_in_progress);
+ CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
+ CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
+ STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
+ if (req_temp == &tcp_req2.req) {
+ break;
+ }
+ }
+ CU_ASSERT(req_temp == NULL);
+ CU_ASSERT(tqpair.pdu_in_progress.req == (void *)&tcp_req2);
+}
+
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvmf_tcp_create);
+ CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
+ CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
+ CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
+ CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
+ CU_ADD_TEST(suite, test_nvmf_tcp_incapsule_data_handle);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/reduce/Makefile b/src/spdk/test/unit/lib/reduce/Makefile
new file mode 100644
index 000000000..7c901ac18
--- /dev/null
+++ b/src/spdk/test/unit/lib/reduce/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = reduce.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/reduce/reduce.c/.gitignore b/src/spdk/test/unit/lib/reduce/reduce.c/.gitignore
new file mode 100644
index 000000000..be248403f
--- /dev/null
+++ b/src/spdk/test/unit/lib/reduce/reduce.c/.gitignore
@@ -0,0 +1 @@
+reduce_ut
diff --git a/src/spdk/test/unit/lib/reduce/reduce.c/Makefile b/src/spdk/test/unit/lib/reduce/reduce.c/Makefile
new file mode 100644
index 000000000..4a704c660
--- /dev/null
+++ b/src/spdk/test/unit/lib/reduce/reduce.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = reduce_ut.c
+LDFLAGS += -Wl,--wrap,unlink
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/reduce/reduce.c/reduce_ut.c b/src/spdk/test/unit/lib/reduce/reduce.c/reduce_ut.c
new file mode 100644
index 000000000..9c94a4ac6
--- /dev/null
+++ b/src/spdk/test/unit/lib/reduce/reduce.c/reduce_ut.c
@@ -0,0 +1,1300 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "reduce/reduce.c"
+#include "spdk_internal/mock.h"
+#include "common/lib/test_env.c"
+
+static struct spdk_reduce_vol *g_vol;
+static int g_reduce_errno;
+static char *g_volatile_pm_buf;
+static size_t g_volatile_pm_buf_len;
+static char *g_persistent_pm_buf;
+static size_t g_persistent_pm_buf_len;
+static char *g_backing_dev_buf;
+static char g_path[REDUCE_PATH_MAX];
+static char *g_decomp_buf;
+
+#define TEST_MD_PATH "/tmp"
+
+enum ut_reduce_bdev_io_type {
+ UT_REDUCE_IO_READV = 1,
+ UT_REDUCE_IO_WRITEV = 2,
+ UT_REDUCE_IO_UNMAP = 3,
+};
+
+struct ut_reduce_bdev_io {
+ enum ut_reduce_bdev_io_type type;
+ struct spdk_reduce_backing_dev *backing_dev;
+ struct iovec *iov;
+ int iovcnt;
+ uint64_t lba;
+ uint32_t lba_count;
+ struct spdk_reduce_vol_cb_args *args;
+ TAILQ_ENTRY(ut_reduce_bdev_io) link;
+};
+
+static bool g_defer_bdev_io = false;
+static TAILQ_HEAD(, ut_reduce_bdev_io) g_pending_bdev_io =
+ TAILQ_HEAD_INITIALIZER(g_pending_bdev_io);
+static uint32_t g_pending_bdev_io_count = 0;
+
+static void
+sync_pm_buf(const void *addr, size_t length)
+{
+ uint64_t offset = (char *)addr - g_volatile_pm_buf;
+
+ memcpy(&g_persistent_pm_buf[offset], addr, length);
+}
+
+int
+pmem_msync(const void *addr, size_t length)
+{
+ sync_pm_buf(addr, length);
+ return 0;
+}
+
+void
+pmem_persist(const void *addr, size_t len)
+{
+ sync_pm_buf(addr, len);
+}
+
+static void
+get_pm_file_size(void)
+{
+ struct spdk_reduce_vol_params params;
+ uint64_t pm_size, expected_pm_size;
+
+ params.backing_io_unit_size = 4096;
+ params.chunk_size = 4096 * 4;
+ params.vol_size = 4096 * 4 * 100;
+
+ pm_size = _get_pm_file_size(&params);
+ expected_pm_size = sizeof(struct spdk_reduce_vol_superblock);
+ /* 100 chunks in logical map * 8 bytes per chunk */
+ expected_pm_size += 100 * sizeof(uint64_t);
+	/* 100 chunks * (chunk struct size + 4 backing io units per chunk * 8 bytes per backing io unit) */
+ expected_pm_size += 100 * (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
+	/* reduce also allocates some extra chunks for in-flight writes when the logical
+	 * map is full. REDUCE_NUM_EXTRA_CHUNKS is a private #define in reduce.c. Here we
+	 * need that number of chunks times (chunk struct size + 4 backing io units per
+	 * chunk * 8 bytes per backing io unit).
+	 */
+ expected_pm_size += REDUCE_NUM_EXTRA_CHUNKS *
+ (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
+ /* reduce will add some padding so numbers may not match exactly. Make sure
+ * they are close though.
+ */
+ CU_ASSERT((pm_size - expected_pm_size) <= REDUCE_PM_SIZE_ALIGNMENT);
+}
+
+static void
+get_vol_size(void)
+{
+ uint64_t chunk_size, backing_dev_size;
+
+ chunk_size = 16 * 1024;
+ backing_dev_size = 16 * 1024 * 1000;
+ CU_ASSERT(_get_vol_size(chunk_size, backing_dev_size) < backing_dev_size);
+}
+
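+/*
+ * Mock of libpmem's pmem_map_file(): the "persistent" contents live in
+ * g_persistent_pm_buf, and the mapping handed back to the caller is a separate
+ * volatile copy, so msync/persist behavior can be verified.
+ */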
+void *
+pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
+ size_t *mapped_lenp, int *is_pmemp)
+{
+ CU_ASSERT(g_volatile_pm_buf == NULL);
+ snprintf(g_path, sizeof(g_path), "%s", path);
+ *is_pmemp = 1;
+
+ if (g_persistent_pm_buf == NULL) {
+ g_persistent_pm_buf = calloc(1, len);
+ g_persistent_pm_buf_len = len;
+ SPDK_CU_ASSERT_FATAL(g_persistent_pm_buf != NULL);
+ }
+
+ *mapped_lenp = g_persistent_pm_buf_len;
+ g_volatile_pm_buf = calloc(1, g_persistent_pm_buf_len);
+ SPDK_CU_ASSERT_FATAL(g_volatile_pm_buf != NULL);
+ memcpy(g_volatile_pm_buf, g_persistent_pm_buf, g_persistent_pm_buf_len);
+ g_volatile_pm_buf_len = g_persistent_pm_buf_len;
+
+ return g_volatile_pm_buf;
+}
+
+int
+pmem_unmap(void *addr, size_t len)
+{
+ CU_ASSERT(addr == g_volatile_pm_buf);
+ CU_ASSERT(len == g_volatile_pm_buf_len);
+ free(g_volatile_pm_buf);
+ g_volatile_pm_buf = NULL;
+ g_volatile_pm_buf_len = 0;
+
+ return 0;
+}
+
+static void
+persistent_pm_buf_destroy(void)
+{
+ CU_ASSERT(g_persistent_pm_buf != NULL);
+ free(g_persistent_pm_buf);
+ g_persistent_pm_buf = NULL;
+ g_persistent_pm_buf_len = 0;
+}
+
+static void
+unlink_cb(void)
+{
+ persistent_pm_buf_destroy();
+}
+
+static void
+init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
+{
+ g_vol = vol;
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
+{
+ g_vol = vol;
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+unload_cb(void *cb_arg, int reduce_errno)
+{
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+init_failure(void)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+
+ backing_dev.blocklen = 512;
+ /* This blockcnt is too small for a reduce vol - there needs to be
+ * enough space for at least REDUCE_NUM_EXTRA_CHUNKS + 1 chunks.
+ */
+ backing_dev.blockcnt = 20;
+
+ params.vol_size = 0;
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = backing_dev.blocklen;
+ params.logical_block_size = 512;
+
+ /* backing_dev has an invalid size. This should fail. */
+ g_vol = NULL;
+ g_reduce_errno = 0;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == -EINVAL);
+ SPDK_CU_ASSERT_FATAL(g_vol == NULL);
+
+	/* backing_dev now has a valid size, but its function pointers are
+	 * still NULL. This should fail.
+	 */
+ backing_dev.blockcnt = 20000;
+
+ g_vol = NULL;
+ g_reduce_errno = 0;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == -EINVAL);
+ SPDK_CU_ASSERT_FATAL(g_vol == NULL);
+}
+
+static void
+backing_dev_readv_execute(struct spdk_reduce_backing_dev *backing_dev,
+ struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ char *offset;
+ int i;
+
+ offset = g_backing_dev_buf + lba * backing_dev->blocklen;
+ for (i = 0; i < iovcnt; i++) {
+ memcpy(iov[i].iov_base, offset, iov[i].iov_len);
+ offset += iov[i].iov_len;
+ }
+ args->cb_fn(args->cb_arg, 0);
+}
+
+static void
+backing_dev_insert_io(enum ut_reduce_bdev_io_type type, struct spdk_reduce_backing_dev *backing_dev,
+ struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ struct ut_reduce_bdev_io *ut_bdev_io;
+
+ ut_bdev_io = calloc(1, sizeof(*ut_bdev_io));
+ SPDK_CU_ASSERT_FATAL(ut_bdev_io != NULL);
+
+ ut_bdev_io->type = type;
+ ut_bdev_io->backing_dev = backing_dev;
+ ut_bdev_io->iov = iov;
+ ut_bdev_io->iovcnt = iovcnt;
+ ut_bdev_io->lba = lba;
+ ut_bdev_io->lba_count = lba_count;
+ ut_bdev_io->args = args;
+ TAILQ_INSERT_TAIL(&g_pending_bdev_io, ut_bdev_io, link);
+ g_pending_bdev_io_count++;
+}
+
+static void
+backing_dev_readv(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
+{
+ if (g_defer_bdev_io == false) {
+ CU_ASSERT(g_pending_bdev_io_count == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
+ backing_dev_readv_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
+ return;
+ }
+
+ backing_dev_insert_io(UT_REDUCE_IO_READV, backing_dev, iov, iovcnt, lba, lba_count, args);
+}
+
+static void
+backing_dev_writev_execute(struct spdk_reduce_backing_dev *backing_dev,
+ struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ char *offset;
+ int i;
+
+ offset = g_backing_dev_buf + lba * backing_dev->blocklen;
+ for (i = 0; i < iovcnt; i++) {
+ memcpy(offset, iov[i].iov_base, iov[i].iov_len);
+ offset += iov[i].iov_len;
+ }
+ args->cb_fn(args->cb_arg, 0);
+}
+
+static void
+backing_dev_writev(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
+{
+ if (g_defer_bdev_io == false) {
+ CU_ASSERT(g_pending_bdev_io_count == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
+ backing_dev_writev_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
+ return;
+ }
+
+ backing_dev_insert_io(UT_REDUCE_IO_WRITEV, backing_dev, iov, iovcnt, lba, lba_count, args);
+}
+
+static void
+backing_dev_unmap_execute(struct spdk_reduce_backing_dev *backing_dev,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ char *offset;
+
+ offset = g_backing_dev_buf + lba * backing_dev->blocklen;
+ memset(offset, 0, lba_count * backing_dev->blocklen);
+ args->cb_fn(args->cb_arg, 0);
+}
+
+static void
+backing_dev_unmap(struct spdk_reduce_backing_dev *backing_dev,
+ uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
+{
+ if (g_defer_bdev_io == false) {
+ CU_ASSERT(g_pending_bdev_io_count == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
+ backing_dev_unmap_execute(backing_dev, lba, lba_count, args);
+ return;
+ }
+
+ backing_dev_insert_io(UT_REDUCE_IO_UNMAP, backing_dev, NULL, 0, lba, lba_count, args);
+}
+
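+/*
+ * Complete up to 'count' deferred backing-device I/Os in FIFO order; a count
+ * of 0 drains the whole pending queue.
+ */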
+static void
+backing_dev_io_execute(uint32_t count)
+{
+ struct ut_reduce_bdev_io *ut_bdev_io;
+ uint32_t done = 0;
+
+ CU_ASSERT(g_defer_bdev_io == true);
+ while (!TAILQ_EMPTY(&g_pending_bdev_io) && (count == 0 || done < count)) {
+ ut_bdev_io = TAILQ_FIRST(&g_pending_bdev_io);
+ TAILQ_REMOVE(&g_pending_bdev_io, ut_bdev_io, link);
+ g_pending_bdev_io_count--;
+ switch (ut_bdev_io->type) {
+ case UT_REDUCE_IO_READV:
+ backing_dev_readv_execute(ut_bdev_io->backing_dev,
+ ut_bdev_io->iov, ut_bdev_io->iovcnt,
+ ut_bdev_io->lba, ut_bdev_io->lba_count,
+ ut_bdev_io->args);
+ break;
+ case UT_REDUCE_IO_WRITEV:
+ backing_dev_writev_execute(ut_bdev_io->backing_dev,
+ ut_bdev_io->iov, ut_bdev_io->iovcnt,
+ ut_bdev_io->lba, ut_bdev_io->lba_count,
+ ut_bdev_io->args);
+ break;
+ case UT_REDUCE_IO_UNMAP:
+ backing_dev_unmap_execute(ut_bdev_io->backing_dev,
+ ut_bdev_io->lba, ut_bdev_io->lba_count,
+ ut_bdev_io->args);
+ break;
+ default:
+ CU_ASSERT(false);
+ break;
+ }
+ free(ut_bdev_io);
+ done++;
+ }
+}
+
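+/*
+ * ut_compress/ut_decompress implement a toy run-length codec for the unit
+ * test backing device: the compressed stream is a sequence of (count, byte)
+ * pairs, so each run of up to 255 identical bytes takes two bytes.
+ */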
+static int
+ut_compress(char *outbuf, uint32_t *compressed_len, char *inbuf, uint32_t inbuflen)
+{
+ uint32_t len = 0;
+ uint8_t count;
+ char last;
+
+ while (true) {
+ if (inbuflen == 0) {
+ *compressed_len = len;
+ return 0;
+ }
+
+ if (*compressed_len < (len + 2)) {
+ return -ENOSPC;
+ }
+
+ last = *inbuf;
+ count = 1;
+ inbuflen--;
+ inbuf++;
+
+ while (inbuflen > 0 && *inbuf == last && count < UINT8_MAX) {
+ count++;
+ inbuflen--;
+ inbuf++;
+ }
+
+ outbuf[len] = count;
+ outbuf[len + 1] = last;
+ len += 2;
+ }
+}
+
+static int
+ut_decompress(uint8_t *outbuf, uint32_t *compressed_len, uint8_t *inbuf, uint32_t inbuflen)
+{
+ uint32_t len = 0;
+
+ SPDK_CU_ASSERT_FATAL(inbuflen % 2 == 0);
+
+ while (true) {
+ if (inbuflen == 0) {
+ *compressed_len = len;
+ return 0;
+ }
+
+ if ((len + inbuf[0]) > *compressed_len) {
+ return -ENOSPC;
+ }
+
+ memset(outbuf, inbuf[1], inbuf[0]);
+ outbuf += inbuf[0];
+ len += inbuf[0];
+ inbuflen -= 2;
+ inbuf += 2;
+ }
+}
+
+static void
+ut_build_data_buffer(uint8_t *data, uint32_t data_len, uint8_t init_val, uint32_t repeat)
+{
+ uint32_t _repeat = repeat;
+
+ SPDK_CU_ASSERT_FATAL(repeat > 0);
+
+ while (data_len > 0) {
+ *data = init_val;
+ data++;
+ data_len--;
+ _repeat--;
+ if (_repeat == 0) {
+ init_val++;
+ _repeat = repeat;
+ }
+ }
+}
+
+static void
+backing_dev_compress(struct spdk_reduce_backing_dev *backing_dev,
+ struct iovec *src_iov, int src_iovcnt,
+ struct iovec *dst_iov, int dst_iovcnt,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ uint32_t compressed_len;
+ uint64_t total_length = 0;
+ char *buf = g_decomp_buf;
+ int rc, i;
+
+ CU_ASSERT(dst_iovcnt == 1);
+
+ for (i = 0; i < src_iovcnt; i++) {
+ memcpy(buf, src_iov[i].iov_base, src_iov[i].iov_len);
+ buf += src_iov[i].iov_len;
+ total_length += src_iov[i].iov_len;
+ }
+
+ compressed_len = dst_iov[0].iov_len;
+ rc = ut_compress(dst_iov[0].iov_base, &compressed_len,
+ g_decomp_buf, total_length);
+
+ args->cb_fn(args->cb_arg, rc ? rc : (int)compressed_len);
+}
+
+static void
+backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
+ struct iovec *src_iov, int src_iovcnt,
+ struct iovec *dst_iov, int dst_iovcnt,
+ struct spdk_reduce_vol_cb_args *args)
+{
+ uint32_t decompressed_len = 0;
+ char *buf = g_decomp_buf;
+ int rc, i;
+
+ CU_ASSERT(src_iovcnt == 1);
+
+ for (i = 0; i < dst_iovcnt; i++) {
+ decompressed_len += dst_iov[i].iov_len;
+ }
+
+ rc = ut_decompress(g_decomp_buf, &decompressed_len,
+ src_iov[0].iov_base, src_iov[0].iov_len);
+
+ for (i = 0; i < dst_iovcnt; i++) {
+ memcpy(dst_iov[i].iov_base, buf, dst_iov[i].iov_len);
+ buf += dst_iov[i].iov_len;
+ }
+
+ args->cb_fn(args->cb_arg, rc ? rc : (int)decompressed_len);
+}
+
+static void
+backing_dev_destroy(struct spdk_reduce_backing_dev *backing_dev)
+{
+ /* We don't free this during backing_dev_close so that we can test init/unload/load
+ * scenarios.
+ */
+ free(g_backing_dev_buf);
+ free(g_decomp_buf);
+ g_backing_dev_buf = NULL;
+}
+
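+/*
+ * Set up a 4 MiB in-memory backing device whose readv/writev/unmap callbacks
+ * operate directly on g_backing_dev_buf and whose compress/decompress
+ * callbacks use the run-length helpers above.
+ */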
+static void
+backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce_vol_params *params,
+ uint32_t backing_blocklen)
+{
+ int64_t size;
+
+ size = 4 * 1024 * 1024;
+ backing_dev->blocklen = backing_blocklen;
+ backing_dev->blockcnt = size / backing_dev->blocklen;
+ backing_dev->readv = backing_dev_readv;
+ backing_dev->writev = backing_dev_writev;
+ backing_dev->unmap = backing_dev_unmap;
+ backing_dev->compress = backing_dev_compress;
+ backing_dev->decompress = backing_dev_decompress;
+
+ g_decomp_buf = calloc(1, params->chunk_size);
+ SPDK_CU_ASSERT_FATAL(g_decomp_buf != NULL);
+
+ g_backing_dev_buf = calloc(1, size);
+ SPDK_CU_ASSERT_FATAL(g_backing_dev_buf != NULL);
+}
+
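+/*
+ * Initialize a volume and verify that the superblock signature, the volume
+ * parameters, and an empty logical map were persisted to the pmem metadata
+ * file, and that the pm file path has the form TEST_MD_PATH/<uuid>.
+ */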
+static void
+init_md(void)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_vol_params *persistent_params;
+ struct spdk_reduce_backing_dev backing_dev = {};
+ struct spdk_uuid uuid;
+ uint64_t *entry;
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 512;
+ params.logical_block_size = 512;
+
+ backing_dev_init(&backing_dev, &params, 512);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ /* Confirm that reduce persisted the params to metadata. */
+ CU_ASSERT(memcmp(g_persistent_pm_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
+ persistent_params = (struct spdk_reduce_vol_params *)(g_persistent_pm_buf + 8);
+ CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
+ /* Now confirm that contents of pm_file after the superblock have been initialized
+ * to REDUCE_EMPTY_MAP_ENTRY.
+ */
+ entry = (uint64_t *)(g_persistent_pm_buf + sizeof(struct spdk_reduce_vol_superblock));
+ while (entry != (uint64_t *)(g_persistent_pm_buf + g_vol->pm_file.size)) {
+ CU_ASSERT(*entry == REDUCE_EMPTY_MAP_ENTRY);
+ entry++;
+ }
+
+ /* Check that the pm file path was constructed correctly. It should be in
+ * the form:
+ * TEST_MD_PATH + "/" + <uuid string>
+ */
+ CU_ASSERT(strncmp(&g_path[0], TEST_MD_PATH, strlen(TEST_MD_PATH)) == 0);
+ CU_ASSERT(g_path[strlen(TEST_MD_PATH)] == '/');
+ CU_ASSERT(spdk_uuid_parse(&uuid, &g_path[strlen(TEST_MD_PATH) + 1]) == 0);
+ CU_ASSERT(spdk_uuid_compare(&uuid, spdk_reduce_vol_get_uuid(g_vol)) == 0);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ CU_ASSERT(g_volatile_pm_buf == NULL);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+_init_backing_dev(uint32_t backing_blocklen)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_vol_params *persistent_params;
+ struct spdk_reduce_backing_dev backing_dev = {};
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 512;
+ params.logical_block_size = 512;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, backing_blocklen);
+
+ g_vol = NULL;
+ memset(g_path, 0, sizeof(g_path));
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
+ /* Confirm that libreduce persisted the params to the backing device. */
+ CU_ASSERT(memcmp(g_backing_dev_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
+ persistent_params = (struct spdk_reduce_vol_params *)(g_backing_dev_buf + 8);
+ CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
+ /* Confirm that the path to the persistent memory metadata file was persisted to
+ * the backing device.
+ */
+ CU_ASSERT(strncmp(g_path,
+ g_backing_dev_buf + REDUCE_BACKING_DEV_PATH_OFFSET,
+ REDUCE_PATH_MAX) == 0);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+init_backing_dev(void)
+{
+ _init_backing_dev(512);
+ _init_backing_dev(4096);
+}
+
+static void
+_load(uint32_t backing_blocklen)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ char pmem_file_path[REDUCE_PATH_MAX];
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 512;
+ params.logical_block_size = 512;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, backing_blocklen);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
+ memcpy(pmem_file_path, g_path, sizeof(pmem_file_path));
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_vol = NULL;
+ memset(g_path, 0, sizeof(g_path));
+ g_reduce_errno = -1;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(strncmp(g_path, pmem_file_path, sizeof(pmem_file_path)) == 0);
+ CU_ASSERT(g_vol->params.vol_size == params.vol_size);
+ CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
+ CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+load(void)
+{
+ _load(512);
+ _load(4096);
+}
+
+static uint64_t
+_vol_get_chunk_map_index(struct spdk_reduce_vol *vol, uint64_t offset)
+{
+ uint64_t logical_map_index = offset / vol->logical_blocks_per_chunk;
+
+ return vol->pm_logical_map[logical_map_index];
+}
+
+static void
+write_cb(void *arg, int reduce_errno)
+{
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+read_cb(void *arg, int reduce_errno)
+{
+ g_reduce_errno = reduce_errno;
+}
+
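+/*
+ * Write chunk 0 twice and verify that the second write allocates a new chunk
+ * map and backing io units while the first write's allocations are released.
+ */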
+static void
+_write_maps(uint32_t backing_blocklen)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ struct iovec iov;
+ const int bufsize = 16 * 1024; /* chunk size */
+ char buf[bufsize];
+ uint32_t num_lbas, i;
+ uint64_t old_chunk0_map_index, new_chunk0_map_index;
+ struct spdk_reduce_chunk_map *old_chunk0_map, *new_chunk0_map;
+
+ params.chunk_size = bufsize;
+ params.backing_io_unit_size = 4096;
+ params.logical_block_size = 512;
+ num_lbas = bufsize / params.logical_block_size;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, backing_blocklen);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ for (i = 0; i < g_vol->params.vol_size / g_vol->params.chunk_size; i++) {
+ CU_ASSERT(_vol_get_chunk_map_index(g_vol, i) == REDUCE_EMPTY_MAP_ENTRY);
+ }
+
+ ut_build_data_buffer(buf, bufsize, 0x00, 1);
+ iov.iov_base = buf;
+ iov.iov_len = bufsize;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ old_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
+ CU_ASSERT(old_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == true);
+
+ old_chunk0_map = _reduce_vol_get_chunk_map(g_vol, old_chunk0_map_index);
+ for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
+ CU_ASSERT(old_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
+ old_chunk0_map->io_unit_index[i]) == true);
+ }
+
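+	/* Write the same chunk again.  Based on the asserts below, the overwrite
+	 * should be given a brand new chunk map (and new backing io units), and the
+	 * old chunk map's resources should be released, rather than the old map
+	 * being updated in place.
+	 */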
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ new_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
+ CU_ASSERT(new_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
+ CU_ASSERT(new_chunk0_map_index != old_chunk0_map_index);
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, new_chunk0_map_index) == true);
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == false);
+
+ for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
+ old_chunk0_map->io_unit_index[i]) == false);
+ }
+
+ new_chunk0_map = _reduce_vol_get_chunk_map(g_vol, new_chunk0_map_index);
+ for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
+ CU_ASSERT(new_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
+ CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
+ new_chunk0_map->io_unit_index[i]) == true);
+ }
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(g_vol->params.vol_size == params.vol_size);
+ CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
+ CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+write_maps(void)
+{
+ _write_maps(512);
+ _write_maps(4096);
+}
+
+static void
+_read_write(uint32_t backing_blocklen)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ struct iovec iov;
+ char buf[16 * 1024]; /* chunk size */
+ char compare_buf[16 * 1024];
+ uint32_t i;
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 4096;
+ params.logical_block_size = 512;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, backing_blocklen);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ /* Write 0xAA to 2 512-byte logical blocks, starting at LBA 2. */
+ memset(buf, 0xAA, 2 * params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = 2 * params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
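+	/* Read back every logical block in the first chunk, one block at a time.
+	 * LBAs 2 and 3 should contain the 0xAA pattern; every other block in the
+	 * chunk should read back as zeroes.
+	 */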
+ memset(compare_buf, 0xAA, sizeof(compare_buf));
+ for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
+ memset(buf, 0xFF, params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ switch (i) {
+ case 2:
+ case 3:
+ CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
+ break;
+ default:
+ CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
+ break;
+ }
+ }
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ /* Overwrite what we just wrote with 0xCC */
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(g_vol->params.vol_size == params.vol_size);
+ CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
+ CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);
+
+ memset(buf, 0xCC, 2 * params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = 2 * params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ memset(compare_buf, 0xCC, sizeof(compare_buf));
+ for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
+ memset(buf, 0xFF, params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ switch (i) {
+ case 2:
+ case 3:
+ CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
+ break;
+ default:
+ CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
+ break;
+ }
+ }
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+ CU_ASSERT(g_vol->params.vol_size == params.vol_size);
+ CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
+ CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);
+
+ g_reduce_errno = -1;
+
+	/* Write 0xBB to 2 512-byte logical blocks, starting at LBA 37.
+	 * This writes into the second chunk of the volume.  It also
+	 * implicitly checks that the bit arrays were reloaded correctly -
+	 * the first chunk map was already consumed by the write issued
+	 * before the unload/reload, so this new write must not reuse it.
+	 */
+ memset(buf, 0xBB, 2 * params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = 2 * params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 37, 2, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ for (i = 0; i < 2 * params.chunk_size / params.logical_block_size; i++) {
+ memset(buf, 0xFF, params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = params.logical_block_size;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ switch (i) {
+ case 2:
+ case 3:
+ memset(compare_buf, 0xCC, sizeof(compare_buf));
+ CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
+ break;
+ case 37:
+ case 38:
+ memset(compare_buf, 0xBB, sizeof(compare_buf));
+ CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
+ break;
+ default:
+ CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
+ break;
+ }
+ }
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+read_write(void)
+{
+ _read_write(512);
+ _read_write(4096);
+}
+
+static void
+_readv_writev(uint32_t backing_blocklen)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ struct iovec iov[REDUCE_MAX_IOVECS + 1];
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 4096;
+ params.logical_block_size = 512;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, backing_blocklen);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_writev(g_vol, iov, REDUCE_MAX_IOVECS + 1, 2, REDUCE_MAX_IOVECS + 1, write_cb, NULL);
+ CU_ASSERT(g_reduce_errno == -EINVAL);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+readv_writev(void)
+{
+ _readv_writev(512);
+ _readv_writev(4096);
+}
+
+static void
+destroy_cb(void *ctx, int reduce_errno)
+{
+ g_reduce_errno = reduce_errno;
+}
+
+static void
+destroy(void)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 512;
+ params.logical_block_size = 512;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, 512);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
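+	/* Make sure spdk_malloc()/spdk_zmalloc() are not mocked out before
+	 * exercising the destroy path.  After a successful destroy, reloading
+	 * the volume is expected to fail with -EILSEQ.
+	 */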
+ g_reduce_errno = -1;
+ MOCK_CLEAR(spdk_malloc);
+ MOCK_CLEAR(spdk_zmalloc);
+ spdk_reduce_vol_destroy(&backing_dev, destroy_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_reduce_errno = 0;
+ spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
+ CU_ASSERT(g_reduce_errno == -EILSEQ);
+
+ backing_dev_destroy(&backing_dev);
+}
+
+/* This test primarily checks that the reduce unit test infrastructure for asynchronous
+ * backing device I/O operations is working correctly.
+ */
+static void
+defer_bdev_io(void)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ const uint32_t logical_block_size = 512;
+ struct iovec iov;
+ char buf[logical_block_size];
+ char compare_buf[logical_block_size];
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 4096;
+ params.logical_block_size = logical_block_size;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, 512);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ /* Write 0xAA to 1 512-byte logical block. */
+ memset(buf, 0xAA, params.logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = params.logical_block_size;
+ g_reduce_errno = -100;
+ g_defer_bdev_io = true;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
+ /* Callback should not have executed, so this should still equal -100. */
+ CU_ASSERT(g_reduce_errno == -100);
+ CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
+ /* We wrote to just 512 bytes of one chunk which was previously unallocated. This
+ * should result in 1 pending I/O since the rest of this chunk will be zeroes and
+ * very compressible.
+ */
+ CU_ASSERT(g_pending_bdev_io_count == 1);
+
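+	/* Execute the deferred backing device I/O.  This should drain the pending
+	 * queue and complete the write, firing write_cb() with a status of 0.
+	 */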
+ backing_dev_io_execute(0);
+ CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_defer_bdev_io = false;
+ memset(compare_buf, 0xAA, sizeof(compare_buf));
+ memset(buf, 0xFF, sizeof(buf));
+ iov.iov_base = buf;
+ iov.iov_len = params.logical_block_size;
+ g_reduce_errno = -100;
+ spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 1, read_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+static void
+overlapped(void)
+{
+ struct spdk_reduce_vol_params params = {};
+ struct spdk_reduce_backing_dev backing_dev = {};
+ const uint32_t logical_block_size = 512;
+ struct iovec iov;
+ char buf[2 * logical_block_size];
+ char compare_buf[2 * logical_block_size];
+
+ params.chunk_size = 16 * 1024;
+ params.backing_io_unit_size = 4096;
+ params.logical_block_size = logical_block_size;
+ spdk_uuid_generate(&params.uuid);
+
+ backing_dev_init(&backing_dev, &params, 512);
+
+ g_vol = NULL;
+ g_reduce_errno = -1;
+ spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ SPDK_CU_ASSERT_FATAL(g_vol != NULL);
+
+ /* Write 0xAA to 1 512-byte logical block. */
+ memset(buf, 0xAA, logical_block_size);
+ iov.iov_base = buf;
+ iov.iov_len = logical_block_size;
+ g_reduce_errno = -100;
+ g_defer_bdev_io = true;
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
+ /* Callback should not have executed, so this should still equal -100. */
+ CU_ASSERT(g_reduce_errno == -100);
+ CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
+ /* We wrote to just 512 bytes of one chunk which was previously unallocated. This
+ * should result in 1 pending I/O since the rest of this chunk will be zeroes and
+ * very compressible.
+ */
+ CU_ASSERT(g_pending_bdev_io_count == 1);
+
+ /* Now do an overlapped I/O to the same chunk. */
+ spdk_reduce_vol_writev(g_vol, &iov, 1, 1, 1, write_cb, NULL);
+ /* Callback should not have executed, so this should still equal -100. */
+ CU_ASSERT(g_reduce_errno == -100);
+ CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
+ /* The second I/O overlaps with the first one. So we should only see pending bdev_io
+ * related to the first I/O here - the second one won't start until the first one is completed.
+ */
+ CU_ASSERT(g_pending_bdev_io_count == 1);
+
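+	/* Execute the deferred backing device I/O for the first write.  Once it
+	 * completes, the overlapped second write should be able to proceed; the
+	 * readback below verifies that both LBA 0 and LBA 1 contain 0xAA.
+	 */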
+ backing_dev_io_execute(0);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ g_defer_bdev_io = false;
+ memset(compare_buf, 0xAA, sizeof(compare_buf));
+ memset(buf, 0xFF, sizeof(buf));
+ iov.iov_base = buf;
+ iov.iov_len = 2 * logical_block_size;
+ g_reduce_errno = -100;
+ spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 2, read_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+ CU_ASSERT(memcmp(buf, compare_buf, 2 * logical_block_size) == 0);
+
+ g_reduce_errno = -1;
+ spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
+ CU_ASSERT(g_reduce_errno == 0);
+
+ persistent_pm_buf_destroy();
+ backing_dev_destroy(&backing_dev);
+}
+
+#define BUFSIZE 4096
+
+static void
+compress_algorithm(void)
+{
+ uint8_t original_data[BUFSIZE];
+ uint8_t compressed_data[BUFSIZE];
+ uint8_t decompressed_data[BUFSIZE];
+ uint32_t compressed_len, decompressed_len;
+ int rc;
+
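+	/* The asserts below imply that ut_compress() implements a simple
+	 * run-length encoding of (run length, byte value) pairs, with each run
+	 * capped at UINT8_MAX bytes, and that it returns -ENOSPC when the encoded
+	 * output does not fit in the destination buffer.
+	 */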
+ ut_build_data_buffer(original_data, BUFSIZE, 0xAA, BUFSIZE);
+ compressed_len = sizeof(compressed_data);
+ rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(compressed_len == 2);
+ CU_ASSERT(compressed_data[0] == UINT8_MAX);
+ CU_ASSERT(compressed_data[1] == 0xAA);
+
+ decompressed_len = sizeof(decompressed_data);
+ rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(decompressed_len == UINT8_MAX);
+ CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);
+
+ compressed_len = sizeof(compressed_data);
+ rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX + 1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(compressed_len == 4);
+ CU_ASSERT(compressed_data[0] == UINT8_MAX);
+ CU_ASSERT(compressed_data[1] == 0xAA);
+ CU_ASSERT(compressed_data[2] == 1);
+ CU_ASSERT(compressed_data[3] == 0xAA);
+
+ decompressed_len = sizeof(decompressed_data);
+ rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(decompressed_len == UINT8_MAX + 1);
+ CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);
+
+ ut_build_data_buffer(original_data, BUFSIZE, 0x00, 1);
+ compressed_len = sizeof(compressed_data);
+ rc = ut_compress(compressed_data, &compressed_len, original_data, 2048);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(compressed_len == 4096);
+ CU_ASSERT(compressed_data[0] == 1);
+ CU_ASSERT(compressed_data[1] == 0);
+ CU_ASSERT(compressed_data[4094] == 1);
+ CU_ASSERT(compressed_data[4095] == 0xFF);
+
+ decompressed_len = sizeof(decompressed_data);
+ rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(decompressed_len == 2048);
+ CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);
+
+ compressed_len = sizeof(compressed_data);
+ rc = ut_compress(compressed_data, &compressed_len, original_data, 2049);
+ CU_ASSERT(rc == -ENOSPC);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("reduce", NULL, NULL);
+
+ CU_ADD_TEST(suite, get_pm_file_size);
+ CU_ADD_TEST(suite, get_vol_size);
+ CU_ADD_TEST(suite, init_failure);
+ CU_ADD_TEST(suite, init_md);
+ CU_ADD_TEST(suite, init_backing_dev);
+ CU_ADD_TEST(suite, load);
+ CU_ADD_TEST(suite, write_maps);
+ CU_ADD_TEST(suite, read_write);
+ CU_ADD_TEST(suite, readv_writev);
+ CU_ADD_TEST(suite, destroy);
+ CU_ADD_TEST(suite, defer_bdev_io);
+ CU_ADD_TEST(suite, overlapped);
+ CU_ADD_TEST(suite, compress_algorithm);
+
+ g_unlink_path = g_path;
+ g_unlink_callback = unlink_cb;
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/Makefile b/src/spdk/test/unit/lib/scsi/Makefile
new file mode 100644
index 000000000..8044d3f4e
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = dev.c lun.c scsi.c scsi_bdev.c scsi_pr.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/scsi/dev.c/.gitignore b/src/spdk/test/unit/lib/scsi/dev.c/.gitignore
new file mode 100644
index 000000000..e325086bb
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/dev.c/.gitignore
@@ -0,0 +1 @@
+dev_ut
diff --git a/src/spdk/test/unit/lib/scsi/dev.c/Makefile b/src/spdk/test/unit/lib/scsi/dev.c/Makefile
new file mode 100644
index 000000000..983b3bc9e
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/dev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = dev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c b/src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c
new file mode 100644
index 000000000..f738011fb
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c
@@ -0,0 +1,682 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_cunit.h"
+
+#include "spdk/util.h"
+
+#include "scsi/dev.c"
+#include "scsi/port.c"
+
+#include "spdk_internal/mock.h"
+
+/* Unit test bdev mockup */
+struct spdk_bdev {
+ char name[100];
+};
+
+static struct spdk_bdev g_bdevs[] = {
+ {"malloc0"},
+ {"malloc1"},
+};
+
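+/* These globals let individual test cases choose which initiator port the
+ * stubbed scsi_lun_has_pending_tasks()/scsi_lun_has_pending_mgmt_tasks()
+ * below report as having outstanding work.
+ */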
+static struct spdk_scsi_port *g_initiator_port_with_pending_tasks = NULL;
+static struct spdk_scsi_port *g_initiator_port_with_pending_mgmt_tasks = NULL;
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return bdev->name;
+}
+
+static struct spdk_scsi_task *
+spdk_get_task(uint32_t *owner_task_ctr)
+{
+ struct spdk_scsi_task *task;
+
+ task = calloc(1, sizeof(*task));
+ if (!task) {
+ return NULL;
+ }
+
+ return task;
+}
+
+void
+spdk_scsi_task_put(struct spdk_scsi_task *task)
+{
+ free(task);
+}
+
+struct spdk_scsi_lun *scsi_lun_construct(struct spdk_bdev *bdev,
+ void (*hotremove_cb)(const struct spdk_scsi_lun *, void *),
+ void *hotremove_ctx)
+{
+ struct spdk_scsi_lun *lun;
+
+ lun = calloc(1, sizeof(struct spdk_scsi_lun));
+ SPDK_CU_ASSERT_FATAL(lun != NULL);
+
+ lun->bdev = bdev;
+
+ return lun;
+}
+
+void
+scsi_lun_destruct(struct spdk_scsi_lun *lun)
+{
+ free(lun);
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ size_t i;
+
+ for (i = 0; i < SPDK_COUNTOF(g_bdevs); i++) {
+ if (strcmp(bdev_name, g_bdevs[i].name) == 0) {
+ return &g_bdevs[i];
+ }
+ }
+
+ return NULL;
+}
+
+DEFINE_STUB_V(scsi_lun_execute_mgmt_task,
+ (struct spdk_scsi_lun *lun, struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(scsi_lun_execute_task,
+ (struct spdk_scsi_lun *lun, struct spdk_scsi_task *task));
+
+DEFINE_STUB(scsi_lun_allocate_io_channel, int,
+ (struct spdk_scsi_lun *lun), 0);
+
+DEFINE_STUB_V(scsi_lun_free_io_channel, (struct spdk_scsi_lun *lun));
+
+bool
+scsi_lun_has_pending_mgmt_tasks(const struct spdk_scsi_lun *lun,
+ const struct spdk_scsi_port *initiator_port)
+{
+ return (g_initiator_port_with_pending_mgmt_tasks == initiator_port);
+}
+
+bool
+scsi_lun_has_pending_tasks(const struct spdk_scsi_lun *lun,
+ const struct spdk_scsi_port *initiator_port)
+{
+ return (g_initiator_port_with_pending_tasks == initiator_port);
+}
+
+static void
+dev_destruct_null_dev(void)
+{
+ /* pass null for the dev */
+ spdk_scsi_dev_destruct(NULL, NULL, NULL);
+}
+
+static void
+dev_destruct_zero_luns(void)
+{
+ struct spdk_scsi_dev dev = { .is_allocated = 1 };
+
+ /* No luns attached to the dev */
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(&dev, NULL, NULL);
+}
+
+static void
+dev_destruct_null_lun(void)
+{
+ struct spdk_scsi_dev dev = { .is_allocated = 1 };
+
+ /* pass null for the lun */
+ dev.lun[0] = NULL;
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(&dev, NULL, NULL);
+}
+
+static void
+dev_destruct_success(void)
+{
+ struct spdk_scsi_dev dev = { .is_allocated = 1 };
+ int rc;
+
+ /* dev with a single lun */
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", 0, NULL, NULL);
+
+ CU_ASSERT(rc == 0);
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(&dev, NULL, NULL);
+
+}
+
+static void
+dev_construct_num_luns_zero(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {};
+ int lun_id_list[1] = { 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 0,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* dev should be null since we passed num_luns = 0 */
+ CU_ASSERT_TRUE(dev == NULL);
+}
+
+static void
+dev_construct_no_lun_zero(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {};
+ int lun_id_list[1] = { 0 };
+
+ lun_id_list[0] = 1;
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* dev should be null since no LUN0 was specified (lun_id_list[0] = 1) */
+ CU_ASSERT_TRUE(dev == NULL);
+}
+
+static void
+dev_construct_null_lun(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {};
+ int lun_id_list[1] = { 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+	/* dev should be null since a NULL bdev name was passed for LUN0 (bdev_name_list[0] = NULL) */
+ CU_ASSERT_TRUE(dev == NULL);
+}
+
+static void
+dev_construct_name_too_long(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+ char name[SPDK_SCSI_DEV_MAX_NAME + 1 + 1];
+
+ /* Try to construct a dev with a name that is one byte longer than allowed. */
+ memset(name, 'x', sizeof(name) - 1);
+ name[sizeof(name) - 1] = '\0';
+
+ dev = spdk_scsi_dev_construct(name, bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ CU_ASSERT(dev == NULL);
+}
+
+static void
+dev_construct_success(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(dev, NULL, NULL);
+}
+
+static void
+dev_construct_success_lun_zero_not_first(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[2] = {"malloc1", "malloc0"};
+ int lun_id_list[2] = { 1, 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 2,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(dev, NULL, NULL);
+}
+
+static void
+dev_queue_mgmt_task_success(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+ struct spdk_scsi_task *task;
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ task = spdk_get_task(NULL);
+
+ task->function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ spdk_scsi_dev_queue_mgmt_task(dev, task);
+
+ spdk_scsi_task_put(task);
+
+ spdk_scsi_dev_destruct(dev, NULL, NULL);
+}
+
+static void
+dev_queue_task_success(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+ struct spdk_scsi_task *task;
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ task = spdk_get_task(NULL);
+
+ spdk_scsi_dev_queue_task(dev, task);
+
+ spdk_scsi_task_put(task);
+
+ spdk_scsi_dev_destruct(dev, NULL, NULL);
+}
+
+static void
+dev_stop_success(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_task *task;
+ struct spdk_scsi_task *task_mgmt;
+
+ task = spdk_get_task(NULL);
+
+ spdk_scsi_dev_queue_task(&dev, task);
+
+ task_mgmt = spdk_get_task(NULL);
+
+	/* Enqueue the mgmt task into dev->task_mgmt_submit_queue */
+	task_mgmt->function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ spdk_scsi_dev_queue_mgmt_task(&dev, task_mgmt);
+
+ spdk_scsi_task_put(task);
+ spdk_scsi_task_put(task_mgmt);
+}
+
+static void
+dev_add_port_max_ports(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ int id, rc;
+
+ /* dev is set to SPDK_SCSI_DEV_MAX_PORTS */
+ dev.num_ports = SPDK_SCSI_DEV_MAX_PORTS;
+ name = "Name of Port";
+ id = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+	/* returns -1; since the dev already has the maximum
+	 * number of ports (SPDK_SCSI_DEV_MAX_PORTS) */
+ CU_ASSERT_TRUE(rc < 0);
+}
+
+static void
+dev_add_port_construct_failure1(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const int port_name_length = SPDK_SCSI_PORT_MAX_NAME_LENGTH + 2;
+ char name[port_name_length];
+ uint64_t id;
+ int rc;
+
+ dev.num_ports = 1;
+	/* Set the name so that its length exceeds
+	 * SPDK_SCSI_PORT_MAX_NAME_LENGTH (256). */
+ memset(name, 'a', port_name_length - 1);
+ name[port_name_length - 1] = '\0';
+ id = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ /* returns -1; since the length of the name exceeds
+ * SPDK_SCSI_PORT_MAX_NAME_LENGTH */
+ CU_ASSERT_TRUE(rc < 0);
+}
+
+static void
+dev_add_port_construct_failure2(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ uint64_t id;
+ int rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ id = 1;
+
+	/* Initialize port[0] to be valid (is_used = 1) with its id set to 1 */
+ dev.port[0].id = id;
+ dev.port[0].is_used = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+	/* returns -1; since the dev already has a port whose id is 1 */
+ CU_ASSERT_TRUE(rc < 0);
+}
+
+static void
+dev_add_port_success1(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ int id, rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ id = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ /* successfully adds a port */
+ CU_ASSERT_EQUAL(rc, 0);
+ /* Assert num_ports has been incremented to 2 */
+ CU_ASSERT_EQUAL(dev.num_ports, 2);
+}
+
+static void
+dev_add_port_success2(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ uint64_t id;
+ int rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ id = 1;
+	/* Set the id of unused port[0] (is_used = 0) to 1; this entry must be ignored */
+ dev.port[0].id = id;
+ dev.port[0].is_used = 0;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ /* successfully adds a port */
+ CU_ASSERT_EQUAL(rc, 0);
+	/* Assert num_ports has been incremented to 2 */
+ CU_ASSERT_EQUAL(dev.num_ports, 2);
+}
+
+static void
+dev_add_port_success3(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ uint64_t add_id;
+ int rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ dev.port[0].id = 1;
+ dev.port[0].is_used = 1;
+ add_id = 2;
+
+ /* Add a port with id = 2 */
+ rc = spdk_scsi_dev_add_port(&dev, add_id, name);
+
+ /* successfully adds a port */
+ CU_ASSERT_EQUAL(rc, 0);
+ /* Assert num_ports has been incremented to 2 */
+ CU_ASSERT_EQUAL(dev.num_ports, 2);
+}
+
+static void
+dev_find_port_by_id_num_ports_zero(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_port *rp_port;
+ uint64_t id;
+
+ dev.num_ports = 0;
+ id = 1;
+
+ rp_port = spdk_scsi_dev_find_port_by_id(&dev, id);
+
+ /* returns null; since dev's num_ports is 0 */
+ CU_ASSERT_TRUE(rp_port == NULL);
+}
+
+static void
+dev_find_port_by_id_id_not_found_failure(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_port *rp_port;
+ const char *name;
+ int rc;
+ uint64_t id, find_id;
+
+ id = 1;
+ dev.num_ports = 1;
+ name = "Name of Port";
+ find_id = 2;
+
+ /* Add a port with id = 1 */
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Find port with id = 2 */
+ rp_port = spdk_scsi_dev_find_port_by_id(&dev, find_id);
+
+ /* returns null; failed to find port specified by id = 2 */
+ CU_ASSERT_TRUE(rp_port == NULL);
+}
+
+static void
+dev_find_port_by_id_success(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_port *rp_port;
+ const char *name;
+ int rc;
+ uint64_t id;
+
+ id = 1;
+ dev.num_ports = 1;
+ name = "Name of Port";
+
+ /* Add a port */
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Find port by the same id as the one added above */
+ rp_port = spdk_scsi_dev_find_port_by_id(&dev, id);
+
+ /* Successfully found port specified by id */
+ CU_ASSERT_TRUE(rp_port != NULL);
+ if (rp_port != NULL) {
+ /* Assert the found port's id and name are same as
+ * the port added. */
+ CU_ASSERT_EQUAL(rp_port->id, 1);
+ CU_ASSERT_STRING_EQUAL(rp_port->name, "Name of Port");
+ }
+}
+
+static void
+dev_add_lun_bdev_not_found(void)
+{
+ int rc;
+ struct spdk_scsi_dev dev = {0};
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc2", 0, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(dev.lun[0] == NULL);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+}
+
+static void
+dev_add_lun_no_free_lun_id(void)
+{
+ int rc;
+ int i;
+ struct spdk_scsi_dev dev = {0};
+ struct spdk_scsi_lun lun;
+
+ for (i = 0; i < SPDK_SCSI_DEV_MAX_LUN; i++) {
+ dev.lun[i] = &lun;
+ }
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", -1, NULL, NULL);
+
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+}
+
+static void
+dev_add_lun_success1(void)
+{
+ int rc;
+ struct spdk_scsi_dev dev = {0};
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", -1, NULL, NULL);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ spdk_scsi_dev_destruct(&dev, NULL, NULL);
+}
+
+static void
+dev_add_lun_success2(void)
+{
+ int rc;
+ struct spdk_scsi_dev dev = {0};
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", 0, NULL, NULL);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ spdk_scsi_dev_destruct(&dev, NULL, NULL);
+}
+
+static void
+dev_check_pending_tasks(void)
+{
+ struct spdk_scsi_dev dev = {};
+ struct spdk_scsi_lun lun = {};
+ struct spdk_scsi_port initiator_port = {};
+
+ g_initiator_port_with_pending_tasks = NULL;
+ g_initiator_port_with_pending_mgmt_tasks = NULL;
+
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, NULL) == false);
+
+ dev.lun[SPDK_SCSI_DEV_MAX_LUN - 1] = &lun;
+
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, NULL) == true);
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, &initiator_port) == false);
+
+ g_initiator_port_with_pending_tasks = &initiator_port;
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, NULL) == true);
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, &initiator_port) == true);
+
+ g_initiator_port_with_pending_tasks = NULL;
+ g_initiator_port_with_pending_mgmt_tasks = &initiator_port;
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, NULL) == true);
+ CU_ASSERT(spdk_scsi_dev_has_pending_tasks(&dev, &initiator_port) == true);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("dev_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, dev_destruct_null_dev);
+ CU_ADD_TEST(suite, dev_destruct_zero_luns);
+ CU_ADD_TEST(suite, dev_destruct_null_lun);
+ CU_ADD_TEST(suite, dev_destruct_success);
+ CU_ADD_TEST(suite, dev_construct_num_luns_zero);
+ CU_ADD_TEST(suite, dev_construct_no_lun_zero);
+ CU_ADD_TEST(suite, dev_construct_null_lun);
+ CU_ADD_TEST(suite, dev_construct_name_too_long);
+ CU_ADD_TEST(suite, dev_construct_success);
+ CU_ADD_TEST(suite, dev_construct_success_lun_zero_not_first);
+ CU_ADD_TEST(suite, dev_queue_mgmt_task_success);
+ CU_ADD_TEST(suite, dev_queue_task_success);
+ CU_ADD_TEST(suite, dev_stop_success);
+ CU_ADD_TEST(suite, dev_add_port_max_ports);
+ CU_ADD_TEST(suite, dev_add_port_construct_failure1);
+ CU_ADD_TEST(suite, dev_add_port_construct_failure2);
+ CU_ADD_TEST(suite, dev_add_port_success1);
+ CU_ADD_TEST(suite, dev_add_port_success2);
+ CU_ADD_TEST(suite, dev_add_port_success3);
+ CU_ADD_TEST(suite, dev_find_port_by_id_num_ports_zero);
+ CU_ADD_TEST(suite, dev_find_port_by_id_id_not_found_failure);
+ CU_ADD_TEST(suite, dev_find_port_by_id_success);
+ CU_ADD_TEST(suite, dev_add_lun_bdev_not_found);
+ CU_ADD_TEST(suite, dev_add_lun_no_free_lun_id);
+ CU_ADD_TEST(suite, dev_add_lun_success1);
+ CU_ADD_TEST(suite, dev_add_lun_success2);
+ CU_ADD_TEST(suite, dev_check_pending_tasks);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/lun.c/.gitignore b/src/spdk/test/unit/lib/scsi/lun.c/.gitignore
new file mode 100644
index 000000000..89bd2aaf1
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/lun.c/.gitignore
@@ -0,0 +1 @@
+lun_ut
diff --git a/src/spdk/test/unit/lib/scsi/lun.c/Makefile b/src/spdk/test/unit/lib/scsi/lun.c/Makefile
new file mode 100644
index 000000000..95e179fe5
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/lun.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = lun_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c b/src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c
new file mode 100644
index 000000000..4efa8e364
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c
@@ -0,0 +1,750 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "scsi/task.c"
+#include "scsi/lun.c"
+
+#include "spdk_internal/mock.h"
+/* These unit tests aren't multithreaded, but we need to allocate threads since
+ * the lun.c code will register pollers.
+ */
+#include "common/lib/ut_multithread.c"
+
+/* Unit test bdev mockup */
+struct spdk_bdev {
+ int x;
+};
+
+SPDK_LOG_REGISTER_COMPONENT("scsi", SPDK_LOG_SCSI)
+
+struct spdk_scsi_globals g_spdk_scsi;
+
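+/* g_lun_execute_fail and g_lun_execute_status steer the stubbed
+ * bdev_scsi_execute() below, while g_task_count counts tasks created by
+ * ut_init_task() that have not yet been completed via spdk_lun_ut_cpl_task().
+ */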
+static bool g_lun_execute_fail = false;
+static int g_lun_execute_status = SPDK_SCSI_TASK_PENDING;
+static uint32_t g_task_count = 0;
+
+struct spdk_trace_histories *g_trace_histories;
+
+DEFINE_STUB_V(_spdk_trace_record,
+ (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+static void
+spdk_lun_ut_cpl_task(struct spdk_scsi_task *task)
+{
+ SPDK_CU_ASSERT_FATAL(g_task_count > 0);
+ g_task_count--;
+}
+
+static void
+spdk_lun_ut_free_task(struct spdk_scsi_task *task)
+{
+}
+
+static void
+ut_init_task(struct spdk_scsi_task *task)
+{
+ memset(task, 0, sizeof(*task));
+ spdk_scsi_task_construct(task, spdk_lun_ut_cpl_task,
+ spdk_lun_ut_free_task);
+ g_task_count++;
+}
+
+void
+spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+ CU_ASSERT(0);
+}
+
+DEFINE_STUB(spdk_bdev_open, int,
+ (struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc),
+ 0);
+
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+
+DEFINE_STUB(spdk_bdev_get_name, const char *,
+ (const struct spdk_bdev *bdev), "test");
+
+DEFINE_STUB_V(spdk_scsi_dev_queue_mgmt_task,
+ (struct spdk_scsi_dev *dev, struct spdk_scsi_task *task));
+
+DEFINE_STUB_V(spdk_scsi_dev_delete_lun,
+ (struct spdk_scsi_dev *dev, struct spdk_scsi_lun *lun));
+
+DEFINE_STUB(scsi_pr_check, int, (struct spdk_scsi_task *task), 0);
+DEFINE_STUB(scsi2_reserve_check, int, (struct spdk_scsi_task *task), 0);
+
+void
+bdev_scsi_reset(struct spdk_scsi_task *task)
+{
+ task->status = SPDK_SCSI_STATUS_GOOD;
+ task->response = SPDK_SCSI_TASK_MGMT_RESP_SUCCESS;
+
+ scsi_lun_complete_reset_task(task->lun, task);
+}
+
+int
+bdev_scsi_execute(struct spdk_scsi_task *task)
+{
+ if (g_lun_execute_fail) {
+ return -EINVAL;
+ } else {
+ task->status = SPDK_SCSI_STATUS_GOOD;
+
+ if (g_lun_execute_status == SPDK_SCSI_TASK_PENDING) {
+ return g_lun_execute_status;
+ } else if (g_lun_execute_status == SPDK_SCSI_TASK_COMPLETE) {
+ return g_lun_execute_status;
+ } else {
+ return 0;
+ }
+ }
+}
+
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
+ (struct spdk_bdev_desc *desc), NULL);
+
+static struct spdk_scsi_lun *lun_construct(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_bdev bdev;
+
+ lun = scsi_lun_construct(&bdev, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(lun != NULL);
+ return lun;
+}
+
+static void
+lun_destruct(struct spdk_scsi_lun *lun)
+{
+ /* LUN will defer its removal if there are any unfinished tasks */
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&lun->tasks));
+
+ scsi_lun_destruct(lun);
+}
+
+static void
+lun_task_mgmt_execute_abort_task_not_supported(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_port initiator_port = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.initiator_port = &initiator_port;
+ mgmt_task.function = SPDK_SCSI_TASK_FUNC_ABORT_TASK;
+
+ /* Params to add regular task to the lun->tasks */
+ ut_init_task(&task);
+ task.lun = lun;
+ task.cdb = cdb;
+
+ scsi_lun_execute_task(lun, &task);
+
+ /* task should now be on the tasks list */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ scsi_lun_execute_mgmt_task(lun, &mgmt_task);
+
+ /* task abort is not supported */
+ CU_ASSERT(mgmt_task.response == SPDK_SCSI_TASK_MGMT_RESP_REJECT_FUNC_NOT_SUPPORTED);
+
+ /* task is still on the tasks list */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+
+ scsi_lun_complete_task(lun, &task);
+ CU_ASSERT_EQUAL(g_task_count, 0);
+
+ lun_destruct(lun);
+}
+
+static void
+lun_task_mgmt_execute_abort_task_all_not_supported(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_port initiator_port = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.initiator_port = &initiator_port;
+ mgmt_task.function = SPDK_SCSI_TASK_FUNC_ABORT_TASK_SET;
+
+ /* Params to add regular task to the lun->tasks */
+ ut_init_task(&task);
+ task.initiator_port = &initiator_port;
+ task.lun = lun;
+ task.cdb = cdb;
+
+ scsi_lun_execute_task(lun, &task);
+
+ /* task should now be on the tasks list */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ scsi_lun_execute_mgmt_task(lun, &mgmt_task);
+
+ /* task abort is not supported */
+ CU_ASSERT(mgmt_task.response == SPDK_SCSI_TASK_MGMT_RESP_REJECT_FUNC_NOT_SUPPORTED);
+
+ /* task is still on the tasks list */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+
+ scsi_lun_complete_task(lun, &task);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+
+ lun_destruct(lun);
+}
+
+static void
+lun_task_mgmt_execute_lun_reset(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+
+ scsi_lun_execute_mgmt_task(lun, &mgmt_task);
+
+ /* Returns success */
+ CU_ASSERT_EQUAL(mgmt_task.status, SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT_EQUAL(mgmt_task.response, SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_task_mgmt_execute_invalid_case(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.function = 5;
+
+ /* Pass an invalid value to the switch statement */
+ scsi_lun_execute_mgmt_task(lun, &mgmt_task);
+
+ /* function code is invalid */
+ CU_ASSERT_EQUAL(mgmt_task.response, SPDK_SCSI_TASK_MGMT_RESP_REJECT_FUNC_NOT_SUPPORTED);
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_append_task_null_lun_task_cdb_spc_inquiry(void)
+{
+ struct spdk_scsi_task task = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ ut_init_task(&task);
+ task.cdb = cdb;
+ task.cdb[0] = SPDK_SPC_INQUIRY;
+ /* alloc_len >= 4096 */
+ task.cdb[3] = 0xFF;
+ task.cdb[4] = 0xFF;
+ task.lun = NULL;
+
+ spdk_scsi_task_process_null_lun(&task);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_GOOD);
+
+ spdk_scsi_task_put(&task);
+
+ /* spdk_scsi_task_process_null_lun() does not call cpl_fn */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+ g_task_count = 0;
+}
+
+static void
+lun_append_task_null_lun_alloc_len_lt_4096(void)
+{
+ struct spdk_scsi_task task = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ ut_init_task(&task);
+ task.cdb = cdb;
+ task.cdb[0] = SPDK_SPC_INQUIRY;
+ /* alloc_len < 4096 */
+ task.cdb[3] = 0;
+ task.cdb[4] = 0;
+	/* alloc_len is raised to a minimum value of 4096,
+	 * so a 4096-byte buffer is allocated */
+ spdk_scsi_task_process_null_lun(&task);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_GOOD);
+
+ spdk_scsi_task_put(&task);
+
+ /* spdk_scsi_task_process_null_lun() does not call cpl_fn */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+ g_task_count = 0;
+}
+
+static void
+lun_append_task_null_lun_not_supported(void)
+{
+ struct spdk_scsi_task task = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ ut_init_task(&task);
+ task.cdb = cdb;
+ task.lun = NULL;
+
+ spdk_scsi_task_process_null_lun(&task);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ /* LUN not supported; task's data transferred should be 0 */
+ CU_ASSERT_EQUAL(task.data_transferred, 0);
+
+ /* spdk_scsi_task_process_null_lun() does not call cpl_fn */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+ g_task_count = 0;
+}
+
+static void
+lun_execute_scsi_task_pending(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+
+ ut_init_task(&task);
+ task.lun = lun;
+ lun->dev = &dev;
+
+ g_lun_execute_fail = false;
+ g_lun_execute_status = SPDK_SCSI_TASK_PENDING;
+
+	/* The task list should still be empty since the task has not been
+	 * executed yet.
+	 */
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ scsi_lun_execute_task(lun, &task);
+
+ /* Assert the task has been successfully added to the tasks queue */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ /* task is still on the tasks list */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+
+	/* Complete the task so that the LUN can be removed. */
+ scsi_lun_complete_task(lun, &task);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+
+ lun_destruct(lun);
+}
+
+static void
+lun_execute_scsi_task_complete(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+
+ ut_init_task(&task);
+ task.lun = lun;
+ lun->dev = &dev;
+
+ g_lun_execute_fail = false;
+ g_lun_execute_status = SPDK_SCSI_TASK_COMPLETE;
+
+	/* The task list should still be empty since the task has not been
+	 * executed yet.
+	 */
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ scsi_lun_execute_task(lun, &task);
+
+ /* Assert the task has not been added to the tasks queue */
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_destruct_success(void)
+{
+ struct spdk_scsi_lun *lun;
+
+ lun = lun_construct();
+
+ scsi_lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_construct_null_ctx(void)
+{
+ struct spdk_scsi_lun *lun;
+
+ lun = scsi_lun_construct(NULL, NULL, NULL);
+
+ /* lun should be NULL since we passed NULL for the ctx pointer. */
+ CU_ASSERT(lun == NULL);
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_construct_success(void)
+{
+ struct spdk_scsi_lun *lun = lun_construct();
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_reset_task_wait_scsi_task_complete(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&task);
+ task.lun = lun;
+
+ g_lun_execute_fail = false;
+ g_lun_execute_status = SPDK_SCSI_TASK_PENDING;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+
+	/* Execute the task; it stays pending, so it remains on the task list. */
+ scsi_lun_execute_task(lun, &task);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->pending_tasks));
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ /* Execute the reset task */
+ scsi_lun_execute_mgmt_task(lun, &mgmt_task);
+
+ /* The reset task should be on the submitted mgmt task list and
+ * a poller is created because the task prior to the reset task is pending.
+ */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(lun->reset_poller != NULL);
+
+ /* Execute the poller to check if the task prior to the reset task complete. */
+ scsi_lun_reset_check_outstanding_tasks(&mgmt_task);
+
+ CU_ASSERT(!TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(lun->reset_poller != NULL);
+
+ /* Complete the task. */
+ scsi_lun_complete_task(lun, &task);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ /* Execute the poller to check if the task prior to the reset task complete. */
+ scsi_lun_reset_check_outstanding_tasks(&mgmt_task);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(lun->reset_poller == NULL);
+ CU_ASSERT_EQUAL(mgmt_task.status, SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT_EQUAL(mgmt_task.response, SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_reset_task_suspend_scsi_task(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&task);
+ task.lun = lun;
+
+ g_lun_execute_fail = false;
+ g_lun_execute_status = SPDK_SCSI_TASK_COMPLETE;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+
+ /* Append a reset task to the pending mgmt task list. */
+ scsi_lun_append_mgmt_task(lun, &mgmt_task);
+
+ CU_ASSERT(!TAILQ_EMPTY(&lun->pending_mgmt_tasks));
+
+	/* Try to execute the task; because a mgmt task is pending, it is queued on the pending task list instead. */
+ scsi_lun_execute_task(lun, &task);
+
+ CU_ASSERT(!TAILQ_EMPTY(&lun->pending_tasks));
+
+	/* Execute the reset task. The suspended task is then executed as well. */
+ _scsi_lun_execute_mgmt_task(lun);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(lun->reset_poller == NULL);
+ CU_ASSERT_EQUAL(mgmt_task.status, SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT_EQUAL(mgmt_task.response, SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->pending_tasks));
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_check_pending_tasks_only_for_specific_initiator(void)
+{
+ struct spdk_bdev bdev = {};
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task1 = {};
+ struct spdk_scsi_task task2 = {};
+ struct spdk_scsi_port initiator_port1 = {};
+ struct spdk_scsi_port initiator_port2 = {};
+ struct spdk_scsi_port initiator_port3 = {};
+
+ lun = scsi_lun_construct(&bdev, NULL, NULL);
+
+ task1.initiator_port = &initiator_port1;
+ task2.initiator_port = &initiator_port2;
+
+ TAILQ_INSERT_TAIL(&lun->tasks, &task1, scsi_link);
+ TAILQ_INSERT_TAIL(&lun->tasks, &task2, scsi_link);
+ CU_ASSERT(scsi_lun_has_outstanding_tasks(lun) == true);
+ CU_ASSERT(_scsi_lun_has_pending_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, NULL) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port1) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port2) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port3) == false);
+ TAILQ_REMOVE(&lun->tasks, &task1, scsi_link);
+ TAILQ_REMOVE(&lun->tasks, &task2, scsi_link);
+ CU_ASSERT(_scsi_lun_has_pending_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, NULL) == false);
+
+ TAILQ_INSERT_TAIL(&lun->pending_tasks, &task1, scsi_link);
+ TAILQ_INSERT_TAIL(&lun->pending_tasks, &task2, scsi_link);
+ CU_ASSERT(scsi_lun_has_outstanding_tasks(lun) == false);
+ CU_ASSERT(_scsi_lun_has_pending_tasks(lun) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, NULL) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port1) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port2) == true);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, &initiator_port3) == false);
+ TAILQ_REMOVE(&lun->pending_tasks, &task1, scsi_link);
+ TAILQ_REMOVE(&lun->pending_tasks, &task2, scsi_link);
+ CU_ASSERT(_scsi_lun_has_pending_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_tasks(lun, NULL) == false);
+
+ TAILQ_INSERT_TAIL(&lun->mgmt_tasks, &task1, scsi_link);
+ TAILQ_INSERT_TAIL(&lun->mgmt_tasks, &task2, scsi_link);
+ CU_ASSERT(scsi_lun_has_outstanding_mgmt_tasks(lun) == true);
+ CU_ASSERT(_scsi_lun_has_pending_mgmt_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, NULL) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port1) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port2) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port3) == false);
+ TAILQ_REMOVE(&lun->mgmt_tasks, &task1, scsi_link);
+ TAILQ_REMOVE(&lun->mgmt_tasks, &task2, scsi_link);
+ CU_ASSERT(_scsi_lun_has_pending_mgmt_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, NULL) == false);
+
+ TAILQ_INSERT_TAIL(&lun->pending_mgmt_tasks, &task1, scsi_link);
+ TAILQ_INSERT_TAIL(&lun->pending_mgmt_tasks, &task2, scsi_link);
+ CU_ASSERT(_scsi_lun_has_pending_mgmt_tasks(lun) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, NULL) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port1) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port2) == true);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, &initiator_port3) == false);
+ TAILQ_REMOVE(&lun->pending_mgmt_tasks, &task1, scsi_link);
+ TAILQ_REMOVE(&lun->pending_mgmt_tasks, &task2, scsi_link);
+ CU_ASSERT(_scsi_lun_has_pending_mgmt_tasks(lun) == false);
+ CU_ASSERT(scsi_lun_has_pending_mgmt_tasks(lun, NULL) == false);
+
+ scsi_lun_remove(lun);
+}
+
+static void
+abort_pending_mgmt_tasks_when_lun_is_removed(void)
+{
+ struct spdk_bdev bdev = {};
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task1, task2, task3;
+
+ lun = scsi_lun_construct(&bdev, NULL, NULL);
+
+ /* Normal case */
+ ut_init_task(&task1);
+ ut_init_task(&task2);
+ ut_init_task(&task3);
+ task1.lun = lun;
+ task2.lun = lun;
+ task3.lun = lun;
+ task1.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ task2.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ task3.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+
+ CU_ASSERT(g_task_count == 3);
+
+ scsi_lun_append_mgmt_task(lun, &task1);
+ scsi_lun_append_mgmt_task(lun, &task2);
+ scsi_lun_append_mgmt_task(lun, &task3);
+
+ CU_ASSERT(!TAILQ_EMPTY(&lun->pending_mgmt_tasks));
+
+ _scsi_lun_execute_mgmt_task(lun);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->pending_mgmt_tasks));
+ CU_ASSERT(TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(g_task_count == 0);
+ CU_ASSERT(task1.response == SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+ CU_ASSERT(task2.response == SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+ CU_ASSERT(task3.response == SPDK_SCSI_TASK_MGMT_RESP_SUCCESS);
+
+ /* LUN hotplug case */
+ ut_init_task(&task1);
+ ut_init_task(&task2);
+ ut_init_task(&task3);
+ task1.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ task2.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+ task3.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
+
+ CU_ASSERT(g_task_count == 3);
+
+ scsi_lun_append_mgmt_task(lun, &task1);
+ scsi_lun_append_mgmt_task(lun, &task2);
+ scsi_lun_append_mgmt_task(lun, &task3);
+
+ CU_ASSERT(!TAILQ_EMPTY(&lun->pending_mgmt_tasks));
+
+ lun->removed = true;
+
+ _scsi_lun_execute_mgmt_task(lun);
+
+ CU_ASSERT(TAILQ_EMPTY(&lun->pending_mgmt_tasks));
+ CU_ASSERT(TAILQ_EMPTY(&lun->mgmt_tasks));
+ CU_ASSERT(g_task_count == 0);
+ CU_ASSERT(task1.response == SPDK_SCSI_TASK_MGMT_RESP_INVALID_LUN);
+ CU_ASSERT(task2.response == SPDK_SCSI_TASK_MGMT_RESP_INVALID_LUN);
+ CU_ASSERT(task3.response == SPDK_SCSI_TASK_MGMT_RESP_INVALID_LUN);
+
+ scsi_lun_remove(lun);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("lun_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, lun_task_mgmt_execute_abort_task_not_supported);
+ CU_ADD_TEST(suite, lun_task_mgmt_execute_abort_task_all_not_supported);
+ CU_ADD_TEST(suite, lun_task_mgmt_execute_lun_reset);
+ CU_ADD_TEST(suite, lun_task_mgmt_execute_invalid_case);
+ CU_ADD_TEST(suite, lun_append_task_null_lun_task_cdb_spc_inquiry);
+ CU_ADD_TEST(suite, lun_append_task_null_lun_alloc_len_lt_4096);
+ CU_ADD_TEST(suite, lun_append_task_null_lun_not_supported);
+ CU_ADD_TEST(suite, lun_execute_scsi_task_pending);
+ CU_ADD_TEST(suite, lun_execute_scsi_task_complete);
+ CU_ADD_TEST(suite, lun_destruct_success);
+ CU_ADD_TEST(suite, lun_construct_null_ctx);
+ CU_ADD_TEST(suite, lun_construct_success);
+ CU_ADD_TEST(suite, lun_reset_task_wait_scsi_task_complete);
+ CU_ADD_TEST(suite, lun_reset_task_suspend_scsi_task);
+ CU_ADD_TEST(suite, lun_check_pending_tasks_only_for_specific_initiator);
+ CU_ADD_TEST(suite, abort_pending_mgmt_tasks_when_lun_is_removed);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ allocate_threads(1);
+ set_thread(0);
+ CU_basic_run_tests();
+ free_threads();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/scsi.c/.gitignore b/src/spdk/test/unit/lib/scsi/scsi.c/.gitignore
new file mode 100644
index 000000000..99a7db2b1
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi.c/.gitignore
@@ -0,0 +1 @@
+scsi_ut
diff --git a/src/spdk/test/unit/lib/scsi/scsi.c/Makefile b/src/spdk/test/unit/lib/scsi/scsi.c/Makefile
new file mode 100644
index 000000000..2ed249227
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = trace
+TEST_FILE = scsi_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c b/src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c
new file mode 100644
index 000000000..430ff96b0
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c
@@ -0,0 +1,69 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/scsi.h"
+
+#include "spdk_cunit.h"
+
+#include "scsi/scsi.c"
+
+static void
+scsi_init(void)
+{
+ int rc;
+
+ rc = spdk_scsi_init();
+ CU_ASSERT_EQUAL(rc, 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("scsi_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, scsi_init);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore
new file mode 100644
index 000000000..8f1ecc12c
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore
@@ -0,0 +1 @@
+scsi_bdev_ut
diff --git a/src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile
new file mode 100644
index 000000000..66a4119bb
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = scsi_bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c
new file mode 100644
index 000000000..4e64f7071
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c
@@ -0,0 +1,1037 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "scsi/task.c"
+#include "scsi/scsi_bdev.c"
+#include "common/lib/test_env.c"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/mock.h"
+
+SPDK_LOG_REGISTER_COMPONENT("scsi", SPDK_LOG_SCSI)
+
+struct spdk_scsi_globals g_spdk_scsi;
+
+static uint64_t g_test_bdev_num_blocks;
+
+TAILQ_HEAD(, spdk_bdev_io) g_bdev_io_queue;
+int g_scsi_cb_called = 0;
+
+TAILQ_HEAD(, spdk_bdev_io_wait_entry) g_io_wait_queue;
+bool g_bdev_io_pool_full = false;
+
+bool
+spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
+{
+ abort();
+ return false;
+}
+
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
+
+DEFINE_STUB(spdk_bdev_get_name, const char *,
+ (const struct spdk_bdev *bdev), "test");
+
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
+ (const struct spdk_bdev *bdev), 512);
+
+DEFINE_STUB(spdk_bdev_get_md_size, uint32_t,
+ (const struct spdk_bdev *bdev), 8);
+
+DEFINE_STUB(spdk_bdev_is_md_interleaved, bool,
+ (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_get_data_block_size, uint32_t,
+ (const struct spdk_bdev *bdev), 512);
+
+uint64_t
+spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
+{
+ return g_test_bdev_num_blocks;
+}
+
+DEFINE_STUB(spdk_bdev_get_product_name, const char *,
+ (const struct spdk_bdev *bdev), "test product");
+
+DEFINE_STUB(spdk_bdev_has_write_cache, bool,
+ (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type,
+ (const struct spdk_bdev *bdev), SPDK_DIF_DISABLE);
+
+DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool,
+ (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
+ (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);
+
+DEFINE_STUB(scsi_pr_out, int, (struct spdk_scsi_task *task,
+ uint8_t *cdb, uint8_t *data, uint16_t data_len), 0);
+
+DEFINE_STUB(scsi_pr_in, int, (struct spdk_scsi_task *task, uint8_t *cdb,
+ uint8_t *data, uint16_t data_len), 0);
+
+DEFINE_STUB(scsi2_reserve, int, (struct spdk_scsi_task *task, uint8_t *cdb), 0);
+DEFINE_STUB(scsi2_release, int, (struct spdk_scsi_task *task), 0);
+
+void
+scsi_lun_complete_task(struct spdk_scsi_lun *lun, struct spdk_scsi_task *task)
+{
+ g_scsi_cb_called++;
+}
+
+DEFINE_STUB_V(scsi_lun_complete_reset_task,
+ (struct spdk_scsi_lun *lun, struct spdk_scsi_task *task));
+
+DEFINE_STUB(spdk_scsi_lun_id_int_to_fmt, uint64_t, (int lun_id), 0);
+
+static void
+ut_put_task(struct spdk_scsi_task *task)
+{
+ if (task->alloc_len) {
+ free(task->iov.iov_base);
+ }
+
+ task->iov.iov_base = NULL;
+ task->iov.iov_len = 0;
+ task->alloc_len = 0;
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+}
+
+static void
+ut_init_task(struct spdk_scsi_task *task)
+{
+ memset(task, 0xFF, sizeof(*task));
+ task->iov.iov_base = NULL;
+ task->iovs = &task->iov;
+ task->iovcnt = 1;
+ task->alloc_len = 0;
+ task->dxfer_dir = SPDK_SCSI_DIR_NONE;
+}
+
+void
+spdk_bdev_io_get_scsi_status(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+ switch (bdev_io->internal.status) {
+ case SPDK_BDEV_IO_STATUS_SUCCESS:
+ *sc = SPDK_SCSI_STATUS_GOOD;
+ *sk = SPDK_SCSI_SENSE_NO_SENSE;
+ *asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
+ *ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
+ *sc = bdev_io->internal.error.scsi.sc;
+ *sk = bdev_io->internal.error.scsi.sk;
+ *asc = bdev_io->internal.error.scsi.asc;
+ *ascq = bdev_io->internal.error.scsi.ascq;
+ break;
+ default:
+ *sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
+ *sk = SPDK_SCSI_SENSE_ABORTED_COMMAND;
+ *asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
+ *ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ }
+}
+
+void
+spdk_bdev_io_get_iovec(struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp)
+{
+ *iovp = NULL;
+ *iovcntp = 0;
+}
+
+static void
+ut_bdev_io_flush(void)
+{
+ struct spdk_bdev_io *bdev_io;
+ struct spdk_bdev_io_wait_entry *entry;
+
+ while (!TAILQ_EMPTY(&g_bdev_io_queue) || !TAILQ_EMPTY(&g_io_wait_queue)) {
+ while (!TAILQ_EMPTY(&g_bdev_io_queue)) {
+ bdev_io = TAILQ_FIRST(&g_bdev_io_queue);
+ TAILQ_REMOVE(&g_bdev_io_queue, bdev_io, internal.link);
+ bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
+ free(bdev_io);
+ }
+
+ while (!TAILQ_EMPTY(&g_io_wait_queue)) {
+ entry = TAILQ_FIRST(&g_io_wait_queue);
+ TAILQ_REMOVE(&g_io_wait_queue, entry, link);
+ entry->cb_fn(entry->cb_arg);
+ }
+ }
+}
+
+static int
+_spdk_bdev_io_op(spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ if (g_bdev_io_pool_full) {
+ g_bdev_io_pool_full = false;
+ return -ENOMEM;
+ }
+
+ bdev_io = calloc(1, sizeof(*bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ bdev_io->internal.cb = cb;
+ bdev_io->internal.caller_ctx = cb_arg;
+
+ TAILQ_INSERT_TAIL(&g_bdev_io_queue, bdev_io, internal.link);
+
+ return 0;
+}
+
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry)
+{
+ TAILQ_INSERT_TAIL(&g_io_wait_queue, entry, link);
+ return 0;
+}
+
+int
+spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
+ bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
+ uint32_t data_offset, uint16_t guard_seed)
+{
+ ctx->init_ref_tag = init_ref_tag;
+ ctx->ref_tag_offset = data_offset / 512;
+ return 0;
+}
+
+/*
+ * This test specifically tests a mode select 6 command from the
+ * Windows SCSI compliance test that caused SPDK to crash.
+ */
+static void
+mode_select_6_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[16];
+ char data[24];
+ int rc;
+
+ ut_init_task(&task);
+
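+ /* MODE SELECT(6) CDB: opcode 0x15; byte 1 = 0x11 sets the PF and SP bits;
+ * byte 4 = 0x18 gives a 24-byte parameter list length, matching data[] below.
+ */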
+ cdb[0] = 0x15;
+ cdb[1] = 0x11;
+ cdb[2] = 0x00;
+ cdb[3] = 0x00;
+ cdb[4] = 0x18;
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ memset(data, 0, sizeof(data));
+ data[4] = 0x08;
+ data[5] = 0x02;
+ spdk_scsi_task_set_data(&task, data, sizeof(data));
+
+ rc = bdev_scsi_execute(&task);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a mode select 6 command which
+ * contains no mode pages.
+ */
+static void
+mode_select_6_test2(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[16];
+ int rc;
+
+ ut_init_task(&task);
+
+ cdb[0] = 0x15;
+ cdb[1] = 0x00;
+ cdb[2] = 0x00;
+ cdb[3] = 0x00;
+ cdb[4] = 0x00;
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = bdev_scsi_execute(&task);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a mode sense 6 command which
+ * returns all subpage 00h mode pages.
+ */
+static void
+mode_sense_6_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[12];
+ unsigned char *data;
+ int rc;
+ unsigned char mode_data_len = 0;
+ unsigned char medium_type = 0;
+ unsigned char dev_specific_param = 0;
+ unsigned char blk_descriptor_len = 0;
+
+ memset(&bdev, 0, sizeof(struct spdk_bdev));
+ ut_init_task(&task);
+ memset(cdb, 0, sizeof(cdb));
+
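+ /* MODE SENSE(6) CDB: opcode 0x1A; page code 0x3F requests all mode pages
+ * (subpage 00h), and 0xFF is the allocation length.
+ */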
+ cdb[0] = 0x1A;
+ cdb[2] = 0x3F;
+ cdb[4] = 0xFF;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
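+ /* Parse the 4-byte MODE SENSE(6) header: mode data length, medium type,
+ * device-specific parameter, block descriptor length.
+ */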
+ data = task.iovs[0].iov_base;
+ mode_data_len = data[0];
+ medium_type = data[1];
+ dev_specific_param = data[2];
+ blk_descriptor_len = data[3];
+
+ CU_ASSERT(mode_data_len >= 11);
+ CU_ASSERT_EQUAL(medium_type, 0);
+ CU_ASSERT_EQUAL(dev_specific_param, 0);
+ CU_ASSERT_EQUAL(blk_descriptor_len, 8);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a mode sense 10 command which
+ * returns all subpage 00h mode pages.
+ */
+static void
+mode_sense_10_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[12];
+ unsigned char *data;
+ int rc;
+ unsigned short mode_data_len = 0;
+ unsigned char medium_type = 0;
+ unsigned char dev_specific_param = 0;
+ unsigned short blk_descriptor_len = 0;
+
+ memset(&bdev, 0, sizeof(struct spdk_bdev));
+ ut_init_task(&task);
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x5A;
+ cdb[2] = 0x3F;
+ cdb[8] = 0xFF;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
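+ /* Parse the 8-byte MODE SENSE(10) header: bytes 0-1 mode data length,
+ * byte 2 medium type, byte 3 device-specific parameter, bytes 6-7 block
+ * descriptor length.
+ */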
+ data = task.iovs[0].iov_base;
+ mode_data_len = ((data[0] << 8) + data[1]);
+ medium_type = data[2];
+ dev_specific_param = data[3];
+ blk_descriptor_len = ((data[6] << 8) + data[7]);
+
+ CU_ASSERT(mode_data_len >= 14);
+ CU_ASSERT_EQUAL(medium_type, 0);
+ CU_ASSERT_EQUAL(dev_specific_param, 0);
+ CU_ASSERT_EQUAL(blk_descriptor_len, 8);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a scsi inquiry command from the
+ * Windows SCSI compliance test that failed to return the
+ * expected SCSI error sense code.
+ */
+static void
+inquiry_evpd_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[6];
+ int rc;
+
+ ut_init_task(&task);
+
+ cdb[0] = 0x12;
+ cdb[1] = 0x00; /* EVPD = 0 */
+ cdb[2] = 0xff; /* PageCode non-zero */
+ cdb[3] = 0x00;
+ cdb[4] = 0xff;
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
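+ /* EVPD = 0 with a non-zero page code is invalid, so the expected sense is
+ * ILLEGAL REQUEST with ASC 0x24/ASCQ 0x00 (INVALID FIELD IN CDB).
+ */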
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(task.sense_data[2] & 0xf, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(task.sense_data[12], 0x24);
+ CU_ASSERT_EQUAL(task.sense_data[13], 0x0);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test verifies specific return data (the Version field) for a
+ * standard SCSI INQUIRY command.
+ */
+static void
+inquiry_standard_test(void)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[6];
+ char *data;
+ struct spdk_scsi_cdb_inquiry_data *inq_data;
+ int rc;
+
+ ut_init_task(&task);
+
+ cdb[0] = 0x12;
+ cdb[1] = 0x00; /* EVPD = 0 */
+ cdb[2] = 0x00; /* PageCode zero - requesting standard inquiry */
+ cdb[3] = 0x00;
+ cdb[4] = 0xff; /* Indicate data size used by conformance test */
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = bdev_scsi_execute(&task);
+
+ data = task.iovs[0].iov_base;
+ inq_data = (struct spdk_scsi_cdb_inquiry_data *)&data[0];
+
+ CU_ASSERT_EQUAL(inq_data->version, SPDK_SPC_VERSION_SPC3);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ ut_put_task(&task);
+}
+
+static void
+_inquiry_overflow_test(uint8_t alloc_len)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ uint8_t cdb[6];
+ int rc;
+ /* expects a 4K internal data buffer */
+ char data[4096], data_compare[4096];
+
+ ut_init_task(&task);
+
+ cdb[0] = 0x12;
+ cdb[1] = 0x00; /* EVPD = 0 */
+ cdb[2] = 0x00; /* PageCode zero - requesting standard inquiry */
+ cdb[3] = 0x00;
+ cdb[4] = alloc_len; /* Indicate data size used by conformance test */
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ memset(data, 0, sizeof(data));
+ memset(data_compare, 0, sizeof(data_compare));
+
+ spdk_scsi_task_set_data(&task, data, sizeof(data));
+
+ rc = bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
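+ /* Nothing beyond the allocation length may be written, and the amount
+ * transferred must not exceed it.
+ */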
+ CU_ASSERT_EQUAL(memcmp(data + alloc_len, data_compare + alloc_len, sizeof(data) - alloc_len), 0);
+ CU_ASSERT(task.data_transferred <= alloc_len);
+
+ ut_put_task(&task);
+}
+
+static void
+inquiry_overflow_test(void)
+{
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ _inquiry_overflow_test(i);
+ }
+}
+
+static void
+scsi_name_padding_test(void)
+{
+ char name[SPDK_SCSI_DEV_MAX_NAME + 1];
+ char buf[SPDK_SCSI_DEV_MAX_NAME + 1];
+ int written, i;
+
+ /* case 1 */
+ memset(name, '\0', sizeof(name));
+ memset(name, 'x', 251);
+ written = bdev_scsi_pad_scsi_name(buf, name);
+
+ CU_ASSERT(written == 252);
+ CU_ASSERT(buf[250] == 'x');
+ CU_ASSERT(buf[251] == '\0');
+
+ /* case 2: */
+ memset(name, '\0', sizeof(name));
+ memset(name, 'x', 252);
+ written = bdev_scsi_pad_scsi_name(buf, name);
+
+ CU_ASSERT(written == 256);
+ CU_ASSERT(buf[251] == 'x');
+ for (i = 252; i < 256; i++) {
+ CU_ASSERT(buf[i] == '\0');
+ }
+
+ /* case 3 */
+ memset(name, '\0', sizeof(name));
+ memset(name, 'x', 255);
+ written = bdev_scsi_pad_scsi_name(buf, name);
+
+ CU_ASSERT(written == 256);
+ CU_ASSERT(buf[254] == 'x');
+ CU_ASSERT(buf[255] == '\0');
+}
+
+/*
+ * This test verifies how specific bdev errors are translated to SCSI status and sense data.
+ */
+static void
+task_complete_test(void)
+{
+ struct spdk_scsi_task task;
+ struct spdk_bdev_io bdev_io = {};
+ struct spdk_scsi_lun lun;
+
+ ut_init_task(&task);
+
+ TAILQ_INIT(&lun.tasks);
+ TAILQ_INSERT_TAIL(&lun.tasks, &task, scsi_link);
+ task.lun = &lun;
+
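+ /* The three cases below exercise bdev-to-SCSI status translation:
+ * SUCCESS maps to GOOD, SCSI_ERROR propagates the embedded sense data,
+ * and any other failure maps to CHECK CONDITION / ABORTED COMMAND.
+ */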
+ bdev_io.internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ bdev_scsi_task_complete_cmd(&bdev_io, bdev_io.internal.status, &task);
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ bdev_io.internal.status = SPDK_BDEV_IO_STATUS_SCSI_ERROR;
+ bdev_io.internal.error.scsi.sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
+ bdev_io.internal.error.scsi.sk = SPDK_SCSI_SENSE_HARDWARE_ERROR;
+ bdev_io.internal.error.scsi.asc = SPDK_SCSI_ASC_WARNING;
+ bdev_io.internal.error.scsi.ascq = SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED;
+ bdev_scsi_task_complete_cmd(&bdev_io, bdev_io.internal.status, &task);
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(task.sense_data[2] & 0xf, SPDK_SCSI_SENSE_HARDWARE_ERROR);
+ CU_ASSERT_EQUAL(task.sense_data[12], SPDK_SCSI_ASC_WARNING);
+ CU_ASSERT_EQUAL(task.sense_data[13], SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ bdev_io.internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+ bdev_scsi_task_complete_cmd(&bdev_io, bdev_io.internal.status, &task);
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(task.sense_data[2] & 0xf, SPDK_SCSI_SENSE_ABORTED_COMMAND);
+ CU_ASSERT_EQUAL(task.sense_data[12], SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(task.sense_data[13], SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ ut_put_task(&task);
+}
+
+static void
+lba_range_test(void)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_task task;
+ uint8_t cdb[16];
+ int rc;
+
+ lun.bdev = &bdev;
+
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.lun->bdev_desc = NULL;
+ task.lun->io_channel = NULL;
+ task.cdb = cdb;
+
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x88; /* READ (16) */
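+ /* READ(16) CDB layout: bytes 2-9 hold the LBA, bytes 10-13 the transfer
+ * length in blocks.
+ */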
+
+ /* Test block device size of 4 blocks */
+ g_test_bdev_num_blocks = 4;
+
+ /* LBA = 0, length = 1 (in range) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* LBA = 4, length = 1 (LBA out of range) */
+ to_be64(&cdb[2], 4); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ /* LBA = 0, length = 4 (in range, max valid size) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 4); /* transfer length */
+ task.transfer_len = 4 * 512;
+ task.status = 0xFF;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* LBA = 0, length = 5 (LBA in range, length beyond end of bdev) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 5); /* transfer length */
+ task.transfer_len = 5 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ ut_put_task(&task);
+}
+
+static void
+xfer_len_test(void)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_task task;
+ uint8_t cdb[16];
+ int rc;
+
+ lun.bdev = &bdev;
+
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.lun->bdev_desc = NULL;
+ task.lun->io_channel = NULL;
+ task.cdb = cdb;
+
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x88; /* READ (16) */
+
+ /* Test block device size of 512 MiB */
+ g_test_bdev_num_blocks = 512 * 1024 * 1024;
+
+ /* 1 block */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* max transfer length (as reported in block limits VPD page) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], SPDK_WORK_BLOCK_SIZE / 512); /* transfer length */
+ task.transfer_len = SPDK_WORK_BLOCK_SIZE;
+ task.status = 0xFF;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* max transfer length plus one block (invalid) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], SPDK_WORK_BLOCK_SIZE / 512 + 1); /* transfer length */
+ task.transfer_len = SPDK_WORK_BLOCK_SIZE + 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT((task.sense_data[2] & 0xf) == SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_INVALID_FIELD_IN_CDB);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ /* zero transfer length (valid) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 0); /* transfer length */
+ task.transfer_len = 0;
+ task.offset = 0;
+ task.length = 0;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(task.data_transferred == 0);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ /* zero transfer length past end of disk (invalid) */
+ to_be64(&cdb[2], g_test_bdev_num_blocks); /* LBA */
+ to_be32(&cdb[10], 0); /* transfer length */
+ task.transfer_len = 0;
+ task.offset = 0;
+ task.length = 0;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ ut_put_task(&task);
+}
+
+static void
+_xfer_test(bool bdev_io_pool_full)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_task task;
+ uint8_t cdb[16];
+ char data[4096];
+ int rc;
+
+ lun.bdev = &bdev;
+
+ /* Test block device size of 512 MiB */
+ g_test_bdev_num_blocks = 512 * 1024 * 1024;
+
+ /* Read 1 block */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.lun->bdev_desc = NULL;
+ task.lun->io_channel = NULL;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x88; /* READ (16) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ ut_put_task(&task);
+
+ /* Write 1 block */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x8a; /* WRITE (16) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ task.offset = 0;
+ task.length = 1 * 512;
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ ut_put_task(&task);
+
+ /* Unmap 5 blocks using 2 descriptors */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x42; /* UNMAP */
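+ /* UNMAP parameter list: bytes 2-3 carry the block descriptor data length
+ * (2 descriptors x 16 bytes = 32); each descriptor is an 8-byte LBA
+ * followed by a 4-byte block count.
+ */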
+ memset(data, 0, sizeof(data));
+ to_be16(&data[7], 2); /* 2 parameters in list */
+ to_be16(&data[2], 32); /* 2 descriptors */
+ to_be64(&data[8], 1); /* LBA 1 */
+ to_be32(&data[16], 2); /* 2 blocks */
+ to_be64(&data[24], 10); /* LBA 10 */
+ to_be32(&data[32], 3); /* 3 blocks */
+ spdk_scsi_task_set_data(&task, data, sizeof(data));
+ task.status = SPDK_SCSI_STATUS_GOOD;
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ ut_put_task(&task);
+
+ /* Flush 1 block */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x91; /* SYNCHRONIZE CACHE (16) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* 1 block */
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ ut_put_task(&task);
+}
+
+static void
+xfer_test(void)
+{
+ _xfer_test(false);
+ _xfer_test(true);
+}
+
+static void
+get_dif_ctx_test(void)
+{
+ struct spdk_bdev bdev = {};
+ struct spdk_scsi_task task = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ uint8_t cdb[16];
+ bool ret;
+
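+ /* READ(6) carries a 21-bit LBA in CDB bytes 1-3 (0x123450 here); the DIF
+ * reference tag is expected to be that LBA plus the transfer's block
+ * offset (task.offset / 512 = 6), i.e. 0x123456.
+ */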
+ cdb[0] = SPDK_SBC_READ_6;
+ cdb[1] = 0x12;
+ cdb[2] = 0x34;
+ cdb[3] = 0x50;
+ task.cdb = cdb;
+ task.offset = 0x6 * 512;
+
+ ret = bdev_scsi_get_dif_ctx(&bdev, &task, &dif_ctx);
+ CU_ASSERT(ret == true);
+ CU_ASSERT(dif_ctx.init_ref_tag + dif_ctx.ref_tag_offset == 0x123456);
+
+ cdb[0] = SPDK_SBC_WRITE_12;
+ to_be32(&cdb[2], 0x12345670);
+ task.offset = 0x8 * 512;
+
+ ret = bdev_scsi_get_dif_ctx(&bdev, &task, &dif_ctx);
+ CU_ASSERT(ret == true);
+ CU_ASSERT(dif_ctx.init_ref_tag + dif_ctx.ref_tag_offset == 0x12345678);
+
+ cdb[0] = SPDK_SBC_WRITE_16;
+ to_be64(&cdb[2], 0x0000000012345670);
+ task.offset = 0x8 * 512;
+
+ ret = bdev_scsi_get_dif_ctx(&bdev, &task, &dif_ctx);
+ CU_ASSERT(ret == true);
+ CU_ASSERT(dif_ctx.init_ref_tag + dif_ctx.ref_tag_offset == 0x12345678);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ TAILQ_INIT(&g_bdev_io_queue);
+ TAILQ_INIT(&g_io_wait_queue);
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("translation_suite", NULL, NULL);
+
+ CU_ADD_TEST(suite, mode_select_6_test);
+ CU_ADD_TEST(suite, mode_select_6_test2);
+ CU_ADD_TEST(suite, mode_sense_6_test);
+ CU_ADD_TEST(suite, mode_sense_10_test);
+ CU_ADD_TEST(suite, inquiry_evpd_test);
+ CU_ADD_TEST(suite, inquiry_standard_test);
+ CU_ADD_TEST(suite, inquiry_overflow_test);
+ CU_ADD_TEST(suite, task_complete_test);
+ CU_ADD_TEST(suite, lba_range_test);
+ CU_ADD_TEST(suite, xfer_len_test);
+ CU_ADD_TEST(suite, xfer_test);
+ CU_ADD_TEST(suite, scsi_name_padding_test);
+ CU_ADD_TEST(suite, get_dif_ctx_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/scsi_pr.c/.gitignore b/src/spdk/test/unit/lib/scsi/scsi_pr.c/.gitignore
new file mode 100644
index 000000000..9655d812e
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_pr.c/.gitignore
@@ -0,0 +1 @@
+scsi_pr_ut
diff --git a/src/spdk/test/unit/lib/scsi/scsi_pr.c/Makefile b/src/spdk/test/unit/lib/scsi/scsi_pr.c/Makefile
new file mode 100644
index 000000000..22be734ae
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_pr.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+TEST_FILE = scsi_pr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/scsi_pr.c/scsi_pr_ut.c b/src/spdk/test/unit/lib/scsi/scsi_pr.c/scsi_pr_ut.c
new file mode 100644
index 000000000..993277036
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_pr.c/scsi_pr_ut.c
@@ -0,0 +1,673 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "scsi/port.c"
+#include "scsi/scsi_pr.c"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/mock.h"
+
+SPDK_LOG_REGISTER_COMPONENT("scsi", SPDK_LOG_SCSI)
+
+void
+spdk_scsi_task_set_status(struct spdk_scsi_task *task, int sc, int sk,
+ int asc, int ascq)
+{
+ task->status = sc;
+}
+
+/*
+ * Reservation Unit Test Configuration
+ *
+ * -------- -------- -------
+ * | Host A | | Host B | | Host C|
+ * -------- -------- -------
+ * | | |
+ * ------ ------ ------
+ * |Port A| |Port B| |Port C|
+ * ------ ------ ------
+ * \ | /
+ * \ | /
+ * \ | /
+ * ------------------------
+ * | Target Node 1 Port 0 |
+ * ------------------------
+ * |
+ * ----------------------------------
+ * | Target Node |
+ * ----------------------------------
+ * |
+ * -----
+ * |LUN 0|
+ * -----
+ *
+ */
+
+static struct spdk_scsi_lun g_lun;
+static struct spdk_scsi_port g_i_port_a;
+static struct spdk_scsi_port g_i_port_b;
+static struct spdk_scsi_port g_i_port_c;
+static struct spdk_scsi_port g_t_port_0;
+
+static void
+ut_lun_deinit(void)
+{
+ struct spdk_scsi_pr_registrant *reg, *tmp;
+
+ TAILQ_FOREACH_SAFE(reg, &g_lun.reg_head, link, tmp) {
+ TAILQ_REMOVE(&g_lun.reg_head, reg, link);
+ free(reg);
+ }
+ g_lun.reservation.rtype = 0;
+ g_lun.reservation.crkey = 0;
+ g_lun.reservation.holder = NULL;
+ g_lun.pr_generation = 0;
+}
+
+static void
+ut_port_init(void)
+{
+ int rc;
+
+ /* g_i_port_a */
+ rc = scsi_port_construct(&g_i_port_a, 0xa, 0,
+ "iqn.2016-06.io.spdk:fe5aacf7420a,i,0x00023d00000a");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_scsi_port_set_iscsi_transport_id(&g_i_port_a,
+ "iqn.2016-06.io.spdk:fe5aacf7420a", 0x00023d00000a);
+ /* g_i_port_b */
+ rc = scsi_port_construct(&g_i_port_b, 0xb, 0,
+ "iqn.2016-06.io.spdk:fe5aacf7420b,i,0x00023d00000b");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_scsi_port_set_iscsi_transport_id(&g_i_port_b,
+ "iqn.2016-06.io.spdk:fe5aacf7420b", 0x00023d00000b);
+ /* g_i_port_c */
+ rc = scsi_port_construct(&g_i_port_c, 0xc, 0,
+ "iqn.2016-06.io.spdk:fe5aacf7420c,i,0x00023d00000c");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_scsi_port_set_iscsi_transport_id(&g_i_port_c,
+ "iqn.2016-06.io.spdk:fe5aacf7420c", 0x00023d00000c);
+ /* g_t_port_0 */
+ rc = scsi_port_construct(&g_t_port_0, 0x0, 1,
+ "iqn.2016-06.io.spdk:fe5aacf74200,t,0x00023d000000");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ spdk_scsi_port_set_iscsi_transport_id(&g_t_port_0,
+ "iqn.2016-06.io.spdk:fe5aacf74200", 0x00023d000000);
+}
+
+static void
+ut_lun_init(void)
+{
+ TAILQ_INIT(&g_lun.reg_head);
+}
+
+static void
+ut_init_reservation_test(void)
+{
+ ut_lun_init();
+ ut_port_init();
+ ut_lun_init();
+}
+
+static void
+ut_deinit_reservation_test(void)
+{
+ ut_lun_deinit();
+}
+
+/* Host A: register with key 0xa.
+ * Host B: register with key 0xb.
+ * Host C: register with key 0xc.
+ */
+static void
+test_build_registrants(void)
+{
+ struct spdk_scsi_pr_registrant *reg;
+ struct spdk_scsi_task task = {0};
+ uint32_t gen;
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+
+ gen = g_lun.pr_generation;
+
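+ /* REGISTER semantics exercised below: a zero reservation key creates a
+ * new registration, a non-zero key must match the existing one (otherwise
+ * RESERVATION CONFLICT), and a zero service action key unregisters.
+ */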
+ /* I_T nexus: Initiator Port A to Target Port 0 */
+ task.initiator_port = &g_i_port_a;
+ /* Test Case: Host A registers with a new key */
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0x0, 0xa1, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa1);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 1);
+
+ /* Test Case: Host A replaces with a new key */
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0xa1, 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 2);
+
+ /* Test Case: Host A tries to replace its key using a stale reservation key; a reservation conflict is expected */
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0xa1, 0xdead, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 2);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+
+ /* I_T nexus: Initiator Port B to Target Port 0 */
+ task.initiator_port = &g_i_port_b;
+ /* Test Case: Host B registers with a new key */
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0x0, 0xb, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_b, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 3);
+
+ /* I_T nexus: Initiator Port C to Target Port 0 */
+ task.initiator_port = &g_i_port_c;
+ /* Test Case: Host C registers with a new key */
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0x0, 0xc, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_c, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xc);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 4);
+}
+
+static void
+test_reservation_register(void)
+{
+ ut_init_reservation_test();
+
+ test_build_registrants();
+
+ ut_deinit_reservation_test();
+}
+
+static void
+test_reservation_reserve(void)
+{
+ struct spdk_scsi_pr_registrant *reg;
+ struct spdk_scsi_task task = {0};
+ uint32_t gen;
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+
+ ut_init_reservation_test();
+ test_build_registrants();
+
+ gen = g_lun.pr_generation;
+
+ task.initiator_port = &g_i_port_a;
+ task.status = 0;
+ /* Test Case: Host A acquires the reservation */
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen);
+
+ /* Test Case: Host B acquires the reservation, reservation
+ * conflict is expected.
+ */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xb, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen);
+
+ /* Test Case: Host A unregisters while holding the reservation */
+ task.initiator_port = &g_i_port_a;
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0xa, 0, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 1);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ /* Test Case: Host B acquires the reservation */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS,
+ 0xb, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 1);
+
+ /* Test Case: Host C tries to acquire the reservation with a non-matching type */
+ task.initiator_port = &g_i_port_c;
+ task.status = 0;
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xc, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 1);
+
+ /* Test Case: Host C acquires the reservation, all registrants type */
+ task.status = 0;
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS,
+ 0xc, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen + 1);
+
+ ut_deinit_reservation_test();
+}
+
+static void
+test_reservation_preempt_non_all_regs(void)
+{
+ struct spdk_scsi_pr_registrant *reg;
+ struct spdk_scsi_task task = {0};
+ uint32_t gen;
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+
+ ut_init_reservation_test();
+ test_build_registrants();
+
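+ /* PREEMPT on a non-all-registrants reservation: the service action
+ * reservation key selects the registrant to remove; naming the current
+ * holder also transfers the reservation to the preempting host.
+ */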
+ task.initiator_port = &g_i_port_a;
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ /* Host A acquires the reservation */
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY,
+ 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen);
+
+ /* Test Case: Host B preempts Host A; a check condition is expected
+ * for a zeroed service action reservation key */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY,
+ 0xb, 0);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+
+ /* Test Case: Host B preempts Host A, Host A is unregistered */
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xb, 0xa);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xb);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation > gen);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ /* Test Case: Host B preempts itself */
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xb, 0xb);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xb);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation > gen);
+
+ /* Test Case: Host B preempts itself and removes another registrant (Host C) */
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_WRITE_EXCLUSIVE,
+ 0xb, 0xc);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xb);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_c, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation > gen);
+
+ ut_deinit_reservation_test();
+}
+
+static void
+test_reservation_preempt_all_regs(void)
+{
+ struct spdk_scsi_pr_registrant *reg;
+ struct spdk_scsi_task task = {0};
+ uint32_t gen;
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+
+ ut_init_reservation_test();
+ test_build_registrants();
+
+ /* Test Case: No reservation yet, Host B removes Host C's registrant */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY,
+ 0xb, 0xc);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_c, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation > gen);
+
+ task.initiator_port = &g_i_port_a;
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ /* Host A acquires the reservation */
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS,
+ 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen);
+
+ /* Test Case: Host B preempts the all-registrants reservation and removes Host A's registrant */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ gen = g_lun.pr_generation;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS,
+ 0xb, 0x0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_a, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_lun.pr_generation > gen);
+
+ ut_deinit_reservation_test();
+}
+
+static void
+test_reservation_cmds_conflict(void)
+{
+ struct spdk_scsi_pr_registrant *reg;
+ struct spdk_scsi_task task = {0};
+ uint8_t cdb[32];
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+ task.cdb = cdb;
+
+ ut_init_reservation_test();
+ test_build_registrants();
+
+ /* Host A acquires the reservation */
+ task.initiator_port = &g_i_port_a;
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY,
+ 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+
+ /* Remove Host B registrant */
+ task.initiator_port = &g_i_port_b;
+ task.status = 0;
+ rc = scsi_pr_out_register(&task, SPDK_SCSI_PR_OUT_REGISTER,
+ 0xb, 0, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ reg = scsi_pr_get_registrant(&g_lun, &g_i_port_b, &g_t_port_0);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
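+ /* With a WRITE EXCLUSIVE - REGISTRANTS ONLY reservation, reads are
+ * allowed from any initiator, while writes are only allowed from
+ * registered initiators.
+ */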
+ /* Test Case: Host B, now unregistered, sends Read/Write commands.
+ * The Read is allowed; the Write gets a reservation conflict.
+ */
+ task.cdb[0] = SPDK_SBC_READ_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ task.cdb[0] = SPDK_SBC_WRITE_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+
+ /* Test Case: Host C (still registered) sends Read/Write commands, both are allowed */
+ task.initiator_port = &g_i_port_c;
+ task.cdb[0] = SPDK_SBC_READ_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ task.cdb[0] = SPDK_SBC_WRITE_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Host A preempts itself with SPDK_SCSI_PR_EXCLUSIVE_ACCESS */
+ task.initiator_port = &g_i_port_a;
+ rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,
+ SPDK_SCSI_PR_EXCLUSIVE_ACCESS,
+ 0xa, 0xa);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_EXCLUSIVE_ACCESS);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+
+ /* Test Case: Host C sends Read/Write commands, both now conflict */
+ task.initiator_port = &g_i_port_c;
+ task.cdb[0] = SPDK_SBC_READ_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+ task.cdb[0] = SPDK_SBC_WRITE_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+
+ /* Test Case: Host B sends Read/Write commands, both conflict */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SBC_READ_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+ task.cdb[0] = SPDK_SBC_WRITE_10;
+ task.status = 0;
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+
+ ut_deinit_reservation_test();
+}
+
+static void
+test_scsi2_reserve_release(void)
+{
+ struct spdk_scsi_task task = {0};
+ uint8_t cdb[32] = {};
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+ task.cdb = cdb;
+
+ ut_init_reservation_test();
+
+ /* Test Case: SPC2 RESERVE from Host A */
+ task.initiator_port = &g_i_port_a;
+ task.cdb[0] = SPDK_SPC2_RESERVE_10;
+ rc = scsi2_reserve(&task, task.cdb);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder != NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.flags == SCSI_SPC2_RESERVE);
+
+ /* Test Case: READ command from Host B */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SBC_READ_10;
+ task.status = 0;
+ rc = scsi2_reserve_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_RESERVATION_CONFLICT);
+
+ /* Test Case: SPDK_SPC2_RELEASE_10 command from Host B */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SPC2_RELEASE_10;
+ task.status = 0;
+ rc = scsi2_reserve_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ rc = scsi2_release(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder == NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.flags == 0);
+
+ /* Test Case: SPC2 RESERVE from Host B */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SPC2_RESERVE_10;
+ rc = scsi2_reserve(&task, task.cdb);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder != NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.flags == SCSI_SPC2_RESERVE);
+
+ /* Test Case: READ command from Host B */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SBC_READ_10;
+ rc = scsi2_reserve_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Test Case: SPDK_SPC2_RELEASE_10 command from Host A */
+ task.initiator_port = &g_i_port_a;
+ task.cdb[0] = SPDK_SPC2_RELEASE_10;
+
+ rc = scsi2_release(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder == NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.flags == 0);
+
+ ut_deinit_reservation_test();
+}
+
+static void
+test_pr_with_scsi2_reserve_release(void)
+{
+ struct spdk_scsi_task task = {0};
+ uint8_t cdb[32] = {};
+ int rc;
+
+ task.lun = &g_lun;
+ task.target_port = &g_t_port_0;
+ task.cdb = cdb;
+
+ ut_init_reservation_test();
+ test_build_registrants();
+
+ task.initiator_port = &g_i_port_a;
+ task.status = 0;
+ /* Test Case: Host A acquires the reservation */
+ rc = scsi_pr_out_reserve(&task, SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY,
+ 0xa, 0, 0, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
+
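+ /* While a persistent reservation is active, SPC-2 RESERVE/RELEASE must
+ * not change it: the calls below return good status but leave the PR
+ * holder and reservation type untouched.
+ */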
+ /* Test Case: SPDK_SPC2_RESERVE_10 command from Host B */
+ task.initiator_port = &g_i_port_b;
+ task.cdb[0] = SPDK_SPC2_RESERVE_10;
+ /* SPC-2 RESERVE/RELEASE commands are passed through to scsi2_reserve()/scsi2_release() */
+ rc = scsi_pr_check(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* These do nothing to the persistent reservation but return good status */
+ rc = scsi2_reserve(&task, task.cdb);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder != NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY);
+
+ rc = scsi2_release(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.holder != NULL);
+ SPDK_CU_ASSERT_FATAL(g_lun.reservation.rtype == SPDK_SCSI_PR_WRITE_EXCLUSIVE_REGS_ONLY);
+
+ ut_deinit_reservation_test();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("reservation_suite", NULL, NULL);
+ CU_ADD_TEST(suite, test_reservation_register);
+ CU_ADD_TEST(suite, test_reservation_reserve);
+ CU_ADD_TEST(suite, test_reservation_preempt_non_all_regs);
+ CU_ADD_TEST(suite, test_reservation_preempt_all_regs);
+ CU_ADD_TEST(suite, test_reservation_cmds_conflict);
+ CU_ADD_TEST(suite, test_scsi2_reserve_release);
+ CU_ADD_TEST(suite, test_pr_with_scsi2_reserve_release);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/sock/Makefile b/src/spdk/test/unit/lib/sock/Makefile
new file mode 100644
index 000000000..310f544ed
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/Makefile
@@ -0,0 +1,48 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = sock.c posix.c
+
+ifeq ($(OS), Linux)
+DIRS-$(CONFIG_URING) += uring.c
+endif
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/sock/posix.c/.gitignore b/src/spdk/test/unit/lib/sock/posix.c/.gitignore
new file mode 100644
index 000000000..7d8243ef0
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/posix.c/.gitignore
@@ -0,0 +1 @@
+posix_ut
diff --git a/src/spdk/test/unit/lib/sock/posix.c/Makefile b/src/spdk/test/unit/lib/sock/posix.c/Makefile
new file mode 100644
index 000000000..e06a2adb1
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/posix.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = posix_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/sock/posix.c/posix_ut.c b/src/spdk/test/unit/lib/sock/posix.c/posix_ut.c
new file mode 100644
index 000000000..498a37628
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/posix.c/posix_ut.c
@@ -0,0 +1,174 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/util.h"
+
+#include "spdk_internal/mock.h"
+
+#include "spdk_cunit.h"
+
+#include "sock/posix/posix.c"
+
+DEFINE_STUB_V(spdk_net_impl_register, (struct spdk_net_impl *impl, int priority));
+DEFINE_STUB(spdk_sock_close, int, (struct spdk_sock **s), 0);
+
+static void
+_req_cb(void *cb_arg, int len)
+{
+ *(bool *)cb_arg = true;
+ CU_ASSERT(len == 0);
+}
+
+static void
+flush(void)
+{
+ struct spdk_posix_sock_group_impl group = {};
+ struct spdk_posix_sock psock = {};
+ struct spdk_sock *sock = &psock.base;
+ struct spdk_sock_request *req1, *req2;
+ bool cb_arg1, cb_arg2;
+ int rc;
+
+ /* Set up data structures */
+ TAILQ_INIT(&sock->queued_reqs);
+ TAILQ_INIT(&sock->pending_reqs);
+ sock->group_impl = &group.base;
+
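+ /* Each request below carries two 32-byte iovecs (64 bytes total), so
+ * the mocked sendmsg() return values determine how much of each request
+ * is treated as sent.
+ */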
+ req1 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req1 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_len = 32;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_len = 32;
+ req1->iovcnt = 2;
+ req1->cb_fn = _req_cb;
+ req1->cb_arg = &cb_arg1;
+
+ req2 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req2 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_len = 32;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_len = 32;
+ req2->iovcnt = 2;
+ req2->cb_fn = _req_cb;
+ req2->cb_arg = &cb_arg2;
+
+ /* Simple test - a request with a 2 element iovec
+ * that gets submitted in a single sendmsg. */
+ spdk_sock_request_queue(sock, req1);
+ MOCK_SET(sendmsg, 64);
+ cb_arg1 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* Two requests, where both can fully send. */
+ spdk_sock_request_queue(sock, req1);
+ spdk_sock_request_queue(sock, req2);
+ MOCK_SET(sendmsg, 128);
+ cb_arg1 = false;
+ cb_arg2 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(cb_arg2 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* Two requests. Only first one can send */
+ spdk_sock_request_queue(sock, req1);
+ spdk_sock_request_queue(sock, req2);
+ MOCK_SET(sendmsg, 64);
+ cb_arg1 = false;
+ cb_arg2 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(cb_arg2 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req2);
+ TAILQ_REMOVE(&sock->queued_reqs, req2, internal.link);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* One request. Partial send. */
+ spdk_sock_request_queue(sock, req1);
+ MOCK_SET(sendmsg, 10);
+ cb_arg1 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
+
+ /* Do a second flush that partial sends again. */
+ MOCK_SET(sendmsg, 24);
+ cb_arg1 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
+
+ /* Flush the rest of the data */
+ MOCK_SET(sendmsg, 30);
+ cb_arg1 = false;
+ rc = _sock_flush(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ free(req1);
+ free(req2);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("posix", NULL, NULL);
+
+ CU_ADD_TEST(suite, flush);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/sock/sock.c/.gitignore b/src/spdk/test/unit/lib/sock/sock.c/.gitignore
new file mode 100644
index 000000000..bd9bf8335
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/sock.c/.gitignore
@@ -0,0 +1 @@
+sock_ut
diff --git a/src/spdk/test/unit/lib/sock/sock.c/Makefile b/src/spdk/test/unit/lib/sock/sock.c/Makefile
new file mode 100644
index 000000000..1d907c097
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/sock.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = sock_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/sock/sock.c/sock_ut.c b/src/spdk/test/unit/lib/sock/sock.c/sock_ut.c
new file mode 100644
index 000000000..bbe4822d7
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/sock.c/sock_ut.c
@@ -0,0 +1,982 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/util.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/sock.h"
+
+#include "sock/sock.c"
+#include "sock/posix/posix.c"
+
+#define UT_IP "test_ip"
+#define UT_PORT 1234
+
+bool g_read_data_called;
+ssize_t g_bytes_read;
+char g_buf[256];
+struct spdk_sock *g_server_sock_read;
+int g_ut_accept_count;
+struct spdk_ut_sock *g_ut_listen_sock;
+struct spdk_ut_sock *g_ut_client_sock;
+
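+/* The "ut" net implementation below emulates a connected socket pair
+ * entirely in memory: writev copies data into the peer's 256-byte buffer
+ * and recv/readv drain it, so the generic sock.c code can be exercised
+ * without real networking.
+ */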
+struct spdk_ut_sock {
+ struct spdk_sock base;
+ struct spdk_ut_sock *peer;
+ size_t bytes_avail;
+ char buf[256];
+};
+
+struct spdk_ut_sock_group_impl {
+ struct spdk_sock_group_impl base;
+ struct spdk_ut_sock *sock;
+};
+
+#define __ut_sock(sock) (struct spdk_ut_sock *)sock
+#define __ut_group(group) (struct spdk_ut_sock_group_impl *)group
+
+static int
+spdk_ut_sock_getaddr(struct spdk_sock *_sock, char *saddr, int slen, uint16_t *sport,
+ char *caddr, int clen, uint16_t *cport)
+{
+ return 0;
+}
+
+static struct spdk_sock *
+spdk_ut_sock_listen(const char *ip, int port, struct spdk_sock_opts *opts)
+{
+ struct spdk_ut_sock *sock;
+
+ if (strcmp(ip, UT_IP) || port != UT_PORT) {
+ return NULL;
+ }
+
+ CU_ASSERT(g_ut_listen_sock == NULL);
+
+ sock = calloc(1, sizeof(*sock));
+ SPDK_CU_ASSERT_FATAL(sock != NULL);
+ g_ut_listen_sock = sock;
+
+ return &sock->base;
+}
+
+static struct spdk_sock *
+spdk_ut_sock_connect(const char *ip, int port, struct spdk_sock_opts *opts)
+{
+ struct spdk_ut_sock *sock;
+
+ if (strcmp(ip, UT_IP) || port != UT_PORT) {
+ return NULL;
+ }
+
+ sock = calloc(1, sizeof(*sock));
+ SPDK_CU_ASSERT_FATAL(sock != NULL);
+ g_ut_accept_count++;
+ CU_ASSERT(g_ut_client_sock == NULL);
+ g_ut_client_sock = sock;
+
+ return &sock->base;
+}
+
+static struct spdk_sock *
+spdk_ut_sock_accept(struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ struct spdk_ut_sock *new_sock;
+
+ CU_ASSERT(sock == g_ut_listen_sock);
+
+ if (g_ut_accept_count == 0) {
+ errno = EAGAIN;
+ return NULL;
+ }
+
+ g_ut_accept_count--;
+ new_sock = calloc(1, sizeof(*sock));
+ if (new_sock == NULL) {
+ SPDK_ERRLOG("sock allocation failed\n");
+ return NULL;
+ }
+
+ SPDK_CU_ASSERT_FATAL(g_ut_client_sock != NULL);
+ g_ut_client_sock->peer = new_sock;
+ new_sock->peer = g_ut_client_sock;
+
+ return &new_sock->base;
+}
+
+static int
+spdk_ut_sock_close(struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ if (sock == g_ut_listen_sock) {
+ g_ut_listen_sock = NULL;
+ }
+ if (sock == g_ut_client_sock) {
+ g_ut_client_sock = NULL;
+ }
+
+ if (sock->peer != NULL) {
+ sock->peer->peer = NULL;
+ }
+
+ free(_sock);
+
+ return 0;
+}
+
+static ssize_t
+spdk_ut_sock_recv(struct spdk_sock *_sock, void *buf, size_t len)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ char tmp[256];
+
+ len = spdk_min(len, sock->bytes_avail);
+
+ if (len == 0) {
+ errno = EAGAIN;
+ return -1;
+ }
+
+ memcpy(buf, sock->buf, len);
+ memcpy(tmp, &sock->buf[len], sock->bytes_avail - len);
+ memcpy(sock->buf, tmp, sock->bytes_avail - len);
+ sock->bytes_avail -= len;
+
+ return len;
+}
+
+static ssize_t
+spdk_ut_sock_readv(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ size_t len;
+ char tmp[256];
+
+ /* Test implementation only supports single iov for now. */
+ CU_ASSERT(iovcnt == 1);
+
+ len = spdk_min(iov[0].iov_len, sock->bytes_avail);
+
+ if (len == 0) {
+ errno = EAGAIN;
+ return -1;
+ }
+
+ memcpy(iov[0].iov_base, sock->buf, len);
+ memcpy(tmp, &sock->buf[len], sock->bytes_avail - len);
+ memcpy(sock->buf, tmp, sock->bytes_avail - len);
+ sock->bytes_avail -= len;
+
+ return len;
+}
+
+static ssize_t
+spdk_ut_sock_writev(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ struct spdk_ut_sock *peer;
+
+ SPDK_CU_ASSERT_FATAL(sock->peer != NULL);
+ peer = sock->peer;
+
+ /* Test implementation only supports single iov for now. */
+ CU_ASSERT(iovcnt == 1);
+
+ memcpy(&peer->buf[peer->bytes_avail], iov[0].iov_base, iov[0].iov_len);
+ peer->bytes_avail += iov[0].iov_len;
+
+ return iov[0].iov_len;
+}
+
+static int
+spdk_ut_sock_set_recvlowat(struct spdk_sock *_sock, int nbytes)
+{
+ return 0;
+}
+
+static int
+spdk_ut_sock_set_recvbuf(struct spdk_sock *_sock, int sz)
+{
+ return 0;
+}
+
+static int
+spdk_ut_sock_set_sendbuf(struct spdk_sock *_sock, int sz)
+{
+ return 0;
+}
+
+static bool
+spdk_ut_sock_is_ipv6(struct spdk_sock *_sock)
+{
+ return false;
+}
+
+static bool
+spdk_ut_sock_is_ipv4(struct spdk_sock *_sock)
+{
+ return true;
+}
+
+static bool
+spdk_ut_sock_is_connected(struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ return (sock->peer != NULL);
+}
+
+static int
+spdk_ut_sock_get_placement_id(struct spdk_sock *_sock, int *placement_id)
+{
+ return -1;
+}
+
+static struct spdk_sock_group_impl *
+spdk_ut_sock_group_impl_create(void)
+{
+ struct spdk_ut_sock_group_impl *group_impl;
+
+ group_impl = calloc(1, sizeof(*group_impl));
+ SPDK_CU_ASSERT_FATAL(group_impl != NULL);
+
+ return &group_impl->base;
+}
+
+static int
+spdk_ut_sock_group_impl_add_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ group->sock = sock;
+
+ return 0;
+}
+
+static int
+spdk_ut_sock_group_impl_remove_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ CU_ASSERT(group->sock == sock);
+ group->sock = NULL;
+
+ return 0;
+}
+
+static int
+spdk_ut_sock_group_impl_poll(struct spdk_sock_group_impl *_group, int max_events,
+ struct spdk_sock **socks)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+
+ if (group->sock != NULL && group->sock->bytes_avail > 0) {
+ socks[0] = &group->sock->base;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+spdk_ut_sock_group_impl_close(struct spdk_sock_group_impl *_group)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+
+ CU_ASSERT(group->sock == NULL);
+ free(_group);
+
+ return 0;
+}
+
+static struct spdk_net_impl g_ut_net_impl = {
+ .name = "ut",
+ .getaddr = spdk_ut_sock_getaddr,
+ .connect = spdk_ut_sock_connect,
+ .listen = spdk_ut_sock_listen,
+ .accept = spdk_ut_sock_accept,
+ .close = spdk_ut_sock_close,
+ .recv = spdk_ut_sock_recv,
+ .readv = spdk_ut_sock_readv,
+ .writev = spdk_ut_sock_writev,
+ .set_recvlowat = spdk_ut_sock_set_recvlowat,
+ .set_recvbuf = spdk_ut_sock_set_recvbuf,
+ .set_sendbuf = spdk_ut_sock_set_sendbuf,
+ .is_ipv6 = spdk_ut_sock_is_ipv6,
+ .is_ipv4 = spdk_ut_sock_is_ipv4,
+ .is_connected = spdk_ut_sock_is_connected,
+ .get_placement_id = spdk_ut_sock_get_placement_id,
+ .group_impl_create = spdk_ut_sock_group_impl_create,
+ .group_impl_add_sock = spdk_ut_sock_group_impl_add_sock,
+ .group_impl_remove_sock = spdk_ut_sock_group_impl_remove_sock,
+ .group_impl_poll = spdk_ut_sock_group_impl_poll,
+ .group_impl_close = spdk_ut_sock_group_impl_close,
+};
+
+SPDK_NET_IMPL_REGISTER(ut, &g_ut_net_impl, DEFAULT_SOCK_PRIORITY + 2);
+
+static void
+_sock(const char *ip, int port, char *impl_name)
+{
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock;
+ struct spdk_sock *client_sock;
+ char *test_string = "abcdef";
+ char buffer[64];
+ ssize_t bytes_read, bytes_written;
+ struct iovec iov;
+ int rc;
+
+ listen_sock = spdk_sock_listen(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(errno == EAGAIN || errno == EWOULDBLOCK);
+
+ client_sock = spdk_sock_connect(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(client_sock != NULL);
+
+ /*
+ * Delay a bit here before checking if server socket is
+ * ready.
+ */
+ usleep(1000);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock != NULL);
+ CU_ASSERT(spdk_sock_is_connected(client_sock) == true);
+ CU_ASSERT(spdk_sock_is_connected(server_sock) == true);
+
+ /* Test spdk_sock_recv */
+ iov.iov_base = test_string;
+ iov.iov_len = 7;
+ bytes_written = spdk_sock_writev(client_sock, &iov, 1);
+ CU_ASSERT(bytes_written == 7);
+
+ usleep(1000);
+
+ bytes_read = spdk_sock_recv(server_sock, buffer, 2);
+ CU_ASSERT(bytes_read == 2);
+
+ usleep(1000);
+
+ bytes_read += spdk_sock_recv(server_sock, buffer + 2, 5);
+ CU_ASSERT(bytes_read == 7);
+
+ CU_ASSERT(strncmp(test_string, buffer, 7) == 0);
+
+ /* Test spdk_sock_readv */
+ iov.iov_base = test_string;
+ iov.iov_len = 7;
+ bytes_written = spdk_sock_writev(client_sock, &iov, 1);
+ CU_ASSERT(bytes_written == 7);
+
+ usleep(1000);
+
+ iov.iov_base = buffer;
+ iov.iov_len = 2;
+ bytes_read = spdk_sock_readv(server_sock, &iov, 1);
+ CU_ASSERT(bytes_read == 2);
+
+ usleep(1000);
+
+ iov.iov_base = buffer + 2;
+ iov.iov_len = 5;
+ bytes_read += spdk_sock_readv(server_sock, &iov, 1);
+ CU_ASSERT(bytes_read == 7);
+
+ usleep(1000);
+
+ CU_ASSERT(strncmp(test_string, buffer, 7) == 0);
+
+ rc = spdk_sock_close(&client_sock);
+ CU_ASSERT(client_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+#if defined(__FreeBSD__)
+ /* On FreeBSD, it takes a small amount of time for a close to propagate to the
+ * other side, even in loopback. Introduce a small sleep. */
+ sleep(1);
+#endif
+ CU_ASSERT(spdk_sock_is_connected(server_sock) == false);
+
+ rc = spdk_sock_close(&server_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+posix_sock(void)
+{
+ _sock("127.0.0.1", UT_PORT, "posix");
+}
+
+static void
+ut_sock(void)
+{
+ _sock(UT_IP, UT_PORT, "ut");
+}
+
+static void
+read_data(void *cb_arg, struct spdk_sock_group *group, struct spdk_sock *sock)
+{
+ struct spdk_sock *server_sock = cb_arg;
+
+ CU_ASSERT(server_sock == sock);
+
+ g_read_data_called = true;
+ g_bytes_read += spdk_sock_recv(server_sock, g_buf + g_bytes_read, sizeof(g_buf) - g_bytes_read);
+}
+
+static void
+_sock_group(const char *ip, int port, char *impl_name)
+{
+ struct spdk_sock_group *group;
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock;
+ struct spdk_sock *client_sock;
+ char *test_string = "abcdef";
+ ssize_t bytes_written;
+ struct iovec iov;
+ int rc;
+
+ listen_sock = spdk_sock_listen(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(errno == EAGAIN || errno == EWOULDBLOCK);
+
+ client_sock = spdk_sock_connect(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(client_sock != NULL);
+
+ usleep(1000);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock != NULL);
+
+ group = spdk_sock_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ /* pass null cb_fn */
+ rc = spdk_sock_group_add_sock(group, server_sock, NULL, NULL);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+
+ rc = spdk_sock_group_add_sock(group, server_sock, read_data, server_sock);
+ CU_ASSERT(rc == 0);
+
+ /* try adding sock a second time */
+ rc = spdk_sock_group_add_sock(group, server_sock, read_data, server_sock);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EBUSY);
+
+ g_read_data_called = false;
+ g_bytes_read = 0;
+ rc = spdk_sock_group_poll(group);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_read_data_called == false);
+
+ iov.iov_base = test_string;
+ iov.iov_len = 7;
+ bytes_written = spdk_sock_writev(client_sock, &iov, 1);
+ CU_ASSERT(bytes_written == 7);
+
+ usleep(1000);
+
+ g_read_data_called = false;
+ g_bytes_read = 0;
+ rc = spdk_sock_group_poll(group);
+
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(g_read_data_called == true);
+ CU_ASSERT(g_bytes_read == 7);
+
+ CU_ASSERT(strncmp(test_string, g_buf, 7) == 0);
+
+ rc = spdk_sock_close(&client_sock);
+ CU_ASSERT(client_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ /* Try to close sock_group while it still has sockets. */
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EBUSY);
+
+ /* Try to close sock while it is still part of a sock_group. */
+ rc = spdk_sock_close(&server_sock);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EBUSY);
+
+ rc = spdk_sock_group_remove_sock(group, server_sock);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(group == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&server_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+posix_sock_group(void)
+{
+ _sock_group("127.0.0.1", UT_PORT, "posix");
+}
+
+static void
+ut_sock_group(void)
+{
+ _sock_group(UT_IP, UT_PORT, "ut");
+}
+
+static void
+read_data_fairness(void *cb_arg, struct spdk_sock_group *group, struct spdk_sock *sock)
+{
+ struct spdk_sock *server_sock = cb_arg;
+ ssize_t bytes_read;
+ char buf[1];
+
+ CU_ASSERT(g_server_sock_read == NULL);
+ CU_ASSERT(server_sock == sock);
+
+ g_server_sock_read = server_sock;
+ bytes_read = spdk_sock_recv(server_sock, buf, 1);
+ CU_ASSERT(bytes_read == 1);
+}
+
+static void
+posix_sock_group_fairness(void)
+{
+ struct spdk_sock_group *group;
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock[3];
+ struct spdk_sock *client_sock[3];
+ char test_char = 'a';
+ ssize_t bytes_written;
+ struct iovec iov;
+ int i, rc;
+
+ listen_sock = spdk_sock_listen("127.0.0.1", UT_PORT, "posix");
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ group = spdk_sock_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ for (i = 0; i < 3; i++) {
+ client_sock[i] = spdk_sock_connect("127.0.0.1", UT_PORT, "posix");
+ SPDK_CU_ASSERT_FATAL(client_sock[i] != NULL);
+
+ usleep(1000);
+
+ server_sock[i] = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock[i] != NULL);
+
+ rc = spdk_sock_group_add_sock(group, server_sock[i],
+ read_data_fairness, server_sock[i]);
+ CU_ASSERT(rc == 0);
+ }
+
+ iov.iov_base = &test_char;
+ iov.iov_len = 1;
+
+ for (i = 0; i < 3; i++) {
+ bytes_written = spdk_sock_writev(client_sock[i], &iov, 1);
+ CU_ASSERT(bytes_written == 1);
+ }
+
+ usleep(1000);
+
+ /*
+ * Poll for just one event - this should be server sock 0, since that
+ * is the peer of the first client sock that we wrote to.
+ */
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(g_server_sock_read == server_sock[0]);
+
+ /*
+ * Now write another byte to client sock 0. We want to ensure that
+ * the sock group does not unfairly process the event for this sock
+ * before the socks that were written to earlier.
+ */
+ bytes_written = spdk_sock_writev(client_sock[0], &iov, 1);
+ CU_ASSERT(bytes_written == 1);
+
+ usleep(1000);
+
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(g_server_sock_read == server_sock[1]);
+
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(g_server_sock_read == server_sock[2]);
+
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(g_server_sock_read == server_sock[0]);
+
+ for (i = 0; i < 3; i++) {
+ rc = spdk_sock_group_remove_sock(group, server_sock[i]);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&client_sock[i]);
+ CU_ASSERT(client_sock[i] == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&server_sock[i]);
+ CU_ASSERT(server_sock[i] == NULL);
+ CU_ASSERT(rc == 0);
+ }
+
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(group == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+}
+
+struct close_ctx {
+ struct spdk_sock_group *group;
+ struct spdk_sock *sock;
+ bool called;
+};
+
+static void
+_first_close_cb(void *cb_arg, int err)
+{
+ struct close_ctx *ctx = cb_arg;
+ int rc;
+
+ ctx->called = true;
+
+ /* Always close the socket here */
+ rc = spdk_sock_group_remove_sock(ctx->group, ctx->sock);
+ CU_ASSERT(rc == 0);
+ spdk_sock_close(&ctx->sock);
+
+ CU_ASSERT(err == 0);
+}
+
+static void
+_second_close_cb(void *cb_arg, int err)
+{
+ *(bool *)cb_arg = true;
+ CU_ASSERT(err == -ECANCELED);
+}
+
+static void
+_sock_close(const char *ip, int port, char *impl_name)
+{
+ struct spdk_sock_group *group;
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock;
+ struct spdk_sock *client_sock;
+ uint8_t data_buf[64] = {};
+ struct spdk_sock_request *req1, *req2;
+ struct close_ctx ctx = {};
+ bool cb_arg2 = false;
+ int rc;
+
+ listen_sock = spdk_sock_listen(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ client_sock = spdk_sock_connect(ip, port, impl_name);
+ SPDK_CU_ASSERT_FATAL(client_sock != NULL);
+
+ usleep(1000);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock != NULL);
+
+ group = spdk_sock_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ rc = spdk_sock_group_add_sock(group, server_sock, read_data, server_sock);
+ CU_ASSERT(rc == 0);
+
+ /* Submit multiple async writevs on the server sock */
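+ /* The first request's callback closes the socket, so the second
+ * request is expected to complete with -ECANCELED.
+ */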
+
+ req1 = calloc(1, sizeof(struct spdk_sock_request) + sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req1 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_base = data_buf;
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_len = 64;
+ ctx.group = group;
+ ctx.sock = server_sock;
+ ctx.called = false;
+ req1->iovcnt = 1;
+ req1->cb_fn = _first_close_cb;
+ req1->cb_arg = &ctx;
+ spdk_sock_writev_async(server_sock, req1);
+ CU_ASSERT(ctx.called == false);
+
+ req2 = calloc(1, sizeof(struct spdk_sock_request) + sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req2 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_base = data_buf;
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_len = 64;
+ req2->iovcnt = 1;
+ req2->cb_fn = _second_close_cb;
+ req2->cb_arg = &cb_arg2;
+ spdk_sock_writev_async(server_sock, req2);
+ CU_ASSERT(cb_arg2 == false);
+
+ /* Poll the socket so the writev_async's send. The first one's
+ * callback will close the socket. */
+ spdk_sock_group_poll(group);
+ if (ctx.called == false) {
+ /* Sometimes the zerocopy completion isn't posted immediately. Delay slightly
+ * and poll one more time. */
+ usleep(1000);
+ spdk_sock_group_poll(group);
+ }
+ CU_ASSERT(ctx.called == true);
+ CU_ASSERT(cb_arg2 == true);
+
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(group == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&client_sock);
+ CU_ASSERT(client_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ free(req1);
+ free(req2);
+}
+
+static void
+_posix_sock_close(void)
+{
+ _sock_close("127.0.0.1", UT_PORT, "posix");
+}
+
+static void
+sock_get_default_opts(void)
+{
+ struct spdk_sock_opts opts;
+
+ /* opts_size is 0 */
+ opts.opts_size = 0;
+ opts.priority = 3;
+ spdk_sock_get_default_opts(&opts);
+ CU_ASSERT(opts.priority == 3);
+ CU_ASSERT(opts.opts_size == 0);
+
+ /* opts_size is less than sizeof(opts) */
+ opts.opts_size = 4;
+ opts.priority = 3;
+ spdk_sock_get_default_opts(&opts);
+ CU_ASSERT(opts.priority == 3);
+ CU_ASSERT(opts.opts_size == 4);
+
+ /* opts_size is equal to sizeof(opts) */
+ opts.opts_size = sizeof(opts);
+ opts.priority = 3;
+ spdk_sock_get_default_opts(&opts);
+ CU_ASSERT(opts.priority == SPDK_SOCK_DEFAULT_PRIORITY);
+ CU_ASSERT(opts.opts_size == sizeof(opts));
+
+ /* opts_size is larger then sizeof(opts) */
+ opts.opts_size = sizeof(opts) + 1;
+ opts.priority = 3;
+ spdk_sock_get_default_opts(&opts);
+ CU_ASSERT(opts.priority == SPDK_SOCK_DEFAULT_PRIORITY);
+ CU_ASSERT(opts.opts_size == (sizeof(opts) + 1));
+}
+
+static void
+ut_sock_impl_get_set_opts(void)
+{
+ int rc;
+ size_t len = 0;
+ /* Use any pointer value for opts. It is never dereferenced in this test */
+ struct spdk_sock_impl_opts *opts = (struct spdk_sock_impl_opts *)0x123456789;
+
+ rc = spdk_sock_impl_get_opts("ut", NULL, &len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+ rc = spdk_sock_impl_get_opts("ut", opts, NULL);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+ rc = spdk_sock_impl_get_opts("ut", opts, &len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == ENOTSUP);
+
+ rc = spdk_sock_impl_set_opts("ut", NULL, len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+ rc = spdk_sock_impl_set_opts("ut", opts, len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == ENOTSUP);
+}
+
+static void
+posix_sock_impl_get_set_opts(void)
+{
+ int rc;
+ size_t len = 0;
+ struct spdk_sock_impl_opts opts = {};
+ struct spdk_sock_impl_opts long_opts[2];
+
+ rc = spdk_sock_impl_get_opts("posix", NULL, &len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+ rc = spdk_sock_impl_get_opts("posix", &opts, NULL);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+
+ /* Check default opts */
+ len = sizeof(opts);
+ rc = spdk_sock_impl_get_opts("posix", &opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(len == sizeof(opts));
+ CU_ASSERT(opts.recv_buf_size == MIN_SO_RCVBUF_SIZE);
+ CU_ASSERT(opts.send_buf_size == MIN_SO_SNDBUF_SIZE);
+
+ /* Try to request more opts */
+ len = sizeof(long_opts);
+ rc = spdk_sock_impl_get_opts("posix", long_opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(len == sizeof(opts));
+
+ /* Try to request zero opts */
+ len = 0;
+ rc = spdk_sock_impl_get_opts("posix", &opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(len == 0);
+
+ rc = spdk_sock_impl_set_opts("posix", NULL, len);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+
+ opts.recv_buf_size = 16;
+ opts.send_buf_size = 4;
+ rc = spdk_sock_impl_set_opts("posix", &opts, sizeof(opts));
+ CU_ASSERT(rc == 0);
+ len = sizeof(opts);
+ memset(&opts, 0, sizeof(opts));
+ rc = spdk_sock_impl_get_opts("posix", &opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(opts.recv_buf_size == 16);
+ CU_ASSERT(opts.send_buf_size == 4);
+
+ /* Try to set more opts */
+ long_opts[0].recv_buf_size = 4;
+ long_opts[0].send_buf_size = 6;
+ long_opts[1].recv_buf_size = 0;
+ long_opts[1].send_buf_size = 0;
+ rc = spdk_sock_impl_set_opts("posix", long_opts, sizeof(long_opts));
+ CU_ASSERT(rc == 0);
+
+ /* Try to set fewer opts. Trailing fields should be left untouched */
+ opts.recv_buf_size = 5;
+ opts.send_buf_size = 10;
+ rc = spdk_sock_impl_set_opts("posix", &opts, sizeof(opts.recv_buf_size));
+ CU_ASSERT(rc == 0);
+ len = sizeof(opts);
+ memset(&opts, 0, sizeof(opts));
+ rc = spdk_sock_impl_get_opts("posix", &opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(opts.recv_buf_size == 5);
+ CU_ASSERT(opts.send_buf_size == 6);
+
+ /* Try to set a partial option. It should be left unchanged */
+ opts.recv_buf_size = 1000;
+ rc = spdk_sock_impl_set_opts("posix", &opts, 1);
+ CU_ASSERT(rc == 0);
+ len = sizeof(opts);
+ memset(&opts, 0, sizeof(opts));
+ rc = spdk_sock_impl_get_opts("posix", &opts, &len);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(opts.recv_buf_size == 5);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("sock", NULL, NULL);
+
+ CU_ADD_TEST(suite, posix_sock);
+ CU_ADD_TEST(suite, ut_sock);
+ CU_ADD_TEST(suite, posix_sock_group);
+ CU_ADD_TEST(suite, ut_sock_group);
+ CU_ADD_TEST(suite, posix_sock_group_fairness);
+ CU_ADD_TEST(suite, _posix_sock_close);
+ CU_ADD_TEST(suite, sock_get_default_opts);
+ CU_ADD_TEST(suite, ut_sock_impl_get_set_opts);
+ CU_ADD_TEST(suite, posix_sock_impl_get_set_opts);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/sock/uring.c/.gitignore b/src/spdk/test/unit/lib/sock/uring.c/.gitignore
new file mode 100644
index 000000000..ad7627b7b
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/uring.c/.gitignore
@@ -0,0 +1 @@
+uring_ut
diff --git a/src/spdk/test/unit/lib/sock/uring.c/Makefile b/src/spdk/test/unit/lib/sock/uring.c/Makefile
new file mode 100644
index 000000000..8b0da0181
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/uring.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = uring_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/sock/uring.c/uring_ut.c b/src/spdk/test/unit/lib/sock/uring.c/uring_ut.c
new file mode 100644
index 000000000..edad8e5da
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/uring.c/uring_ut.c
@@ -0,0 +1,272 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/util.h"
+
+#include "spdk_internal/mock.h"
+
+#include "spdk_cunit.h"
+
+#include "sock/uring/uring.c"
+
+DEFINE_STUB_V(spdk_net_impl_register, (struct spdk_net_impl *impl, int priority));
+DEFINE_STUB(spdk_sock_close, int, (struct spdk_sock **s), 0);
+DEFINE_STUB(__io_uring_get_cqe, int, (struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
+ unsigned submit,
+ unsigned wait_nr, sigset_t *sigmask), 0);
+DEFINE_STUB(io_uring_submit, int, (struct io_uring *ring), 0);
+DEFINE_STUB(io_uring_get_sqe, struct io_uring_sqe *, (struct io_uring *ring), 0);
+DEFINE_STUB(io_uring_queue_init, int, (unsigned entries, struct io_uring *ring, unsigned flags), 0);
+DEFINE_STUB_V(io_uring_queue_exit, (struct io_uring *ring));
+
+static void
+_req_cb(void *cb_arg, int len)
+{
+ *(bool *)cb_arg = true;
+ CU_ASSERT(len == 0);
+}
+
+static void
+flush_client(void)
+{
+ struct spdk_uring_sock_group_impl group = {};
+ struct spdk_uring_sock usock = {};
+ struct spdk_sock *sock = &usock.base;
+ struct spdk_sock_request *req1, *req2;
+ bool cb_arg1, cb_arg2;
+ int rc;
+
+ /* Set up data structures */
+ TAILQ_INIT(&sock->queued_reqs);
+ TAILQ_INIT(&sock->pending_reqs);
+ sock->group_impl = &group.base;
+
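+ /* req1 carries three 64-byte iovecs (192 bytes) and req2 two 32-byte
+ * iovecs (64 bytes); the mocked sendmsg() return values below select
+ * full or partial sends accordingly.
+ */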
+ req1 = calloc(1, sizeof(struct spdk_sock_request) + 3 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req1 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_len = 64;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_len = 64;
+ SPDK_SOCK_REQUEST_IOV(req1, 2)->iov_base = (void *)300;
+ SPDK_SOCK_REQUEST_IOV(req1, 2)->iov_len = 64;
+ req1->iovcnt = 3;
+ req1->cb_fn = _req_cb;
+ req1->cb_arg = &cb_arg1;
+
+ req2 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req2 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_len = 32;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_len = 32;
+ req2->iovcnt = 2;
+ req2->cb_fn = _req_cb;
+ req2->cb_arg = &cb_arg2;
+
+ /* Simple test - a request with a 3 element iovec
+ * that gets submitted in a single sendmsg. */
+ spdk_sock_request_queue(sock, req1);
+ MOCK_SET(sendmsg, 192);
+ cb_arg1 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* Two requests, where both can fully send. */
+ spdk_sock_request_queue(sock, req1);
+ spdk_sock_request_queue(sock, req2);
+ MOCK_SET(sendmsg, 256);
+ cb_arg1 = false;
+ cb_arg2 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(cb_arg2 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* Two requests. Only first one can send */
+ spdk_sock_request_queue(sock, req1);
+ spdk_sock_request_queue(sock, req2);
+ MOCK_SET(sendmsg, 192);
+ cb_arg1 = false;
+ cb_arg2 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(cb_arg2 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req2);
+ TAILQ_REMOVE(&sock->queued_reqs, req2, internal.link);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* One request. Partial send. */
+ spdk_sock_request_queue(sock, req1);
+ MOCK_SET(sendmsg, 10);
+ cb_arg1 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
+
+ /* Do a second flush that partial sends again. */
+ MOCK_SET(sendmsg, 52);
+ cb_arg1 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
+
+ /* Flush the rest of the data */
+ MOCK_SET(sendmsg, 130);
+ cb_arg1 = false;
+ rc = _sock_flush_client(sock);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ free(req1);
+ free(req2);
+}
+
+static void
+flush_server(void)
+{
+ struct spdk_uring_sock_group_impl group = {};
+ struct spdk_uring_sock usock = {};
+ struct spdk_sock *sock = &usock.base;
+ struct spdk_sock_request *req1, *req2;
+ bool cb_arg1, cb_arg2;
+ int rc;
+
+ /* Set up data structures */
+ TAILQ_INIT(&sock->queued_reqs);
+ TAILQ_INIT(&sock->pending_reqs);
+ sock->group_impl = &group.base;
+ usock.write_task.sock = &usock;
+ usock.group = &group;
+
+ req1 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req1 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_len = 64;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_len = 64;
+ req1->iovcnt = 2;
+ req1->cb_fn = _req_cb;
+ req1->cb_arg = &cb_arg1;
+
+ req2 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(req2 != NULL);
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_base = (void *)100;
+ SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_len = 32;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_base = (void *)200;
+ SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_len = 32;
+ req2->iovcnt = 2;
+ req2->cb_fn = _req_cb;
+ req2->cb_arg = &cb_arg2;
+
+ /* We should not call _sock_flush() directly here, since it would ultimately
+ * call liburing-related functions. */
+
+ /* Simple test - a request with a 2 element iovec
+ * that is fully completed. */
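+ /* sock_prep_reqs() returns the number of iovecs it gathered (2 here), and
+ * completing 128 bytes (64 + 64) finishes req1. */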
+ spdk_sock_request_queue(sock, req1);
+ cb_arg1 = false;
+ rc = sock_prep_reqs(sock, usock.write_task.iovs, 0, NULL);
+ CU_ASSERT(rc == 2);
+ sock_complete_reqs(sock, 128);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* Two requests, where both can be fully completed. */
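+ /* All four iovecs (2 + 2) are gathered; 192 completed bytes cover
+ * req1 (128) and req2 (64). */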
+ spdk_sock_request_queue(sock, req1);
+ spdk_sock_request_queue(sock, req2);
+ cb_arg1 = false;
+ cb_arg2 = false;
+ rc = sock_prep_reqs(sock, usock.write_task.iovs, 0, NULL);
+ CU_ASSERT(rc == 4);
+ sock_complete_reqs(sock, 192);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(cb_arg2 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ /* One request that is partially sent. */
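+ /* req1 totals 128 bytes; completions of 92, 10, and then 26 bytes add up
+ * to the full request. */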
+ spdk_sock_request_queue(sock, req1);
+ cb_arg1 = false;
+ rc = sock_prep_reqs(sock, usock.write_task.iovs, 0, NULL);
+ CU_ASSERT(rc == 2);
+ sock_complete_reqs(sock, 92);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
+
+ /* Complete a second partial send. */
+ sock_complete_reqs(sock, 10);
+ CU_ASSERT(cb_arg1 == false);
+ CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);
+
+ /* Data is finally sent. */
+ sock_complete_reqs(sock, 26);
+ CU_ASSERT(cb_arg1 == true);
+ CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));
+
+ free(req1);
+ free(req2);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("uring", NULL, NULL);
+
+ CU_ADD_TEST(suite, flush_client);
+ CU_ADD_TEST(suite, flush_server);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/thread/Makefile b/src/spdk/test/unit/lib/thread/Makefile
new file mode 100644
index 000000000..d73816947
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = thread.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/thread/thread.c/.gitignore b/src/spdk/test/unit/lib/thread/thread.c/.gitignore
new file mode 100644
index 000000000..1a165acb8
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/thread.c/.gitignore
@@ -0,0 +1 @@
+thread_ut
diff --git a/src/spdk/test/unit/lib/thread/thread.c/Makefile b/src/spdk/test/unit/lib/thread/thread.c/Makefile
new file mode 100644
index 000000000..461cfcd22
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/thread.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = thread_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/thread/thread.c/thread_ut.c b/src/spdk/test/unit/lib/thread/thread.c/thread_ut.c
new file mode 100644
index 000000000..d577671b8
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/thread.c/thread_ut.c
@@ -0,0 +1,1270 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/thread.h"
+
+#include "thread/thread.c"
+#include "common/lib/ut_multithread.c"
+
+static int g_sched_rc = 0;
+
+static int
+_thread_schedule(struct spdk_thread *thread)
+{
+ return g_sched_rc;
+}
+
+static bool
+_thread_op_supported(enum spdk_thread_op op)
+{
+ switch (op) {
+ case SPDK_THREAD_OP_NEW:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int
+_thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
+{
+ switch (op) {
+ case SPDK_THREAD_OP_NEW:
+ return _thread_schedule(thread);
+ default:
+ return -ENOTSUP;
+ }
+}
+
+static void
+thread_alloc(void)
+{
+ struct spdk_thread *thread;
+
+ /* No schedule callback */
+ spdk_thread_lib_init(NULL, 0);
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+ spdk_thread_lib_fini();
+
+ /* Schedule callback exists */
+ spdk_thread_lib_init(_thread_schedule, 0);
+
+ /* Scheduling succeeds */
+ g_sched_rc = 0;
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ /* Scheduling fails */
+ g_sched_rc = -1;
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread == NULL);
+
+ spdk_thread_lib_fini();
+
+ /* Scheduling callback exists with extended thread library initialization. */
+ spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0);
+
+ /* Scheduling succeeds */
+ g_sched_rc = 0;
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ /* Scheduling fails */
+ g_sched_rc = -1;
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread == NULL);
+
+ spdk_thread_lib_fini();
+}
+
+static void
+send_msg_cb(void *ctx)
+{
+ bool *done = ctx;
+
+ *done = true;
+}
+
+static void
+thread_send_msg(void)
+{
+ struct spdk_thread *thread0;
+ bool done = false;
+
+ allocate_threads(2);
+ set_thread(0);
+ thread0 = spdk_get_thread();
+
+ set_thread(1);
+ /* Simulate thread 1 sending a message to thread 0. */
+ spdk_thread_send_msg(thread0, send_msg_cb, &done);
+
+ /* We have not polled thread 0 yet, so done should be false. */
+ CU_ASSERT(!done);
+
+ /*
+ * Poll thread 1. The message was sent to thread 0, so this should be
+ * a nop and done should still be false.
+ */
+ poll_thread(1);
+ CU_ASSERT(!done);
+
+ /*
+ * Poll thread 0. This should execute the message and done should then
+ * be true.
+ */
+ poll_thread(0);
+ CU_ASSERT(done);
+
+ free_threads();
+}
+
+static int
+poller_run_done(void *ctx)
+{
+ bool *poller_run = ctx;
+
+ *poller_run = true;
+
+ return -1;
+}
+
+static void
+thread_poller(void)
+{
+ struct spdk_poller *poller = NULL;
+ bool poller_run = false;
+
+ allocate_threads(1);
+
+ set_thread(0);
+ MOCK_SET(spdk_get_ticks, 0);
+ /* Register a poller with no-wait time and test execution */
+ poller = spdk_poller_register(poller_run_done, &poller_run, 0);
+ CU_ASSERT(poller != NULL);
+
+ poll_threads();
+ CU_ASSERT(poller_run == true);
+
+ spdk_poller_unregister(&poller);
+ CU_ASSERT(poller == NULL);
+
+ /* Register a poller with 1000us wait time and test single execution */
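+ /* With spdk_get_ticks mocked, the timed poller does not run until
+ * spdk_delay_us(1000) advances the simulated clock by its full period. */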
+ poller_run = false;
+ poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
+ CU_ASSERT(poller != NULL);
+
+ poll_threads();
+ CU_ASSERT(poller_run == false);
+
+ spdk_delay_us(1000);
+ poll_threads();
+ CU_ASSERT(poller_run == true);
+
+ poller_run = false;
+ poll_threads();
+ CU_ASSERT(poller_run == false);
+
+ spdk_delay_us(1000);
+ poll_threads();
+ CU_ASSERT(poller_run == true);
+
+ spdk_poller_unregister(&poller);
+ CU_ASSERT(poller == NULL);
+
+ free_threads();
+}
+
+struct poller_ctx {
+ struct spdk_poller *poller;
+ bool run;
+};
+
+static int
+poller_run_pause(void *ctx)
+{
+ struct poller_ctx *poller_ctx = ctx;
+
+ poller_ctx->run = true;
+ spdk_poller_pause(poller_ctx->poller);
+
+ return 0;
+}
+
+static void
+poller_msg_pause_cb(void *ctx)
+{
+ struct spdk_poller *poller = ctx;
+
+ spdk_poller_pause(poller);
+}
+
+static void
+poller_msg_resume_cb(void *ctx)
+{
+ struct spdk_poller *poller = ctx;
+
+ spdk_poller_resume(poller);
+}
+
+static void
+poller_pause(void)
+{
+ struct poller_ctx poller_ctx = {};
+ unsigned int delay[] = { 0, 1000 };
+ unsigned int i;
+
+ allocate_threads(1);
+ set_thread(0);
+
+ /* Register a poller that pauses itself */
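+ /* The poller pauses itself from within its own callback, so it runs once
+ * and then stays paused on subsequent polls until resumed. */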
+ poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
+ CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_poller_unregister(&poller_ctx.poller);
+ CU_ASSERT_PTR_NULL(poller_ctx.poller);
+
+ /* Verify that resuming an unpaused poller doesn't do anything */
+ poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
+ CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);
+
+ spdk_poller_resume(poller_ctx.poller);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ /* Verify that pausing the same poller twice works too */
+ spdk_poller_pause(poller_ctx.poller);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_poller_pause(poller_ctx.poller);
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_poller_resume(poller_ctx.poller);
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ /* Verify that a poller is run when it's resumed immediately after pausing */
+ poller_ctx.run = false;
+ spdk_poller_pause(poller_ctx.poller);
+ spdk_poller_resume(poller_ctx.poller);
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ spdk_poller_unregister(&poller_ctx.poller);
+ CU_ASSERT_PTR_NULL(poller_ctx.poller);
+
+ /* Poll the thread to make sure the previous poller gets unregistered */
+ poll_threads();
+ CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);
+
+ /* Verify that it's possible to unregister a paused poller */
+ poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
+ CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ spdk_poller_pause(poller_ctx.poller);
+
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_poller_unregister(&poller_ctx.poller);
+
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+ CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);
+
+ /* Register pollers with 0 and 1000us wait time and pause/resume them */
+ for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
+ poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
+ CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);
+
+ spdk_delay_us(delay[i]);
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ spdk_poller_pause(poller_ctx.poller);
+
+ spdk_delay_us(delay[i]);
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_poller_resume(poller_ctx.poller);
+
+ spdk_delay_us(delay[i]);
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ /* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
+ spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);
+
+ spdk_delay_us(delay[i]);
+ poller_ctx.run = false;
+ poll_threads();
+ CU_ASSERT_EQUAL(poller_ctx.run, false);
+
+ spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);
+
+ poll_threads();
+ if (delay[i] > 0) {
+ spdk_delay_us(delay[i]);
+ poll_threads();
+ }
+ CU_ASSERT_EQUAL(poller_ctx.run, true);
+
+ spdk_poller_unregister(&poller_ctx.poller);
+ CU_ASSERT_PTR_NULL(poller_ctx.poller);
+ }
+
+ free_threads();
+}
+
+static void
+for_each_cb(void *ctx)
+{
+ int *count = ctx;
+
+ (*count)++;
+}
+
+static void
+thread_for_each(void)
+{
+ int count = 0;
+ int i;
+
+ allocate_threads(3);
+ set_thread(0);
+
+ spdk_for_each_thread(for_each_cb, &count, for_each_cb);
+
+ /* We have not polled thread 0 yet, so count should be 0 */
+ CU_ASSERT(count == 0);
+
+ /* Poll each thread to verify the message is passed to each */
+ for (i = 0; i < 3; i++) {
+ poll_thread(i);
+ CU_ASSERT(count == (i + 1));
+ }
+
+ /*
+ * After the message has run on each thread, the completion callback is
+ * invoked one more time on the original thread.
+ */
+ poll_thread(0);
+ CU_ASSERT(count == 4);
+
+ free_threads();
+}
+
+static int
+channel_create(void *io_device, void *ctx_buf)
+{
+ int *ch_count = io_device;
+
+ (*ch_count)++;
+ return 0;
+}
+
+static void
+channel_destroy(void *io_device, void *ctx_buf)
+{
+ int *ch_count = io_device;
+
+ (*ch_count)--;
+}
+
+static void
+channel_msg(struct spdk_io_channel_iter *i)
+{
+ int *msg_count = spdk_io_channel_iter_get_ctx(i);
+
+ (*msg_count)++;
+ spdk_for_each_channel_continue(i, 0);
+}
+
+static void
+channel_cpl(struct spdk_io_channel_iter *i, int status)
+{
+ int *msg_count = spdk_io_channel_iter_get_ctx(i);
+
+ (*msg_count)++;
+}
+
+static void
+for_each_channel_remove(void)
+{
+ struct spdk_io_channel *ch0, *ch1, *ch2;
+ int ch_count = 0;
+ int msg_count = 0;
+
+ allocate_threads(3);
+ set_thread(0);
+ spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
+ ch0 = spdk_get_io_channel(&ch_count);
+ set_thread(1);
+ ch1 = spdk_get_io_channel(&ch_count);
+ set_thread(2);
+ ch2 = spdk_get_io_channel(&ch_count);
+ CU_ASSERT(ch_count == 3);
+
+ /*
+ * Test that io_channel handles the case where we start to iterate through
+ * the channels, and during the iteration, one of the channels is deleted.
+ * This is exercised in several different, sometimes non-intuitive, orders
+ * because some operations are deferred and won't execute until their
+ * threads are polled.
+ *
+ * Case #1: Put the I/O channel before spdk_for_each_channel.
+ */
+ set_thread(0);
+ spdk_put_io_channel(ch0);
+ CU_ASSERT(ch_count == 3);
+ poll_threads();
+ CU_ASSERT(ch_count == 2);
+ spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
+ CU_ASSERT(msg_count == 0);
+ poll_threads();
+ CU_ASSERT(msg_count == 3);
+
+ msg_count = 0;
+
+ /*
+ * Case #2: Put the I/O channel after spdk_for_each_channel, but before
+ * thread 0 is polled.
+ */
+ ch0 = spdk_get_io_channel(&ch_count);
+ CU_ASSERT(ch_count == 3);
+ spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
+ spdk_put_io_channel(ch0);
+ CU_ASSERT(ch_count == 3);
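+ /* Even though ch0 has been put, its release is deferred, so the iteration
+ * still visits all three channels (3 messages) and the completion callback
+ * then runs, giving msg_count == 4 after polling. */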
+
+ poll_threads();
+ CU_ASSERT(ch_count == 2);
+ CU_ASSERT(msg_count == 4);
+ set_thread(1);
+ spdk_put_io_channel(ch1);
+ CU_ASSERT(ch_count == 2);
+ set_thread(2);
+ spdk_put_io_channel(ch2);
+ CU_ASSERT(ch_count == 2);
+ poll_threads();
+ CU_ASSERT(ch_count == 0);
+
+ spdk_io_device_unregister(&ch_count, NULL);
+ poll_threads();
+
+ free_threads();
+}
+
+struct unreg_ctx {
+ bool ch_done;
+ bool foreach_done;
+};
+
+static void
+unreg_ch_done(struct spdk_io_channel_iter *i)
+{
+ struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
+
+ ctx->ch_done = true;
+
+ SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
+ spdk_for_each_channel_continue(i, 0);
+}
+
+static void
+unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
+{
+ struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
+
+ ctx->foreach_done = true;
+}
+
+static void
+for_each_channel_unreg(void)
+{
+ struct spdk_io_channel *ch0;
+ struct io_device *dev;
+ struct unreg_ctx ctx = {};
+ int io_target = 0;
+
+ allocate_threads(1);
+ set_thread(0);
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+ spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
+ dev = TAILQ_FIRST(&g_io_devices);
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+ CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
+ ch0 = spdk_get_io_channel(&io_target);
+ spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);
+
+ spdk_io_device_unregister(&io_target, NULL);
+ /*
+ * There is an outstanding foreach call on the io_device, so the unregister should not
+ * have removed the device.
+ */
+ CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
+ spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
+ /*
+ * There is already a device registered at &io_target, so a new io_device should not
+ * have been added to g_io_devices.
+ */
+ CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
+ CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
+
+ poll_thread(0);
+ CU_ASSERT(ctx.ch_done == true);
+ CU_ASSERT(ctx.foreach_done == true);
+ /*
+ * There are no more foreach operations outstanding, so we can unregister the device,
+ * even though a channel still exists for the device.
+ */
+ spdk_io_device_unregister(&io_target, NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+
+ set_thread(0);
+ spdk_put_io_channel(ch0);
+
+ poll_threads();
+
+ free_threads();
+}
+
+static void
+thread_name(void)
+{
+ struct spdk_thread *thread;
+ const char *name;
+
+ spdk_thread_lib_init(NULL, 0);
+
+ /* Create thread with no name, which automatically generates one */
+ thread = spdk_thread_create(NULL, NULL);
+ spdk_set_thread(thread);
+ thread = spdk_get_thread();
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ name = spdk_thread_get_name(thread);
+ CU_ASSERT(name != NULL);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ /* Create thread named "test_thread" */
+ thread = spdk_thread_create("test_thread", NULL);
+ spdk_set_thread(thread);
+ thread = spdk_get_thread();
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ name = spdk_thread_get_name(thread);
+ SPDK_CU_ASSERT_FATAL(name != NULL);
+ CU_ASSERT(strcmp(name, "test_thread") == 0);
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+
+ spdk_thread_lib_fini();
+}
+
+static uint64_t g_device1;
+static uint64_t g_device2;
+static uint64_t g_device3;
+
+static uint64_t g_ctx1 = 0x1111;
+static uint64_t g_ctx2 = 0x2222;
+
+static int g_create_cb_calls = 0;
+static int g_destroy_cb_calls = 0;
+
+static int
+create_cb_1(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &g_device1);
+ *(uint64_t *)ctx_buf = g_ctx1;
+ g_create_cb_calls++;
+ return 0;
+}
+
+static void
+destroy_cb_1(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &g_device1);
+ CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
+ g_destroy_cb_calls++;
+}
+
+static int
+create_cb_2(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &g_device2);
+ *(uint64_t *)ctx_buf = g_ctx2;
+ g_create_cb_calls++;
+ return 0;
+}
+
+static void
+destroy_cb_2(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &g_device2);
+ CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
+ g_destroy_cb_calls++;
+}
+
+static void
+channel(void)
+{
+ struct spdk_io_channel *ch1, *ch2;
+ void *ctx;
+
+ allocate_threads(1);
+ set_thread(0);
+
+ spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
+ spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);
+
+ g_create_cb_calls = 0;
+ ch1 = spdk_get_io_channel(&g_device1);
+ CU_ASSERT(g_create_cb_calls == 1);
+ SPDK_CU_ASSERT_FATAL(ch1 != NULL);
+
+ g_create_cb_calls = 0;
+ ch2 = spdk_get_io_channel(&g_device1);
+ CU_ASSERT(g_create_cb_calls == 0);
+ CU_ASSERT(ch1 == ch2);
+ SPDK_CU_ASSERT_FATAL(ch2 != NULL);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch2);
+ poll_threads();
+ CU_ASSERT(g_destroy_cb_calls == 0);
+
+ g_create_cb_calls = 0;
+ ch2 = spdk_get_io_channel(&g_device2);
+ CU_ASSERT(g_create_cb_calls == 1);
+ CU_ASSERT(ch1 != ch2);
+ SPDK_CU_ASSERT_FATAL(ch2 != NULL);
+
+ ctx = spdk_io_channel_get_ctx(ch2);
+ CU_ASSERT(*(uint64_t *)ctx == g_ctx2);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch1);
+ poll_threads();
+ CU_ASSERT(g_destroy_cb_calls == 1);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch2);
+ poll_threads();
+ CU_ASSERT(g_destroy_cb_calls == 1);
+
+ ch1 = spdk_get_io_channel(&g_device3);
+ CU_ASSERT(ch1 == NULL);
+
+ spdk_io_device_unregister(&g_device1, NULL);
+ poll_threads();
+ spdk_io_device_unregister(&g_device2, NULL);
+ poll_threads();
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+ free_threads();
+ CU_ASSERT(TAILQ_EMPTY(&g_threads));
+}
+
+static int
+create_cb(void *io_device, void *ctx_buf)
+{
+ uint64_t *refcnt = (uint64_t *)ctx_buf;
+
+ CU_ASSERT(*refcnt == 0);
+ *refcnt = 1;
+
+ return 0;
+}
+
+static void
+destroy_cb(void *io_device, void *ctx_buf)
+{
+ uint64_t *refcnt = (uint64_t *)ctx_buf;
+
+ CU_ASSERT(*refcnt == 1);
+ *refcnt = 0;
+}
+
+/**
+ * This test is checking that a sequence of get, put, get, put without allowing
+ * the deferred put operation to complete doesn't result in releasing the memory
+ * for the channel twice.
+ */
+static void
+channel_destroy_races(void)
+{
+ uint64_t device;
+ struct spdk_io_channel *ch;
+
+ allocate_threads(1);
+ set_thread(0);
+
+ spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);
+
+ ch = spdk_get_io_channel(&device);
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ spdk_put_io_channel(ch);
+
+ ch = spdk_get_io_channel(&device);
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ spdk_put_io_channel(ch);
+ poll_threads();
+
+ spdk_io_device_unregister(&device, NULL);
+ poll_threads();
+
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+ free_threads();
+ CU_ASSERT(TAILQ_EMPTY(&g_threads));
+}
+
+static void
+thread_exit_test(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_io_channel *ch;
+ struct spdk_poller *poller1, *poller2;
+ void *ctx;
+ bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
+ int rc __attribute__((unused));
+
+ MOCK_SET(spdk_get_ticks, 10);
+ MOCK_SET(spdk_get_ticks_hz, 1);
+
+ allocate_threads(4);
+
+ /* Test if all pending messages are reaped for the exiting thread, and the
+ * thread moves to the exited state.
+ */
+ set_thread(0);
+ thread = spdk_get_thread();
+
+ /* Sending message to thread 0 will be accepted. */
+ rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!done1);
+
+ /* Move thread 0 to the exiting state. */
+ spdk_thread_exit(thread);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+ /* Sending message to thread 0 will be still accepted. */
+ rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
+ CU_ASSERT(rc == 0);
+
+ /* Thread 0 will reap pending messages. */
+ poll_thread(0);
+ CU_ASSERT(done1 == true);
+ CU_ASSERT(done2 == true);
+
+ /* Thread 0 will move to the exited state. */
+ CU_ASSERT(spdk_thread_is_exited(thread) == true);
+
+ /* Test that a released I/O channel is reaped even after the thread moves to
+ * the exiting state.
+ */
+ set_thread(1);
+
+ spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
+
+ g_create_cb_calls = 0;
+ ch = spdk_get_io_channel(&g_device1);
+ CU_ASSERT(g_create_cb_calls == 1);
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ ctx = spdk_io_channel_get_ctx(ch);
+ CU_ASSERT(*(uint64_t *)ctx == g_ctx1);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch);
+
+ thread = spdk_get_thread();
+ spdk_thread_exit(thread);
+
+ /* Thread 1 will not move to the exited state yet because the I/O channel
+ * release has not completed yet.
+ */
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+ /* Thread 1 will still be able to get another reference to the I/O channel
+ * even after the thread moves to the exiting state.
+ */
+ g_create_cb_calls = 0;
+ ch = spdk_get_io_channel(&g_device1);
+
+ CU_ASSERT(g_create_cb_calls == 0);
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+ ctx = spdk_io_channel_get_ctx(ch);
+ CU_ASSERT(*(uint64_t *)ctx == g_ctx1);
+
+ spdk_put_io_channel(ch);
+
+ poll_threads();
+ CU_ASSERT(g_destroy_cb_calls == 1);
+
+ /* Thread 1 will move to the exited state after the I/O channel is released.
+ */
+ CU_ASSERT(spdk_thread_is_exited(thread) == true);
+
+ spdk_io_device_unregister(&g_device1, NULL);
+ poll_threads();
+
+ /* Test if the unregistered poller is reaped for the exiting thread, and the
+ * thread moves to the exited state.
+ */
+ set_thread(2);
+ thread = spdk_get_thread();
+
+ poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
+ CU_ASSERT(poller1 != NULL);
+
+ spdk_poller_unregister(&poller1);
+
+ spdk_thread_exit(thread);
+
+ poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);
+
+ poll_threads();
+
+ CU_ASSERT(poller1_run == false);
+ CU_ASSERT(poller2_run == true);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+ spdk_poller_unregister(&poller2);
+
+ poll_threads();
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == true);
+
+ /* Test if the exiting thread is exited forcefully after timeout. */
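+ /* With spdk_get_ticks_hz() mocked to 1, each tick counts as one second
+ * against the exit timeout: one tick later the thread is still exiting,
+ * but by tick 15 the timeout has expired and the thread is force-exited. */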
+ set_thread(3);
+ thread = spdk_get_thread();
+
+ poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
+ CU_ASSERT(poller1 != NULL);
+
+ spdk_thread_exit(thread);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+ MOCK_SET(spdk_get_ticks, 11);
+
+ poll_threads();
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+ /* Cause timeout forcefully. */
+ MOCK_SET(spdk_get_ticks, 15);
+
+ poll_threads();
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == true);
+
+ spdk_poller_unregister(&poller1);
+
+ poll_threads();
+
+ MOCK_CLEAR(spdk_get_ticks);
+ MOCK_CLEAR(spdk_get_ticks_hz);
+
+ free_threads();
+}
+
+static int
+poller_run_idle(void *ctx)
+{
+ uint64_t delay_us = (uint64_t)ctx;
+
+ spdk_delay_us(delay_us);
+
+ return 0;
+}
+
+static int
+poller_run_busy(void *ctx)
+{
+ uint64_t delay_us = (uint64_t)ctx;
+
+ spdk_delay_us(delay_us);
+
+ return 1;
+}
+
+static void
+thread_update_stats_test(void)
+{
+ struct spdk_poller *poller;
+ struct spdk_thread *thread;
+
+ MOCK_SET(spdk_get_ticks, 10);
+
+ allocate_threads(1);
+
+ set_thread(0);
+ thread = spdk_get_thread();
+
+ CU_ASSERT(thread->tsc_last == 10);
+ CU_ASSERT(thread->stats.idle_tsc == 0);
+ CU_ASSERT(thread->stats.busy_tsc == 0);
+
+ /* Test if idle_tsc is updated as expected. */
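+ /* Ticks start at 10; the 100 us delay advances them to 110 before polling,
+ * and the poller's 1000 us idle run moves tsc_last to 1110 while crediting
+ * 1000 ticks to idle_tsc. */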
+ poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
+ CU_ASSERT(poller != NULL);
+
+ spdk_delay_us(100);
+
+ poll_thread_times(0, 1);
+
+ CU_ASSERT(thread->tsc_last == 1110);
+ CU_ASSERT(thread->stats.idle_tsc == 1000);
+ CU_ASSERT(thread->stats.busy_tsc == 0);
+
+ spdk_delay_us(100);
+
+ poll_thread_times(0, 1);
+
+ CU_ASSERT(thread->tsc_last == 2210);
+ CU_ASSERT(thread->stats.idle_tsc == 2000);
+ CU_ASSERT(thread->stats.busy_tsc == 0);
+
+ spdk_poller_unregister(&poller);
+
+ /* Test if busy_tsc is updated as expected. */
+ poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
+ CU_ASSERT(poller != NULL);
+
+ spdk_delay_us(10000);
+
+ poll_thread_times(0, 1);
+
+ CU_ASSERT(thread->tsc_last == 112210);
+ CU_ASSERT(thread->stats.idle_tsc == 2000);
+ CU_ASSERT(thread->stats.busy_tsc == 100000);
+
+ spdk_delay_us(10000);
+
+ poll_thread_times(0, 1);
+
+ CU_ASSERT(thread->tsc_last == 222210);
+ CU_ASSERT(thread->stats.idle_tsc == 2000);
+ CU_ASSERT(thread->stats.busy_tsc == 200000);
+
+ spdk_poller_unregister(&poller);
+
+ MOCK_CLEAR(spdk_get_ticks);
+
+ free_threads();
+}
+
+struct ut_nested_ch {
+ struct spdk_io_channel *child;
+ struct spdk_poller *poller;
+};
+
+struct ut_nested_dev {
+ struct ut_nested_dev *child;
+};
+
+static struct io_device *
+ut_get_io_device(void *dev)
+{
+ struct io_device *tmp;
+
+ TAILQ_FOREACH(tmp, &g_io_devices, tailq) {
+ if (tmp->io_device == dev) {
+ return tmp;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+ut_null_poll(void *ctx)
+{
+ return -1;
+}
+
+static int
+ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
+{
+ struct ut_nested_ch *_ch = ctx_buf;
+ struct ut_nested_dev *_dev = io_device;
+ struct ut_nested_dev *_child;
+
+ _child = _dev->child;
+
+ if (_child != NULL) {
+ _ch->child = spdk_get_io_channel(_child);
+ SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
+ } else {
+ _ch->child = NULL;
+ }
+
+ _ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);
+
+ return 0;
+}
+
+static void
+ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
+{
+ struct ut_nested_ch *_ch = ctx_buf;
+ struct spdk_io_channel *child;
+
+ child = _ch->child;
+ if (child != NULL) {
+ spdk_put_io_channel(child);
+ }
+
+ spdk_poller_unregister(&_ch->poller);
+}
+
+static void
+ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
+{
+ CU_ASSERT(ch->ref == 1);
+ CU_ASSERT(ch->dev == dev);
+ CU_ASSERT(dev->refcnt == 1);
+}
+
+static void
+ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
+{
+ CU_ASSERT(ch->ref == 0);
+ CU_ASSERT(ch->destroy_ref == 1);
+ CU_ASSERT(dev->refcnt == 1);
+}
+
+static void
+ut_check_nested_ch_destroy_post(struct io_device *dev)
+{
+ CU_ASSERT(dev->refcnt == 0);
+}
+
+static void
+ut_check_nested_poller_register(struct spdk_poller *poller)
+{
+ SPDK_CU_ASSERT_FATAL(poller != NULL);
+}
+
+static void
+nested_channel(void)
+{
+ struct ut_nested_dev _dev1, _dev2, _dev3;
+ struct ut_nested_ch *_ch1, *_ch2, *_ch3;
+ struct io_device *dev1, *dev2, *dev3;
+ struct spdk_io_channel *ch1, *ch2, *ch3;
+ struct spdk_poller *poller;
+ struct spdk_thread *thread;
+
+ allocate_threads(1);
+ set_thread(0);
+
+ thread = spdk_get_thread();
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+
+ _dev1.child = &_dev2;
+ _dev2.child = &_dev3;
+ _dev3.child = NULL;
+
+ spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
+ sizeof(struct ut_nested_ch), "dev1");
+ spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
+ sizeof(struct ut_nested_ch), "dev2");
+ spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
+ sizeof(struct ut_nested_ch), "dev3");
+
+ dev1 = ut_get_io_device(&_dev1);
+ SPDK_CU_ASSERT_FATAL(dev1 != NULL);
+ dev2 = ut_get_io_device(&_dev2);
+ SPDK_CU_ASSERT_FATAL(dev2 != NULL);
+ dev3 = ut_get_io_device(&_dev3);
+ SPDK_CU_ASSERT_FATAL(dev3 != NULL);
+
+ /* A single call to spdk_get_io_channel() for dev1 also creates channels
+ * for dev2 and dev3 in turn. Pollers are registered along with them.
+ */
+ ch1 = spdk_get_io_channel(&_dev1);
+ SPDK_CU_ASSERT_FATAL(ch1 != NULL);
+
+ _ch1 = spdk_io_channel_get_ctx(ch1);
+ ch2 = _ch1->child;
+ SPDK_CU_ASSERT_FATAL(ch2 != NULL);
+
+ _ch2 = spdk_io_channel_get_ctx(ch2);
+ ch3 = _ch2->child;
+ SPDK_CU_ASSERT_FATAL(ch3 != NULL);
+
+ _ch3 = spdk_io_channel_get_ctx(ch3);
+ CU_ASSERT(_ch3->child == NULL);
+
+ ut_check_nested_ch_create(ch1, dev1);
+ ut_check_nested_ch_create(ch2, dev2);
+ ut_check_nested_ch_create(ch3, dev3);
+
+ poller = spdk_poller_register(ut_null_poll, NULL, 0);
+
+ ut_check_nested_poller_register(poller);
+ ut_check_nested_poller_register(_ch1->poller);
+ ut_check_nested_poller_register(_ch2->poller);
+ ut_check_nested_poller_register(_ch3->poller);
+
+ spdk_poller_unregister(&poller);
+ poll_thread_times(0, 1);
+
+ /* A single call to spdk_put_io_channel() for dev1 also destroys the channels
+ * for dev2 and dev3 in turn. Pollers are unregistered along with them.
+ */
+ spdk_put_io_channel(ch1);
+
+ /* Start exiting the current thread after unregistering the non-nested
+ * I/O channel.
+ */
+ spdk_thread_exit(thread);
+
+ ut_check_nested_ch_destroy_pre(ch1, dev1);
+ poll_thread_times(0, 1);
+ ut_check_nested_ch_destroy_post(dev1);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+ ut_check_nested_ch_destroy_pre(ch2, dev2);
+ poll_thread_times(0, 1);
+ ut_check_nested_ch_destroy_post(dev2);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == false);
+
+ ut_check_nested_ch_destroy_pre(ch3, dev3);
+ poll_thread_times(0, 1);
+ ut_check_nested_ch_destroy_post(dev3);
+
+ CU_ASSERT(spdk_thread_is_exited(thread) == true);
+
+ spdk_io_device_unregister(&_dev1, NULL);
+ spdk_io_device_unregister(&_dev2, NULL);
+ spdk_io_device_unregister(&_dev3, NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+
+ free_threads();
+ CU_ASSERT(TAILQ_EMPTY(&g_threads));
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("io_channel", NULL, NULL);
+
+ CU_ADD_TEST(suite, thread_alloc);
+ CU_ADD_TEST(suite, thread_send_msg);
+ CU_ADD_TEST(suite, thread_poller);
+ CU_ADD_TEST(suite, poller_pause);
+ CU_ADD_TEST(suite, thread_for_each);
+ CU_ADD_TEST(suite, for_each_channel_remove);
+ CU_ADD_TEST(suite, for_each_channel_unreg);
+ CU_ADD_TEST(suite, thread_name);
+ CU_ADD_TEST(suite, channel);
+ CU_ADD_TEST(suite, channel_destroy_races);
+ CU_ADD_TEST(suite, thread_exit_test);
+ CU_ADD_TEST(suite, thread_update_stats_test);
+ CU_ADD_TEST(suite, nested_channel);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/Makefile b/src/spdk/test/unit/lib/util/Makefile
new file mode 100644
index 000000000..221715725
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/Makefile
@@ -0,0 +1,45 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = base64.c bit_array.c cpuset.c crc16.c crc32_ieee.c crc32c.c dif.c \
+ iov.c math.c pipe.c string.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/util/base64.c/.gitignore b/src/spdk/test/unit/lib/util/base64.c/.gitignore
new file mode 100644
index 000000000..a5b175236
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/base64.c/.gitignore
@@ -0,0 +1 @@
+base64_ut
diff --git a/src/spdk/test/unit/lib/util/base64.c/Makefile b/src/spdk/test/unit/lib/util/base64.c/Makefile
new file mode 100644
index 000000000..c0d91c076
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/base64.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = base64_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/base64.c/base64_ut.c b/src/spdk/test/unit/lib/util/base64.c/base64_ut.c
new file mode 100644
index 000000000..b1f70561c
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/base64.c/base64_ut.c
@@ -0,0 +1,381 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/base64.c"
+
+char text_A[] = "FZB3";
+uint8_t raw_A[] = {0x15, 0x90, 0x77};
+char text_B[] = "AbC/1+c=";
+char text_urlsafe_B[] = "AbC_1-c=";
+uint8_t raw_B[] = {0x01, 0xB0, 0xBF, 0xD7, 0xE7};
+char text_C[] = "AbC/1+cC";
+char text_urlsafe_C[] = "AbC_1-cC";
+uint8_t raw_C[] = {0x01, 0xB0, 0xBF, 0xD7, 0xE7, 0x02};
+char text_D[] = "AbC/1w==";
+char text_urlsafe_D[] = "AbC_1w==";
+uint8_t raw_D[] = {0x01, 0xB0, 0xBF, 0xD7};
+char text_E[] = "AbC12===";
+char text_F[] = "AbCd112";
+char text_G[] = "AbCd12";
+char text_H[] = "AbC12";
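+/* text_E has three padding characters and text_F/G/H have lengths that are not
+ * multiples of 4, so decode must reject them with -EINVAL. */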
+char text_I[] = "AQu/1+cCCBUnOBFWv+HzoL3BOVUBItP2mRDdqhnxAtIT4hD1wbQ30Ylm8R+7khPS";//64 bytes
+char text_urlsafe_I[] =
+ "AQu_1-cCCBUnOBFWv-HzoL3BOVUBItP2mRDdqhnxAtIT4hD1wbQ30Ylm8R-7khPS";//64 bytes
+uint8_t raw_I[] = {0x01, 0x0B, 0xBF, 0xD7, 0xE7, 0x02, 0x08, 0x15, 0x27, 0x38, 0x11, 0x56, 0xBF, 0xE1, 0xF3, 0xA0,
+ 0xBD, 0xC1, 0x39, 0x55, 0x01, 0x22, 0xD3, 0xF6, 0x99, 0x10, 0xDD, 0xAA, 0x19, 0xF1, 0x02, 0xD2,
+ 0x13, 0xE2, 0x10, 0xF5, 0xC1, 0xB4, 0x37, 0xD1, 0x89, 0x66, 0xF1, 0x1F, 0xBB, 0x92, 0x13, 0xD2
+ };
+char text_J[] =
+ "AQu/1+cCCBUnOBFWv+HzoL3BOVUBItP2mRDdqhnxAtIT4hD1wbQ30Ylm8R+7khPSvcE5VecCCBUZ8QLiEPVm8b3BOVUBItP2GfEC4hD1ZvE5VQEi0/aJZvEfu5LiEPUTvcE5VQEi0/YZEQ==";
+char text_urlsafe_J[] =
+ "AQu_1-cCCBUnOBFWv-HzoL3BOVUBItP2mRDdqhnxAtIT4hD1wbQ30Ylm8R-7khPSvcE5VecCCBUZ8QLiEPVm8b3BOVUBItP2GfEC4hD1ZvE5VQEi0_aJZvEfu5LiEPUTvcE5VQEi0_YZEQ==";
+uint8_t raw_J[] = {0x01, 0x0B, 0xBF, 0xD7, 0xE7, 0x02, 0x08, 0x15, 0x27, 0x38, 0x11, 0x56, 0xBF, 0xE1, 0xF3, 0xA0,
+ 0xBD, 0xC1, 0x39, 0x55, 0x01, 0x22, 0xD3, 0xF6, 0x99, 0x10, 0xDD, 0xAA, 0x19, 0xF1, 0x02, 0xD2,
+ 0x13, 0xE2, 0x10, 0xF5, 0xC1, 0xB4, 0x37, 0xD1, 0x89, 0x66, 0xF1, 0x1F, 0xBB, 0x92, 0x13, 0xD2,
+ 0xBD, 0xC1, 0x39, 0x55, 0xE7, 0x02, 0x08, 0x15, 0x19, 0xF1, 0x02, 0xE2, 0x10, 0xF5, 0x66, 0xF1,
+ 0xBD, 0xC1, 0x39, 0x55, 0x01, 0x22, 0xD3, 0xF6, 0x19, 0xF1, 0x02, 0xE2, 0x10, 0xF5, 0x66, 0xF1,
+ 0x39, 0x55, 0x01, 0x22, 0xD3, 0xF6, 0x89, 0x66, 0xF1, 0x1F, 0xBB, 0x92, 0xE2, 0x10, 0xF5, 0x13,
+ 0xBD, 0xC1, 0x39, 0x55, 0x01, 0x22, 0xD3, 0xF6, 0x19, 0x11
+ };
+
+static void
+test_base64_get_encoded_strlen(void)
+{
+ uint32_t raw_lens[4] = {8, 9, 10, 11};
+ uint32_t text_strlens[4] = {12, 12, 16, 16};
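+ /* Base64 encodes every 3 raw bytes as 4 text characters, so the expected
+ * string lengths are 4 * ceil(raw_len / 3). */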
+ uint32_t text_strlen;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ text_strlen = spdk_base64_get_encoded_strlen(raw_lens[i]);
+ CU_ASSERT_EQUAL(text_strlen, text_strlens[i]);
+ }
+}
+
+static void
+test_base64_get_decoded_len(void)
+{
+ uint32_t text_strlens[4] = {8, 10, 11, 12};
+ uint32_t raw_lens[4] = {6, 7, 8, 9};
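+ /* The decoded length bound is text_strlen * 3 / 4 with integer division,
+ * which matches the expected raw lengths here. */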
+ uint32_t bin_len;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ bin_len = spdk_base64_get_decoded_len(text_strlens[i]);
+ CU_ASSERT_EQUAL(bin_len, raw_lens[i]);
+ }
+}
+
+static void
+test_base64_encode(void)
+{
+ char text[200];
+ int ret;
+
+ ret = spdk_base64_encode(text, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_A) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_A));
+
+ ret = spdk_base64_encode(text, raw_B, sizeof(raw_B));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_B) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_B));
+
+ ret = spdk_base64_encode(text, raw_C, sizeof(raw_C));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_C) == 0);
+
+ ret = spdk_base64_encode(text, raw_D, sizeof(raw_D));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_D) == 0);
+
+ ret = spdk_base64_encode(text, raw_I, sizeof(raw_I));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_I) == 0);
+
+ ret = spdk_base64_encode(text, raw_J, sizeof(raw_J));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_J) == 0);
+
+ ret = spdk_base64_encode(NULL, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_encode(text, NULL, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_encode(text, raw_A, 0);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+static void
+test_base64_decode(void)
+{
+ char raw_buf[200];
+ void *raw = (void *)raw_buf;
+ size_t raw_len;
+ int ret;
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_A);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_A));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_A);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_A));
+ CU_ASSERT(memcmp(raw, raw_A, sizeof(raw_A)) == 0);
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_B);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_B));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_B);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_B));
+ CU_ASSERT(memcmp(raw, raw_B, sizeof(raw_B)) == 0);
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_C);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_C));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_C);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_C));
+ CU_ASSERT(memcmp(raw, raw_C, sizeof(raw_C)) == 0);
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_D);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_D));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_D);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_D));
+ CU_ASSERT(memcmp(raw, raw_D, sizeof(raw_D)) == 0);
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_I);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_I));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_I);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_I));
+ CU_ASSERT(memcmp(raw, raw_I, sizeof(raw_I)) == 0);
+
+ /* len */
+ ret = spdk_base64_decode(NULL, &raw_len, text_J);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_J));
+
+ /* decode */
+ ret = spdk_base64_decode(raw, &raw_len, text_J);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_J));
+ CU_ASSERT(memcmp(raw, raw_J, sizeof(raw_J)) == 0);
+
+ ret = spdk_base64_decode(raw, &raw_len, text_E);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, text_F);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, text_G);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, text_H);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, NULL);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+static void
+test_base64_urlsafe_encode(void)
+{
+ char text[200];
+ int ret;
+
+ ret = spdk_base64_urlsafe_encode(text, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_A) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_A));
+
+ ret = spdk_base64_urlsafe_encode(text, raw_B, sizeof(raw_B));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_B) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_urlsafe_B));
+
+ ret = spdk_base64_urlsafe_encode(text, raw_C, sizeof(raw_C));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_C) == 0);
+
+ ret = spdk_base64_urlsafe_encode(text, raw_D, sizeof(raw_D));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_D) == 0);
+
+ ret = spdk_base64_urlsafe_encode(text, raw_I, sizeof(raw_I));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_I) == 0);
+
+ ret = spdk_base64_urlsafe_encode(text, raw_J, sizeof(raw_J));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_J) == 0);
+
+ ret = spdk_base64_urlsafe_encode(NULL, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_encode(text, NULL, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_encode(text, raw_A, 0);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+static void
+test_base64_urlsafe_decode(void)
+{
+ char raw_buf[200];
+ void *raw = (void *)raw_buf;
+ size_t raw_len = 0;
+ int ret;
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_A);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_A));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_A);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_A));
+ CU_ASSERT(memcmp(raw, raw_A, sizeof(raw_A)) == 0);
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_urlsafe_B);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_B));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_B);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_B));
+ CU_ASSERT(memcmp(raw, raw_B, sizeof(raw_B)) == 0);
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_urlsafe_C);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_C));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_C);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_C));
+ CU_ASSERT(memcmp(raw, raw_C, sizeof(raw_C)) == 0);
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_urlsafe_D);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_D));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_D);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_D));
+ CU_ASSERT(memcmp(raw, raw_D, sizeof(raw_D)) == 0);
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_urlsafe_I);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_I));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_I);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_I));
+ CU_ASSERT(memcmp(raw, raw_I, sizeof(raw_I)) == 0);
+
+ /* len */
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_urlsafe_J);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_J));
+
+ /* decode */
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_J);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_J));
+ CU_ASSERT(memcmp(raw, raw_J, sizeof(raw_J)) == 0);
+
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_E);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_F);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_G);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_H);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, NULL);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("base64", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_base64_get_encoded_strlen);
+ CU_ADD_TEST(suite, test_base64_get_decoded_len);
+ CU_ADD_TEST(suite, test_base64_encode);
+ CU_ADD_TEST(suite, test_base64_decode);
+ CU_ADD_TEST(suite, test_base64_urlsafe_encode);
+ CU_ADD_TEST(suite, test_base64_urlsafe_decode);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/bit_array.c/.gitignore b/src/spdk/test/unit/lib/util/bit_array.c/.gitignore
new file mode 100644
index 000000000..24300cdb3
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/bit_array.c/.gitignore
@@ -0,0 +1 @@
+bit_array_ut
diff --git a/src/spdk/test/unit/lib/util/bit_array.c/Makefile b/src/spdk/test/unit/lib/util/bit_array.c/Makefile
new file mode 100644
index 000000000..281001af8
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/bit_array.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = bit_array_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c b/src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c
new file mode 100644
index 000000000..5b19b409b
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c
@@ -0,0 +1,376 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/bit_array.c"
+#include "common/lib/test_env.c"
+
+static void
+test_1bit(void)
+{
+ struct spdk_bit_array *ba;
+
+ ba = spdk_bit_array_create(1);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 1);
+
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ /* Set bit 0 */
+ CU_ASSERT(spdk_bit_array_set(ba, 0) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 0);
+
+ /* Clear bit 0 */
+ spdk_bit_array_clear(ba, 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ spdk_bit_array_free(&ba);
+ CU_ASSERT(ba == NULL);
+}
+
+static void
+test_64bit(void)
+{
+ struct spdk_bit_array *ba;
+
+ ba = spdk_bit_array_create(64);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 64);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 63) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 64) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1000) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ /* Set bit 1 */
+ CU_ASSERT(spdk_bit_array_set(ba, 1) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == true);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 1);
+
+ /* Set bit 63 (1 still set) */
+ CU_ASSERT(spdk_bit_array_set(ba, 63) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == true);
+ CU_ASSERT(spdk_bit_array_get(ba, 63) == true);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 1);
+
+ /* Clear bit 1 (63 still set) */
+ spdk_bit_array_clear(ba, 1);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 63);
+
+ /* Clear bit 63 (no bits set) */
+ spdk_bit_array_clear(ba, 63);
+ CU_ASSERT(spdk_bit_array_get(ba, 63) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_find(void)
+{
+ struct spdk_bit_array *ba;
+ uint32_t i;
+
+ ba = spdk_bit_array_create(256);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 256);
+
+ /* Set all bits */
+ for (i = 0; i < 256; i++) {
+ CU_ASSERT(spdk_bit_array_set(ba, i) == 0);
+ }
+
+ /* Verify that find_first_set and find_first_clear work for each starting position */
+ for (i = 0; i < 256; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == i);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == UINT32_MAX);
+ }
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 256) == UINT32_MAX);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, 256) == UINT32_MAX);
+
+ /* Clear bits 0 through 31 */
+ for (i = 0; i < 32; i++) {
+ spdk_bit_array_clear(ba, i);
+ }
+
+ for (i = 0; i < 32; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == 32);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == i);
+ }
+
+ for (i = 32; i < 256; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == i);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == UINT32_MAX);
+ }
+
+ /* Clear bit 255 */
+ spdk_bit_array_clear(ba, 255);
+
+ for (i = 0; i < 32; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == 32);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == i);
+ }
+
+ for (i = 32; i < 255; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == i);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == 255);
+ }
+
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, 256) == UINT32_MAX);
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_resize(void)
+{
+ struct spdk_bit_array *ba;
+
+	/* Start with a 0-bit array */
+ ba = spdk_bit_array_create(0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 0) == -EINVAL);
+ spdk_bit_array_clear(ba, 0);
+
+ /* Increase size to 1 bit */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 1) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 1);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 0) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+
+ /* Increase size to 2 bits */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 2) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 2);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 1) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == true);
+
+ /* Shrink size back to 1 bit */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 1) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 1);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+
+ /* Increase size to 65 bits */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 65) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 65);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 64) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 64) == true);
+
+ /* Shrink size back to 0 bits */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 0) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_errors(void)
+{
+ /* Passing NULL to resize should fail. */
+ CU_ASSERT(spdk_bit_array_resize(NULL, 0) == -EINVAL);
+
+ /* Passing NULL to free is a no-op. */
+ spdk_bit_array_free(NULL);
+}
+
+static void
+test_count(void)
+{
+ struct spdk_bit_array *ba;
+ uint32_t i;
+
+ /* 0-bit array should have 0 bits set and 0 bits clear */
+ ba = spdk_bit_array_create(0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 0);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 0);
+ spdk_bit_array_free(&ba);
+
+ /* 1-bit array */
+ ba = spdk_bit_array_create(1);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 0);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 1);
+ spdk_bit_array_set(ba, 0);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 1);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 0);
+ spdk_bit_array_free(&ba);
+
+ /* 65-bit array */
+ ba = spdk_bit_array_create(65);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 0);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 65);
+ spdk_bit_array_set(ba, 0);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 1);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 64);
+ spdk_bit_array_set(ba, 5);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 2);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 63);
+ spdk_bit_array_set(ba, 13);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 3);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 62);
+ spdk_bit_array_clear(ba, 0);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 2);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 63);
+ for (i = 0; i < 65; i++) {
+ spdk_bit_array_set(ba, i);
+ }
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 65);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 0);
+ for (i = 0; i < 65; i++) {
+ spdk_bit_array_clear(ba, i);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 65 - i - 1);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == i + 1);
+ }
+ spdk_bit_array_free(&ba);
+}
+
+#define TEST_MASK_SIZE 128
+#define TEST_BITS_NUM (TEST_MASK_SIZE * 8 - 3)
+static void
+test_mask_store_load(void)
+{
+ struct spdk_bit_array *ba;
+ uint8_t mask[TEST_MASK_SIZE] = { 0 };
+ uint32_t i;
+
+ ba = spdk_bit_array_create(TEST_BITS_NUM);
+
+ /* Check if stored mask is consistent with bit array mask */
+ spdk_bit_array_set(ba, 0);
+ spdk_bit_array_set(ba, TEST_BITS_NUM / 2);
+ spdk_bit_array_set(ba, TEST_BITS_NUM - 1);
+
+ spdk_bit_array_store_mask(ba, mask);
+
+ for (i = 0; i < TEST_BITS_NUM; i++) {
+ if (i == 0 || i == TEST_BITS_NUM / 2 || i == TEST_BITS_NUM - 1) {
+ CU_ASSERT((mask[i / 8] & (1U << (i % 8))));
+ } else {
+ CU_ASSERT(!(mask[i / 8] & (1U << (i % 8))));
+ }
+ }
+
+ /* Check if loaded mask is consistent with bit array mask */
+ memset(mask, 0, TEST_MASK_SIZE);
+ mask[0] = 1;
+ mask[TEST_MASK_SIZE - 1] = 1U << 4;
+
+ spdk_bit_array_load_mask(ba, mask);
+
+ CU_ASSERT(spdk_bit_array_get(ba, 0));
+ CU_ASSERT(spdk_bit_array_get(ba, TEST_BITS_NUM - 1));
+
+ spdk_bit_array_clear(ba, 0);
+ spdk_bit_array_clear(ba, TEST_BITS_NUM - 1);
+
+ for (i = 0; i < TEST_BITS_NUM; i++) {
+ CU_ASSERT(!spdk_bit_array_get(ba, i));
+ }
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_mask_clear(void)
+{
+ struct spdk_bit_array *ba;
+ uint32_t i;
+
+ ba = spdk_bit_array_create(TEST_BITS_NUM);
+
+ for (i = 0; i < TEST_BITS_NUM; i++) {
+ spdk_bit_array_set(ba, i);
+ }
+
+ spdk_bit_array_clear_mask(ba);
+
+ for (i = 0; i < TEST_BITS_NUM; i++) {
+ CU_ASSERT(!spdk_bit_array_get(ba, i));
+ }
+
+ spdk_bit_array_free(&ba);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("bit_array", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_1bit);
+ CU_ADD_TEST(suite, test_64bit);
+ CU_ADD_TEST(suite, test_find);
+ CU_ADD_TEST(suite, test_resize);
+ CU_ADD_TEST(suite, test_errors);
+ CU_ADD_TEST(suite, test_count);
+ CU_ADD_TEST(suite, test_mask_store_load);
+ CU_ADD_TEST(suite, test_mask_clear);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
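The bit_array tests above cover create, set/clear, find, count, and resize. A minimal sketch of how those same calls might back a simple slot allocator follows; the spdk/bit_array.h header name is an assumption based on SPDK's include layout.

/*
 * Minimal sketch: a bit array as a simple slot allocator, using only the calls
 * exercised above. Assumes the declarations live in spdk/bit_array.h.
 */
#include <stdint.h>
#include <stdio.h>
#include "spdk/bit_array.h"

int
main(void)
{
	struct spdk_bit_array *slots;
	uint32_t slot;

	slots = spdk_bit_array_create(64);
	if (slots == NULL) {
		return 1;
	}

	/* Allocate: take the first clear bit, if any, and mark it used. */
	slot = spdk_bit_array_find_first_clear(slots, 0);
	if (slot != UINT32_MAX) {
		spdk_bit_array_set(slots, slot);
	}
	printf("allocated slot %u, %u slots in use\n", slot, spdk_bit_array_count_set(slots));

	/* Release the slot, then grow the array when more capacity is needed. */
	spdk_bit_array_clear(slots, slot);
	if (spdk_bit_array_resize(&slots, 128) != 0) {
		spdk_bit_array_free(&slots);
		return 1;
	}

	spdk_bit_array_free(&slots);
	return 0;
}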
diff --git a/src/spdk/test/unit/lib/util/cpuset.c/.gitignore b/src/spdk/test/unit/lib/util/cpuset.c/.gitignore
new file mode 100644
index 000000000..2ca1a2d36
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/cpuset.c/.gitignore
@@ -0,0 +1 @@
+cpuset_ut
diff --git a/src/spdk/test/unit/lib/util/cpuset.c/Makefile b/src/spdk/test/unit/lib/util/cpuset.c/Makefile
new file mode 100644
index 000000000..6b2374935
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/cpuset.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = cpuset_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c b/src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c
new file mode 100644
index 000000000..3630c5cbd
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c
@@ -0,0 +1,262 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/cpuset.h"
+
+#include "spdk_cunit.h"
+
+#include "util/cpuset.c"
+
+static int
+cpuset_check_range(struct spdk_cpuset *core_mask, uint32_t min, uint32_t max, bool isset)
+{
+ uint32_t core;
+ for (core = min; core <= max; core++) {
+ if (isset != spdk_cpuset_get_cpu(core_mask, core)) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void
+test_cpuset(void)
+{
+ uint32_t cpu;
+ struct spdk_cpuset *set = spdk_cpuset_alloc();
+
+ SPDK_CU_ASSERT_FATAL(set != NULL);
+ CU_ASSERT(spdk_cpuset_count(set) == 0);
+
+ /* Set cpu 0 */
+ spdk_cpuset_set_cpu(set, 0, true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, 0) == true);
+ CU_ASSERT(cpuset_check_range(set, 1, SPDK_CPUSET_SIZE - 1, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 1);
+
+ /* Set last cpu (cpu 0 already set) */
+ spdk_cpuset_set_cpu(set, SPDK_CPUSET_SIZE - 1, true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, 0) == true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, SPDK_CPUSET_SIZE - 1) == true);
+ CU_ASSERT(cpuset_check_range(set, 1, SPDK_CPUSET_SIZE - 2, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 2);
+
+ /* Clear cpu 0 (last cpu already set) */
+ spdk_cpuset_set_cpu(set, 0, false);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, 0) == false);
+ CU_ASSERT(cpuset_check_range(set, 1, SPDK_CPUSET_SIZE - 2, false) == 0);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, SPDK_CPUSET_SIZE - 1) == true);
+ CU_ASSERT(spdk_cpuset_count(set) == 1);
+
+ /* Set middle cpu (last cpu already set) */
+ cpu = (SPDK_CPUSET_SIZE - 1) / 2;
+ spdk_cpuset_set_cpu(set, cpu, true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, cpu) == true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, SPDK_CPUSET_SIZE - 1) == true);
+ CU_ASSERT(cpuset_check_range(set, 1, cpu - 1, false) == 0);
+ CU_ASSERT(cpuset_check_range(set, cpu + 1, SPDK_CPUSET_SIZE - 2, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 2);
+
+ /* Set all cpus */
+ for (cpu = 0; cpu < SPDK_CPUSET_SIZE; cpu++) {
+ spdk_cpuset_set_cpu(set, cpu, true);
+ }
+ CU_ASSERT(cpuset_check_range(set, 0, SPDK_CPUSET_SIZE - 1, true) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == SPDK_CPUSET_SIZE);
+
+ /* Clear all cpus */
+ spdk_cpuset_zero(set);
+ CU_ASSERT(cpuset_check_range(set, 0, SPDK_CPUSET_SIZE - 1, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 0);
+
+ spdk_cpuset_free(set);
+}
+
+static void
+test_cpuset_parse(void)
+{
+ int rc;
+ struct spdk_cpuset *core_mask;
+ char buf[1024];
+
+ core_mask = spdk_cpuset_alloc();
+ SPDK_CU_ASSERT_FATAL(core_mask != NULL);
+
+ /* Only core 0 should be set */
+ rc = spdk_cpuset_parse(core_mask, "0x1");
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, 0, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 1, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ /* Only core 1 should be set */
+ rc = spdk_cpuset_parse(core_mask, "[1]");
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, 0, false) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 1, 1, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 2, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ /* Set cores 0-10,12,128-254 */
+ rc = spdk_cpuset_parse(core_mask, "[0-10,12,128-254]");
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, 10, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 11, 11, false) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 12, 12, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 13, 127, false) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 128, 254, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 255, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ /* Set all cores */
+ snprintf(buf, sizeof(buf), "[0-%d]", SPDK_CPUSET_SIZE - 1);
+ rc = spdk_cpuset_parse(core_mask, buf);
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, SPDK_CPUSET_SIZE - 1, true) == 0);
+
+ /* Null parameters not allowed */
+ rc = spdk_cpuset_parse(core_mask, NULL);
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(NULL, "[1]");
+ CU_ASSERT(rc < 0);
+
+	/* Wrongly formatted core lists */
+ rc = spdk_cpuset_parse(core_mask, "");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[10--11]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[11-10]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[10-11,]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[,10-11]");
+ CU_ASSERT(rc < 0);
+
+ /* Out of range value */
+ snprintf(buf, sizeof(buf), "[%d]", SPDK_CPUSET_SIZE + 1);
+ rc = spdk_cpuset_parse(core_mask, buf);
+ CU_ASSERT(rc < 0);
+
+ /* Overflow value (UINT64_MAX * 10) */
+ rc = spdk_cpuset_parse(core_mask, "[184467440737095516150]");
+ CU_ASSERT(rc < 0);
+
+ spdk_cpuset_free(core_mask);
+}
+
+static void
+test_cpuset_fmt(void)
+{
+ int i;
+ uint32_t lcore;
+ struct spdk_cpuset *core_mask = spdk_cpuset_alloc();
+ const char *hex_mask;
+ char hex_mask_ref[SPDK_CPUSET_SIZE / 4 + 1];
+
+ /* Clear coremask. hex_mask should be "0" */
+ spdk_cpuset_zero(core_mask);
+ hex_mask = spdk_cpuset_fmt(core_mask);
+ SPDK_CU_ASSERT_FATAL(hex_mask != NULL);
+ CU_ASSERT(strcmp("0", hex_mask) == 0);
+
+ /* Set coremask 0x51234. Result should be "51234" */
+ spdk_cpuset_zero(core_mask);
+ spdk_cpuset_set_cpu(core_mask, 2, true);
+ spdk_cpuset_set_cpu(core_mask, 4, true);
+ spdk_cpuset_set_cpu(core_mask, 5, true);
+ spdk_cpuset_set_cpu(core_mask, 9, true);
+ spdk_cpuset_set_cpu(core_mask, 12, true);
+ spdk_cpuset_set_cpu(core_mask, 16, true);
+ spdk_cpuset_set_cpu(core_mask, 18, true);
+ hex_mask = spdk_cpuset_fmt(core_mask);
+ SPDK_CU_ASSERT_FATAL(hex_mask != NULL);
+ CU_ASSERT(strcmp("51234", hex_mask) == 0);
+
+ /* Set all cores */
+ spdk_cpuset_zero(core_mask);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ for (lcore = 0; lcore < SPDK_CPUSET_SIZE; lcore++) {
+ spdk_cpuset_set_cpu(core_mask, lcore, true);
+ }
+ for (i = 0; i < SPDK_CPUSET_SIZE / 4; i++) {
+ hex_mask_ref[i] = 'f';
+ }
+ hex_mask_ref[SPDK_CPUSET_SIZE / 4] = '\0';
+
+ /* Check data before format */
+ CU_ASSERT(cpuset_check_range(core_mask, 0, SPDK_CPUSET_SIZE - 1, true) == 0);
+
+ hex_mask = spdk_cpuset_fmt(core_mask);
+ SPDK_CU_ASSERT_FATAL(hex_mask != NULL);
+ CU_ASSERT(strcmp(hex_mask_ref, hex_mask) == 0);
+
+ /* Check data integrity after format */
+ CU_ASSERT(cpuset_check_range(core_mask, 0, SPDK_CPUSET_SIZE - 1, true) == 0);
+
+ spdk_cpuset_free(core_mask);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("cpuset", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_cpuset);
+ CU_ADD_TEST(suite, test_cpuset_parse);
+ CU_ADD_TEST(suite, test_cpuset_fmt);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
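The cpuset tests above exercise both the hex-mask and bracketed-list parse formats as well as formatting back to hex. A minimal round-trip sketch using only those calls (spdk/cpuset.h is the header the test itself includes):

/*
 * Minimal sketch: parse a core list and format it back as a hex mask, using
 * only the calls exercised above.
 */
#include <stdio.h>
#include "spdk/cpuset.h"

int
main(void)
{
	struct spdk_cpuset *set = spdk_cpuset_alloc();

	if (set == NULL) {
		return 1;
	}

	/* Both "0x10f" and "[0-3,8]" describe the same set of cores. */
	if (spdk_cpuset_parse(set, "[0-3,8]") < 0) {
		spdk_cpuset_free(set);
		return 1;
	}

	printf("mask 0x%s, %u cores set\n", spdk_cpuset_fmt(set), spdk_cpuset_count(set));

	spdk_cpuset_free(set);
	return 0;
}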
diff --git a/src/spdk/test/unit/lib/util/crc16.c/.gitignore b/src/spdk/test/unit/lib/util/crc16.c/.gitignore
new file mode 100644
index 000000000..d026adf09
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc16.c/.gitignore
@@ -0,0 +1 @@
+crc16_ut
diff --git a/src/spdk/test/unit/lib/util/crc16.c/Makefile b/src/spdk/test/unit/lib/util/crc16.c/Makefile
new file mode 100644
index 000000000..339146be5
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc16.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = crc16_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c b/src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c
new file mode 100644
index 000000000..03e6c65cd
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c
@@ -0,0 +1,104 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/crc16.c"
+
+static void
+test_crc16_t10dif(void)
+{
+ uint16_t crc;
+ char buf[] = "123456789";
+
+ crc = spdk_crc16_t10dif(0, buf, strlen(buf));
+ CU_ASSERT(crc == 0xd0db);
+}
+
+static void
+test_crc16_t10dif_seed(void)
+{
+ uint16_t crc = 0;
+ char buf1[] = "1234";
+ char buf2[] = "56789";
+
+ crc = spdk_crc16_t10dif(crc, buf1, strlen(buf1));
+ crc = spdk_crc16_t10dif(crc, buf2, strlen(buf2));
+ CU_ASSERT(crc == 0xd0db);
+}
+
+static void
+test_crc16_t10dif_copy(void)
+{
+ uint16_t crc1 = 0, crc2;
+ char buf1[] = "1234";
+ char buf2[] = "56789";
+ char *buf3 = calloc(1, strlen(buf1) + strlen(buf2) + 1);
+ SPDK_CU_ASSERT_FATAL(buf3 != NULL);
+
+ crc1 = spdk_crc16_t10dif_copy(crc1, buf3, buf1, strlen(buf1));
+ crc1 = spdk_crc16_t10dif_copy(crc1, buf3 + strlen(buf1), buf2, strlen(buf2));
+ CU_ASSERT(crc1 == 0xd0db);
+
+ crc2 = spdk_crc16_t10dif(0, buf3, strlen(buf3));
+ CU_ASSERT(crc2 == 0xd0db);
+
+ free(buf3);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("crc16", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_crc16_t10dif);
+ CU_ADD_TEST(suite, test_crc16_t10dif_seed);
+ CU_ADD_TEST(suite, test_crc16_t10dif_copy);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
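test_crc16_t10dif_seed() above shows that the running CRC can be passed back as the seed for the next chunk. A minimal sketch of that chunked usage follows; the spdk/crc16.h header name is an assumption based on SPDK's include layout.

/*
 * Minimal sketch: chaining spdk_crc16_t10dif() across chunks by reusing the
 * running CRC as the seed. Assumes the declaration lives in spdk/crc16.h.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "spdk/crc16.h"

int
main(void)
{
	const char *chunks[] = { "1234", "56789" };
	uint16_t crc = 0;	/* T10-DIF starts from a zero seed */
	size_t i;

	/* Feeding the chunks in order matches the CRC of one contiguous buffer. */
	for (i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
		crc = spdk_crc16_t10dif(crc, chunks[i], strlen(chunks[i]));
	}

	printf("crc16 t10dif = 0x%04x\n", crc);	/* expected 0xd0db, as asserted above */
	return 0;
}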
diff --git a/src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore b/src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore
new file mode 100644
index 000000000..40a85a93f
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore
@@ -0,0 +1 @@
+crc32_ieee_ut
diff --git a/src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile b/src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile
new file mode 100644
index 000000000..6b976721c
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = crc32_ieee_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c b/src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c
new file mode 100644
index 000000000..2187438bf
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/crc32.c"
+#include "util/crc32_ieee.c"
+
+static void
+test_crc32_ieee(void)
+{
+ uint32_t crc;
+ char buf[] = "Hello world!";
+
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32_ieee_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x1b851995);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("crc32_ieee", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_crc32_ieee);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/crc32c.c/.gitignore b/src/spdk/test/unit/lib/util/crc32c.c/.gitignore
new file mode 100644
index 000000000..55bedec7f
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32c.c/.gitignore
@@ -0,0 +1 @@
+crc32c_ut
diff --git a/src/spdk/test/unit/lib/util/crc32c.c/Makefile b/src/spdk/test/unit/lib/util/crc32c.c/Makefile
new file mode 100644
index 000000000..4f1cc0e4b
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32c.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = crc32c_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c b/src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c
new file mode 100644
index 000000000..6313d7bf6
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c
@@ -0,0 +1,145 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/crc32.c"
+#include "util/crc32c.c"
+
+static void
+test_crc32c(void)
+{
+ uint32_t crc;
+ char buf[1024];
+
+ /* Verify a string's CRC32-C value against the known correct result. */
+ snprintf(buf, sizeof(buf), "%s", "Hello world!");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x7b98e751);
+
+ /*
+ * The main loop of the optimized CRC32-C implementation processes data in 8-byte blocks,
+ * followed by a loop to handle the 0-7 trailing bytes.
+ * Test all buffer sizes from 0 to 7 in order to hit all possible trailing byte counts.
+ */
+
+ /* 0-byte buffer should not modify CRC at all, so final result should be ~0 ^ ~0 == 0 */
+ snprintf(buf, sizeof(buf), "%s", "");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0);
+
+ /* 1-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "1");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x90F599E3);
+
+ /* 2-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "12");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x7355C460);
+
+ /* 3-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "123");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x107B2FB2);
+
+ /* 4-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "1234");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0xF63AF4EE);
+
+ /* 5-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "12345");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x18D12335);
+
+ /* 6-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "123456");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x41357186);
+
+ /* 7-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "1234567");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x124297EA);
+
+ /* Test a buffer of exactly 8 bytes (one block in the main CRC32-C loop). */
+ snprintf(buf, sizeof(buf), "%s", "12345678");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x6087809A);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("crc32c", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_crc32c);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
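Both CRC32 tests above follow the same seed-with-ones, update, invert-at-the-end pattern. A minimal sketch of that pattern for the CRC32-C variant follows; the spdk/crc32.h header name is an assumption based on SPDK's include layout.

/*
 * Minimal sketch: the seed/update/finalize pattern shared by the CRC32 tests,
 * shown for CRC32-C. Assumes the declarations live in spdk/crc32.h.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "spdk/crc32.h"

static uint32_t
crc32c_of(const void *buf, size_t len)
{
	uint32_t crc = 0xFFFFFFFFu;			/* seed with all ones */

	crc = spdk_crc32c_update(buf, len, crc);	/* fold in the data */
	return crc ^ 0xFFFFFFFFu;			/* final inversion */
}

int
main(void)
{
	const char *msg = "Hello world!";

	/* Matches the value asserted in test_crc32c() above. */
	printf("crc32c = 0x%08x\n", crc32c_of(msg, strlen(msg)));
	return 0;
}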
diff --git a/src/spdk/test/unit/lib/util/dif.c/.gitignore b/src/spdk/test/unit/lib/util/dif.c/.gitignore
new file mode 100644
index 000000000..040b296b7
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/dif.c/.gitignore
@@ -0,0 +1 @@
+dif_ut
diff --git a/src/spdk/test/unit/lib/util/dif.c/Makefile b/src/spdk/test/unit/lib/util/dif.c/Makefile
new file mode 100644
index 000000000..714928236
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/dif.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = dif_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/dif.c/dif_ut.c b/src/spdk/test/unit/lib/util/dif.c/dif_ut.c
new file mode 100644
index 000000000..0b069b189
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/dif.c/dif_ut.c
@@ -0,0 +1,2669 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/dif.c"
+
+#define DATA_PATTERN(offset) ((uint8_t)(0xAB + (offset)))
+#define GUARD_SEED 0xCD
+
+static int
+ut_data_pattern_generate(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks)
+{
+ struct _dif_sgl sgl;
+ uint32_t offset_blocks, offset_in_block, buf_len, data_offset, i;
+ uint8_t *buf;
+
+ _dif_sgl_init(&sgl, iovs, iovcnt);
+
+ if (!_dif_sgl_is_valid(&sgl, block_size * num_blocks)) {
+ return -1;
+ }
+
+ offset_blocks = 0;
+ data_offset = 0;
+
+ while (offset_blocks < num_blocks) {
+ offset_in_block = 0;
+ while (offset_in_block < block_size) {
+ _dif_sgl_get_buf(&sgl, (void *)&buf, &buf_len);
+ if (offset_in_block < block_size - md_size) {
+ buf_len = spdk_min(buf_len,
+ block_size - md_size - offset_in_block);
+ for (i = 0; i < buf_len; i++) {
+ buf[i] = DATA_PATTERN(data_offset + i);
+ }
+ data_offset += buf_len;
+ } else {
+ buf_len = spdk_min(buf_len, block_size - offset_in_block);
+ memset(buf, 0, buf_len);
+ }
+ _dif_sgl_advance(&sgl, buf_len);
+ offset_in_block += buf_len;
+ }
+ offset_blocks++;
+ }
+
+ return 0;
+}
+
+static int
+ut_data_pattern_verify(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks)
+{
+ struct _dif_sgl sgl;
+ uint32_t offset_blocks, offset_in_block, buf_len, data_offset, i;
+ uint8_t *buf;
+
+ _dif_sgl_init(&sgl, iovs, iovcnt);
+
+ if (!_dif_sgl_is_valid(&sgl, block_size * num_blocks)) {
+ return -1;
+ }
+
+ offset_blocks = 0;
+ data_offset = 0;
+
+ while (offset_blocks < num_blocks) {
+ offset_in_block = 0;
+ while (offset_in_block < block_size) {
+ _dif_sgl_get_buf(&sgl, (void *)&buf, &buf_len);
+
+ if (offset_in_block < block_size - md_size) {
+ buf_len = spdk_min(buf_len,
+ block_size - md_size - offset_in_block);
+ for (i = 0; i < buf_len; i++) {
+ if (buf[i] != DATA_PATTERN(data_offset + i)) {
+ return -1;
+ }
+ }
+ data_offset += buf_len;
+ } else {
+ buf_len = spdk_min(buf_len, block_size - offset_in_block);
+ }
+ _dif_sgl_advance(&sgl, buf_len);
+ offset_in_block += buf_len;
+ }
+ offset_blocks++;
+ }
+
+ return 0;
+}
+
+static void
+_iov_alloc_buf(struct iovec *iov, uint32_t len)
+{
+ iov->iov_base = calloc(1, len);
+ iov->iov_len = len;
+ SPDK_CU_ASSERT_FATAL(iov->iov_base != NULL);
+}
+
+static void
+_iov_free_buf(struct iovec *iov)
+{
+ free(iov->iov_base);
+}
+
+static void
+_iov_set_buf(struct iovec *iov, uint8_t *buf, uint32_t buf_len)
+{
+ iov->iov_base = buf;
+ iov->iov_len = buf_len;
+}
+
+static bool
+_iov_check(struct iovec *iov, void *iov_base, uint32_t iov_len)
+{
+ return (iov->iov_base == iov_base && iov->iov_len == iov_len);
+}
+
+static void
+_dif_generate_and_verify(struct iovec *iov,
+ uint32_t block_size, uint32_t md_size, bool dif_loc,
+ enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t ref_tag, uint32_t e_ref_tag,
+ uint16_t app_tag, uint16_t apptag_mask, uint16_t e_app_tag,
+ bool expect_pass)
+{
+ struct spdk_dif_ctx ctx = {};
+ uint32_t guard_interval;
+ uint16_t guard = 0;
+ int rc;
+
+ rc = ut_data_pattern_generate(iov, 1, block_size, md_size, 1);
+ CU_ASSERT(rc == 0);
+
+ guard_interval = _get_guard_interval(block_size, md_size, dif_loc, true);
+
+ ctx.dif_type = dif_type;
+ ctx.dif_flags = dif_flags;
+ ctx.init_ref_tag = ref_tag;
+ ctx.app_tag = app_tag;
+
+ if (dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
+ guard = spdk_crc16_t10dif(0, iov->iov_base, guard_interval);
+ }
+
+ _dif_generate(iov->iov_base + guard_interval, guard, 0, &ctx);
+
+ ctx.init_ref_tag = e_ref_tag;
+ ctx.apptag_mask = apptag_mask;
+ ctx.app_tag = e_app_tag;
+
+ rc = _dif_verify(iov->iov_base + guard_interval, guard, 0, &ctx, NULL);
+ CU_ASSERT((expect_pass && rc == 0) || (!expect_pass && rc != 0));
+
+ rc = ut_data_pattern_verify(iov, 1, block_size, md_size, 1);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dif_generate_and_verify_test(void)
+{
+ struct iovec iov;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iov, 4096 + 128);
+
+ /* Positive cases */
+
+ /* The case that DIF is contained in the first 8 bytes of metadata. */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, true,
+ SPDK_DIF_TYPE1, dif_flags,
+ 22, 22,
+ 0x22, 0xFFFF, 0x22,
+ true);
+
+ /* The case that DIF is contained in the last 8 bytes of metadata. */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE1, dif_flags,
+ 22, 22,
+ 0x22, 0xFFFF, 0x22,
+ true);
+
+ /* Negative cases */
+
+ /* Reference tag doesn't match. */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE1, dif_flags,
+ 22, 23,
+ 0x22, 0xFFFF, 0x22,
+ false);
+
+ /* Application tag doesn't match. */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE1, dif_flags,
+ 22, 22,
+ 0x22, 0xFFFF, 0x23,
+ false);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+dif_disable_check_test(void)
+{
+ struct iovec iov;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iov, 4096 + 128);
+
+	/* For Type 1, the DIF check is disabled when the Application Tag is 0xFFFF,
+	 * so verification is expected to pass.
+	 */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE1, dif_flags,
+ 22, 22,
+ 0xFFFF, 0xFFFF, 0x22,
+ true);
+
+	/* For Type 3, the DIF check is not disabled when the Application Tag is 0xFFFF
+	 * but the Reference Tag is not 0xFFFFFFFF, so verification is expected to fail.
+	 */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE3, dif_flags,
+ 22, 22,
+ 0xFFFF, 0xFFFF, 0x22,
+ false);
+
+	/* For Type 3, the DIF check is disabled when the Application Tag is 0xFFFF and
+	 * the Reference Tag is 0xFFFFFFFF, so verification is expected to pass.
+	 */
+ _dif_generate_and_verify(&iov,
+ 4096 + 128, 128, false,
+ SPDK_DIF_TYPE3, dif_flags,
+ 0xFFFFFFFF, 22,
+ 0xFFFF, 0xFFFF, 0x22,
+ true);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+dif_sec_512_md_0_error_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ int rc;
+
+ /* Metadata size is 0. */
+ rc = spdk_dif_ctx_init(&ctx, 512, 0, true, false, SPDK_DIF_TYPE1, 0, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc != 0);
+}
+
+static void
+dif_guard_seed_test(void)
+{
+ struct iovec iov;
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct spdk_dif *dif;
+ uint16_t guard;
+ int rc;
+
+ _iov_alloc_buf(&iov, 512 + 8);
+
+ memset(iov.iov_base, 0, 512 + 8);
+
+ dif = (struct spdk_dif *)(iov.iov_base + 512);
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(&iov, 1, 1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* Guard should be zero if the block is all zero and seed is not added. */
+ guard = from_be16(&dif->guard);
+ CU_ASSERT(guard == 0);
+
+ rc = spdk_dif_verify(&iov, 1, 1, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK, 0, 0, 0, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(&iov, 1, 1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* Guard should not be zero if the block is all zero but seed is added. */
+ guard = from_be16(&dif->guard);
+ CU_ASSERT(guard != 0);
+
+ rc = spdk_dif_verify(&iov, 1, 1, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+dif_generate_and_verify(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag)
+{
+ struct spdk_dif_ctx ctx = {};
+ int rc;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc, dif_type, dif_flags,
+ init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(iovs, iovcnt, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(iovs, iovcnt, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dif_disable_sec_512_md_8_single_iov_test(void)
+{
+ struct iovec iov;
+
+ _iov_alloc_buf(&iov, 512 + 8);
+
+ dif_generate_and_verify(&iov, 1, 512 + 8, 8, 1, false, SPDK_DIF_DISABLE, 0, 0, 0, 0);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+dif_sec_512_md_8_prchk_0_single_iov_test(void)
+{
+ struct iovec iov;
+
+ _iov_alloc_buf(&iov, 512 + 8);
+
+ dif_generate_and_verify(&iov, 1, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1, 0, 0, 0, 0);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+dif_sec_512_md_8_prchk_0_1_2_4_multi_iovs_test(void)
+{
+ struct iovec iovs[4];
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], (512 + 8) * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ dif_generate_and_verify(iovs, 4, 512 + 8, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ 0, 22, 0xFFFF, 0x22);
+
+ dif_generate_and_verify(iovs, 4, 512 + 8, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK, 22, 0xFFFF, 0x22);
+
+ dif_generate_and_verify(iovs, 4, 512 + 8, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_APPTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ dif_generate_and_verify(iovs, 4, 512 + 8, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_REFTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dif_sec_4096_md_128_prchk_7_multi_iovs_test(void)
+{
+ struct iovec iovs[4];
+ int i, num_blocks;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], (4096 + 128) * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ dif_generate_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ dif_generate_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, true, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_split_data_and_md_test(void)
+{
+ struct iovec iovs[2];
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 512);
+ _iov_alloc_buf(&iovs[1], 8);
+
+ dif_generate_and_verify(iovs, 2, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_split_data_test(void)
+{
+ struct iovec iovs[2];
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 256);
+ _iov_alloc_buf(&iovs[1], 264);
+
+ dif_generate_and_verify(iovs, 2, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_split_guard_test(void)
+{
+ struct iovec iovs[2];
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 513);
+ _iov_alloc_buf(&iovs[1], 7);
+
+ dif_generate_and_verify(iovs, 2, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_split_apptag_test(void)
+{
+ struct iovec iovs[2];
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 515);
+ _iov_alloc_buf(&iovs[1], 5);
+
+ dif_generate_and_verify(iovs, 2, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_split_reftag_test(void)
+{
+ struct iovec iovs[2];
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 518);
+ _iov_alloc_buf(&iovs[1], 2);
+
+ dif_generate_and_verify(iovs, 2, 512 + 8, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_512_md_8_prchk_7_multi_iovs_complex_splits_test(void)
+{
+ struct iovec iovs[9];
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ /* data[0][255:0] */
+ _iov_alloc_buf(&iovs[0], 256);
+
+ /* data[0][511:256], guard[0][0] */
+ _iov_alloc_buf(&iovs[1], 256 + 1);
+
+ /* guard[0][1], apptag[0][0] */
+ _iov_alloc_buf(&iovs[2], 1 + 1);
+
+ /* apptag[0][1], reftag[0][0] */
+ _iov_alloc_buf(&iovs[3], 1 + 1);
+
+ /* reftag[0][3:1], data[1][255:0] */
+ _iov_alloc_buf(&iovs[4], 3 + 256);
+
+ /* data[1][511:256], guard[1][0] */
+ _iov_alloc_buf(&iovs[5], 256 + 1);
+
+ /* guard[1][1], apptag[1][0] */
+ _iov_alloc_buf(&iovs[6], 1 + 1);
+
+ /* apptag[1][1], reftag[1][0] */
+ _iov_alloc_buf(&iovs[7], 1 + 1);
+
+ /* reftag[1][3:1] */
+ _iov_alloc_buf(&iovs[8], 3);
+
+ dif_generate_and_verify(iovs, 9, 512 + 8, 8, 2, false, SPDK_DIF_TYPE1, dif_flags,
+ 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 9; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dif_sec_4096_md_128_prchk_7_multi_iovs_complex_splits_test(void)
+{
+ struct iovec iovs[11];
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ /* data[0][999:0] */
+ _iov_alloc_buf(&iovs[0], 1000);
+
+ /* data[0][4095:1000], guard[0][0] */
+ _iov_alloc_buf(&iovs[1], 3096 + 1);
+
+ /* guard[0][1], apptag[0][0] */
+ _iov_alloc_buf(&iovs[2], 1 + 1);
+
+ /* apptag[0][1], reftag[0][0] */
+ _iov_alloc_buf(&iovs[3], 1 + 1);
+
+ /* reftag[0][3:1], ignore[0][59:0] */
+ _iov_alloc_buf(&iovs[4], 3 + 60);
+
+ /* ignore[0][119:60], data[1][3050:0] */
+ _iov_alloc_buf(&iovs[5], 60 + 3051);
+
+ /* data[1][4095:3051], guard[1][0] */
+ _iov_alloc_buf(&iovs[6], 1045 + 1);
+
+ /* guard[1][1], apptag[1][0] */
+ _iov_alloc_buf(&iovs[7], 1 + 1);
+
+ /* apptag[1][1], reftag[1][0] */
+ _iov_alloc_buf(&iovs[8], 1 + 1);
+
+ /* reftag[1][3:1], ignore[1][9:0] */
+ _iov_alloc_buf(&iovs[9], 3 + 10);
+
+ /* ignore[1][119:10], plus 8 unused bytes */
+ _iov_alloc_buf(&iovs[10], 118);
+
+ dif_generate_and_verify(iovs, 11, 4096 + 128, 128, 2, false, SPDK_DIF_TYPE1, dif_flags,
+ 22, 0xFFFF, 0x22);
+ dif_generate_and_verify(iovs, 11, 4096 + 128, 128, 2, true, SPDK_DIF_TYPE1, dif_flags,
+ 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 11; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
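+/*
+ * Helper for the error-injection tests below: generate a data pattern and DIF,
+ * inject a single error of the requested type, and confirm that spdk_dif_verify()
+ * fails with the matching error type and offset. An injected data error is
+ * expected to surface as a guard (CRC) error.
+ */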
+static void
+_dif_inject_error_and_verify(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags, bool dif_loc)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ uint32_t inject_offset = 0, dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc,
+ SPDK_DIF_TYPE1, dif_flags, 88, 0xFFFF, 0x88, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(iovs, iovcnt, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_inject_error(iovs, iovcnt, num_blocks, &ctx, inject_flags, &inject_offset);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(iovs, iovcnt, num_blocks, &ctx, &err_blk);
+ CU_ASSERT(rc != 0);
+ if (inject_flags == SPDK_DIF_DATA_ERROR) {
+ CU_ASSERT(SPDK_DIF_GUARD_ERROR == err_blk.err_type);
+ } else {
+ CU_ASSERT(inject_flags == err_blk.err_type);
+ }
+ CU_ASSERT(inject_offset == err_blk.err_offset);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT((rc == 0 && (inject_flags != SPDK_DIF_DATA_ERROR)) ||
+ (rc != 0 && (inject_flags == SPDK_DIF_DATA_ERROR)));
+}
+
+static void
+dif_inject_error_and_verify(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags)
+{
+ /* The case that DIF is contained in the first 8 bytes of metadata. */
+ _dif_inject_error_and_verify(iovs, iovcnt, block_size, md_size, num_blocks,
+ inject_flags, true);
+
+ /* The case that DIF is contained in the last 8 bytes of metadata. */
+ _dif_inject_error_and_verify(iovs, iovcnt, block_size, md_size, num_blocks,
+ inject_flags, false);
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test(void)
+{
+ struct iovec iovs[4];
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], (4096 + 128) * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ dif_inject_error_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 4, 4096 + 128, 128, num_blocks, SPDK_DIF_DATA_ERROR);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_data_and_md_test(void)
+{
+ struct iovec iovs[2];
+
+ _iov_alloc_buf(&iovs[0], 4096);
+ _iov_alloc_buf(&iovs[1], 128);
+
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_DATA_ERROR);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_data_test(void)
+{
+ struct iovec iovs[2];
+
+ _iov_alloc_buf(&iovs[0], 2048);
+ _iov_alloc_buf(&iovs[1], 2048 + 128);
+
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_DATA_ERROR);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_guard_test(void)
+{
+ struct iovec iovs[2];
+
+ _iov_alloc_buf(&iovs[0], 4096 + 1);
+ _iov_alloc_buf(&iovs[1], 127);
+
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_DATA_ERROR);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_apptag_test(void)
+{
+ struct iovec iovs[2];
+
+ _iov_alloc_buf(&iovs[0], 4096 + 3);
+ _iov_alloc_buf(&iovs[1], 125);
+
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_DATA_ERROR);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
+static void
+dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_reftag_test(void)
+{
+ struct iovec iovs[2];
+
+ _iov_alloc_buf(&iovs[0], 4096 + 6);
+ _iov_alloc_buf(&iovs[1], 122);
+
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_GUARD_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_APPTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_REFTAG_ERROR);
+ dif_inject_error_and_verify(iovs, 2, 4096 + 128, 128, 1, SPDK_DIF_DATA_ERROR);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+}
+
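+/*
+ * Helper for the DIF copy tests: the data-only iovecs are packed into a single
+ * bounce buffer with interleaved DIF via spdk_dif_generate_copy(), checked with
+ * spdk_dif_verify_copy(), and the data pattern is verified again afterwards.
+ */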
+static void
+dif_copy_gen_and_verify(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag)
+{
+ struct spdk_dif_ctx ctx = {};
+ int rc;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size - md_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc, dif_type, dif_flags,
+ init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_copy(iovs, iovcnt, bounce_iov, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify_copy(iovs, iovcnt, bounce_iov, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size - md_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dif_copy_sec_512_md_8_prchk_0_single_iov(void)
+{
+ struct iovec iov, bounce_iov;
+
+ _iov_alloc_buf(&iov, 512 * 4);
+ _iov_alloc_buf(&bounce_iov, (512 + 8) * 4);
+
+ dif_copy_gen_and_verify(&iov, 1, &bounce_iov, 512 + 8, 8, 4,
+ false, SPDK_DIF_TYPE1, 0, 0, 0, 0);
+ dif_copy_gen_and_verify(&iov, 1, &bounce_iov, 512 + 8, 8, 4,
+ true, SPDK_DIF_TYPE1, 0, 0, 0, 0);
+
+ _iov_free_buf(&iov);
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dif_copy_sec_512_md_8_prchk_0_1_2_4_multi_iovs(void)
+{
+ struct iovec iovs[4], bounce_iov;
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 512 * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ _iov_alloc_buf(&bounce_iov, (512 + 8) * num_blocks);
+
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 512 + 8, 8, num_blocks,
+ false, SPDK_DIF_TYPE1, 0, 22, 0xFFFF, 0x22);
+
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 512 + 8, 8, num_blocks,
+ false, SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK, 22, 0xFFFF, 0x22);
+
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 512 + 8, 8, num_blocks,
+ false, SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_APPTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 512 + 8, 8, num_blocks,
+ false, SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_REFTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dif_copy_sec_4096_md_128_prchk_7_multi_iovs(void)
+{
+ struct iovec iovs[4], bounce_iov;
+ uint32_t dif_flags;
+ int i, num_blocks;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 4096 * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ _iov_alloc_buf(&bounce_iov, (4096 + 128) * num_blocks);
+
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128, num_blocks,
+ false, SPDK_DIF_TYPE1, dif_flags, 22, 0xFFFF, 0x22);
+ dif_copy_gen_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128, num_blocks,
+ true, SPDK_DIF_TYPE1, dif_flags, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dif_copy_sec_512_md_8_prchk_7_multi_iovs_split_data(void)
+{
+ struct iovec iovs[2], bounce_iov;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 256);
+ _iov_alloc_buf(&iovs[1], 256);
+
+ _iov_alloc_buf(&bounce_iov, 512 + 8);
+
+ dif_copy_gen_and_verify(iovs, 2, &bounce_iov, 512 + 8, 8, 1,
+ false, SPDK_DIF_TYPE1, dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dif_copy_sec_512_md_8_prchk_7_multi_iovs_complex_splits(void)
+{
+ struct iovec iovs[6], bounce_iov;
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ /* data[0][255:0] */
+ _iov_alloc_buf(&iovs[0], 256);
+
+ /* data[0][511:256], data[1][255:0] */
+ _iov_alloc_buf(&iovs[1], 256 + 256);
+
+ /* data[1][383:256] */
+ _iov_alloc_buf(&iovs[2], 128);
+
+ /* data[1][384] */
+ _iov_alloc_buf(&iovs[3], 1);
+
+ /* data[1][510:385] */
+ _iov_alloc_buf(&iovs[4], 126);
+
+ /* data[1][511], data[2][511:0], data[3][511:0] */
+ _iov_alloc_buf(&iovs[5], 1 + 512 * 2);
+
+ _iov_alloc_buf(&bounce_iov, (512 + 8) * 4);
+
+ dif_copy_gen_and_verify(iovs, 6, &bounce_iov, 512 + 8, 8, 4,
+ true, SPDK_DIF_TYPE1, dif_flags, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 6; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+_dif_copy_inject_error_and_verify(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags, bool dif_loc)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ uint32_t inject_offset = 0, dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size - md_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc, SPDK_DIF_TYPE1, dif_flags,
+ 88, 0xFFFF, 0x88, 0, GUARD_SEED);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ rc = spdk_dif_generate_copy(iovs, iovcnt, bounce_iov, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_inject_error(bounce_iov, 1, num_blocks, &ctx, inject_flags, &inject_offset);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify_copy(iovs, iovcnt, bounce_iov, num_blocks, &ctx, &err_blk);
+ CU_ASSERT(rc != 0);
+ if (inject_flags == SPDK_DIF_DATA_ERROR) {
+ CU_ASSERT(SPDK_DIF_GUARD_ERROR == err_blk.err_type);
+ } else {
+ CU_ASSERT(inject_flags == err_blk.err_type);
+ }
+ CU_ASSERT(inject_offset == err_blk.err_offset);
+}
+
+static void
+dif_copy_inject_error_and_verify(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags)
+{
+ /* The case that DIF is contained in the first 8 bytes of metadata. */
+ _dif_copy_inject_error_and_verify(iovs, iovcnt, bounce_iov,
+ block_size, md_size, num_blocks,
+ inject_flags, true);
+
+ /* The case that DIF is contained in the last 8 bytes of metadata. */
+ _dif_copy_inject_error_and_verify(iovs, iovcnt, bounce_iov,
+ block_size, md_size, num_blocks,
+ inject_flags, false);
+}
+
+static void
+dif_copy_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test(void)
+{
+ struct iovec iovs[4], bounce_iov;
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 4096 * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ _iov_alloc_buf(&bounce_iov, (4096 + 128) * num_blocks);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ num_blocks, SPDK_DIF_GUARD_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ num_blocks, SPDK_DIF_APPTAG_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ num_blocks, SPDK_DIF_REFTAG_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ num_blocks, SPDK_DIF_DATA_ERROR);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dif_copy_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_test(void)
+{
+ struct iovec iovs[4], bounce_iov;
+ int i;
+
+ _iov_alloc_buf(&iovs[0], 2048);
+ _iov_alloc_buf(&iovs[1], 2048);
+ _iov_alloc_buf(&iovs[2], 1);
+ _iov_alloc_buf(&iovs[3], 4095);
+
+ _iov_alloc_buf(&bounce_iov, (4096 + 128) * 2);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ 2, SPDK_DIF_GUARD_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ 2, SPDK_DIF_APPTAG_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ 2, SPDK_DIF_REFTAG_ERROR);
+
+ dif_copy_inject_error_and_verify(iovs, 4, &bounce_iov, 4096 + 128, 128,
+ 2, SPDK_DIF_DATA_ERROR);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&bounce_iov);
+}
+
+static void
+dix_sec_512_md_0_error(void)
+{
+ struct spdk_dif_ctx ctx;
+ int rc;
+
+ rc = spdk_dif_ctx_init(&ctx, 512, 0, false, false, SPDK_DIF_TYPE1, 0, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc != 0);
+}
+
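+/*
+ * Helper for the DIX tests: unlike DIF, the protection information is kept in a
+ * separate metadata buffer (md_iov) rather than interleaved with the data, so
+ * spdk_dix_generate()/spdk_dix_verify() take the data iovecs and md_iov separately.
+ */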
+static void
+dix_generate_and_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag)
+{
+ struct spdk_dif_ctx ctx;
+ int rc;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, false, dif_loc, dif_type, dif_flags,
+ init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_generate(iovs, iovcnt, md_iov, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_verify(iovs, iovcnt, md_iov, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dix_sec_512_md_8_prchk_0_single_iov(void)
+{
+ struct iovec iov, md_iov;
+
+ _iov_alloc_buf(&iov, 512 * 4);
+ _iov_alloc_buf(&md_iov, 8 * 4);
+
+ dix_generate_and_verify(&iov, 1, &md_iov, 512, 8, 4, false, SPDK_DIF_TYPE1, 0, 0, 0, 0);
+ dix_generate_and_verify(&iov, 1, &md_iov, 512, 8, 4, true, SPDK_DIF_TYPE1, 0, 0, 0, 0);
+
+ _iov_free_buf(&iov);
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_512_md_8_prchk_0_1_2_4_multi_iovs(void)
+{
+ struct iovec iovs[4], md_iov;
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 512 * (i + 1));
+ num_blocks += i + 1;
+ }
+ _iov_alloc_buf(&md_iov, 8 * num_blocks);
+
+ dix_generate_and_verify(iovs, 4, &md_iov, 512, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ 0, 22, 0xFFFF, 0x22);
+
+ dix_generate_and_verify(iovs, 4, &md_iov, 512, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK, 22, 0xFFFF, 0x22);
+
+ dix_generate_and_verify(iovs, 4, &md_iov, 512, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_APPTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ dix_generate_and_verify(iovs, 4, &md_iov, 512, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_REFTAG_CHECK, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_4096_md_128_prchk_7_multi_iovs(void)
+{
+ struct iovec iovs[4], md_iov;
+ uint32_t dif_flags;
+ int i, num_blocks;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 4096 * (i + 1));
+ num_blocks += i + 1;
+ }
+ _iov_alloc_buf(&md_iov, 128 * num_blocks);
+
+ dix_generate_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+ dix_generate_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, true, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_512_md_8_prchk_7_multi_iovs_split_data(void)
+{
+ struct iovec iovs[2], md_iov;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ _iov_alloc_buf(&iovs[0], 256);
+ _iov_alloc_buf(&iovs[1], 256);
+ _iov_alloc_buf(&md_iov, 8);
+
+ dix_generate_and_verify(iovs, 2, &md_iov, 512, 8, 1, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ _iov_free_buf(&iovs[0]);
+ _iov_free_buf(&iovs[1]);
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_512_md_8_prchk_7_multi_iovs_complex_splits(void)
+{
+ struct iovec iovs[6], md_iov;
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ /* data[0][255:0] */
+ _iov_alloc_buf(&iovs[0], 256);
+
+ /* data[0][511:256], data[1][255:0] */
+ _iov_alloc_buf(&iovs[1], 256 + 256);
+
+ /* data[1][383:256] */
+ _iov_alloc_buf(&iovs[2], 128);
+
+ /* data[1][384] */
+ _iov_alloc_buf(&iovs[3], 1);
+
+ /* data[1][510:385] */
+ _iov_alloc_buf(&iovs[4], 126);
+
+ /* data[1][511], data[2][511:0], data[3][511:0] */
+ _iov_alloc_buf(&iovs[5], 1 + 512 * 2);
+
+ _iov_alloc_buf(&md_iov, 8 * 4);
+
+ dix_generate_and_verify(iovs, 6, &md_iov, 512, 8, 4, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22);
+
+ for (i = 0; i < 6; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
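+/*
+ * DIX counterpart of _dif_inject_error_and_verify(): inject one error into the
+ * data or the separate metadata buffer and confirm that spdk_dix_verify()
+ * reports the expected error type and offset.
+ */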
+static void
+_dix_inject_error_and_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags, bool dif_loc)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ uint32_t inject_offset = 0, dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, false, dif_loc, SPDK_DIF_TYPE1, dif_flags,
+ 88, 0xFFFF, 0x88, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_generate(iovs, iovcnt, md_iov, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_inject_error(iovs, iovcnt, md_iov, num_blocks, &ctx, inject_flags, &inject_offset);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_verify(iovs, iovcnt, md_iov, num_blocks, &ctx, &err_blk);
+ CU_ASSERT(rc != 0);
+
+ if (inject_flags == SPDK_DIF_DATA_ERROR) {
+ CU_ASSERT(SPDK_DIF_GUARD_ERROR == err_blk.err_type);
+ } else {
+ CU_ASSERT(inject_flags == err_blk.err_type);
+ }
+ CU_ASSERT(inject_offset == err_blk.err_offset);
+}
+
+static void
+dix_inject_error_and_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ uint32_t inject_flags)
+{
+ /* The case that DIF is contained in the first 8 bytes of metadata. */
+ _dix_inject_error_and_verify(iovs, iovcnt, md_iov, block_size, md_size, num_blocks,
+ inject_flags, true);
+
+ /* The case that DIF is contained in the last 8 bytes of metadata. */
+ _dix_inject_error_and_verify(iovs, iovcnt, md_iov, block_size, md_size, num_blocks,
+ inject_flags, false);
+}
+
+static void
+dix_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test(void)
+{
+ struct iovec iovs[4], md_iov;
+ int i, num_blocks;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 4096 * (i + 1));
+ num_blocks += i + 1;
+ }
+
+ _iov_alloc_buf(&md_iov, 128 * num_blocks);
+
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, SPDK_DIF_GUARD_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, SPDK_DIF_APPTAG_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, SPDK_DIF_REFTAG_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, SPDK_DIF_DATA_ERROR);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_test(void)
+{
+ struct iovec iovs[4], md_iov;
+ int i;
+
+ _iov_alloc_buf(&iovs[0], 2048);
+ _iov_alloc_buf(&iovs[1], 2048);
+ _iov_alloc_buf(&iovs[2], 1);
+ _iov_alloc_buf(&iovs[3], 4095);
+
+ _iov_alloc_buf(&md_iov, 128 * 2);
+
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, 2, SPDK_DIF_GUARD_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, 2, SPDK_DIF_APPTAG_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, 2, SPDK_DIF_REFTAG_ERROR);
+ dix_inject_error_and_verify(iovs, 4, &md_iov, 4096, 128, 2, SPDK_DIF_DATA_ERROR);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
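+/*
+ * Simulate a vectored read: fill the given iovecs with the expected data
+ * pattern starting at read_base, stopping after read_len bytes (or when the
+ * iovecs are exhausted), and return the number of bytes "read".
+ */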
+static int
+ut_readv(uint32_t read_base, uint32_t read_len, struct iovec *iovs, int iovcnt)
+{
+ int i;
+ uint32_t j, offset;
+ uint8_t *buf;
+
+ offset = 0;
+ for (i = 0; i < iovcnt; i++) {
+ buf = iovs[i].iov_base;
+ for (j = 0; j < iovs[i].iov_len; j++, offset++) {
+ if (offset >= read_len) {
+ return offset;
+ }
+ buf[j] = DATA_PATTERN(read_base + offset);
+ }
+ }
+
+ return offset;
+}
+
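+/*
+ * Exercise spdk_dif_set_md_interleave_iovs() and spdk_dif_generate_stream()
+ * across several partial reads into a single buffer, then compare the result
+ * byte for byte against a buffer generated in one shot.
+ */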
+static void
+set_md_interleave_iovs_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct iovec iov1, iov2, dif_iovs[4] = {};
+ uint32_t dif_check_flags, data_len, read_len, data_offset, mapped_len = 0;
+ uint8_t *buf1, *buf2;
+ int rc;
+
+ dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_check_flags, 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ /* The first data buffer:
+ * - Create an iovec array that leaves a space for metadata for each block.
+ * - The vectored read is split, so the iovec array is re-created before every vectored read.
+ */
+ buf1 = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf1 != NULL);
+ _iov_set_buf(&iov1, buf1, (4096 + 128) * 4);
+
+ data_offset = 0;
+ data_len = 4096 * 4;
+
+ /* 1st read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 4096 * 4);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], buf1 + 4096 + 128, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], buf1 + (4096 + 128) * 2, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], buf1 + (4096 + 128) * 3, 4096) == true);
+
+ read_len = ut_readv(data_offset, 1024, dif_iovs, 4);
+ CU_ASSERT(read_len == 1024);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 2nd read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 3072 + 4096 * 3);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + 1024, 3072) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], buf1 + 4096 + 128, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], buf1 + (4096 + 128) * 2, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], buf1 + (4096 + 128) * 3, 4096) == true);
+
+ read_len = ut_readv(data_offset, 3071, dif_iovs, 4);
+ CU_ASSERT(read_len == 3071);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 3rd read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 1 + 4096 * 3);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + 4095, 1) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], buf1 + 4096 + 128, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], buf1 + (4096 + 128) * 2, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], buf1 + (4096 + 128) * 3, 4096) == true);
+
+ read_len = ut_readv(data_offset, 1 + 4096 * 2 + 512, dif_iovs, 4);
+ CU_ASSERT(read_len == 1 + 4096 * 2 + 512);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 4th read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(mapped_len == 3584);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + (4096 + 128) * 3 + 512, 3584) == true);
+
+ read_len = ut_readv(data_offset, 3584, dif_iovs, 1);
+ CU_ASSERT(read_len == 3584);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ CU_ASSERT(data_offset == 4096 * 4);
+ data_len -= read_len;
+ CU_ASSERT(data_len == 0);
+
+ /* The second data buffer:
+ * - Set data pattern with a space for metadata for each block.
+ */
+ buf2 = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf2 != NULL);
+ _iov_set_buf(&iov2, buf2, (4096 + 128) * 4);
+
+ rc = ut_data_pattern_generate(&iov2, 1, 4096 + 128, 128, 4);
+ CU_ASSERT(rc == 0);
+ rc = spdk_dif_generate(&iov2, 1, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(&iov1, 1, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(&iov2, 1, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* Compare the first and the second data buffer by byte. */
+ rc = memcmp(buf1, buf2, (4096 + 128) * 4);
+ CU_ASSERT(rc == 0);
+
+ free(buf1);
+ free(buf2);
+}
+
+static void
+set_md_interleave_iovs_split_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct iovec iovs1[7], iovs2[7], dif_iovs[8] = {};
+ uint32_t dif_check_flags, data_len, read_len, data_offset, mapped_len = 0;
+ int rc, i;
+
+ dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1,
+ dif_check_flags, 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ /* The first SGL data buffer:
+ * - Create an iovec array that leaves a space for metadata for each block.
+ * - The vectored read is split, so the iovec array is re-created before every vectored read.
+ */
+ _iov_alloc_buf(&iovs1[0], 512 + 8 + 128);
+ _iov_alloc_buf(&iovs1[1], 128);
+ _iov_alloc_buf(&iovs1[2], 256 + 8);
+ _iov_alloc_buf(&iovs1[3], 100);
+ _iov_alloc_buf(&iovs1[4], 412 + 5);
+ _iov_alloc_buf(&iovs1[5], 3 + 300);
+ _iov_alloc_buf(&iovs1[6], 212 + 8);
+
+ data_offset = 0;
+ data_len = 512 * 4;
+
+ /* 1st read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 8, iovs1, 7,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 8);
+ CU_ASSERT(mapped_len == 512 * 4);
+ CU_ASSERT(_iov_check(&dif_iovs[0], iovs1[0].iov_base, 512) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], iovs1[0].iov_base + 512 + 8, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], iovs1[1].iov_base, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], iovs1[2].iov_base, 256) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[4], iovs1[3].iov_base, 100) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[5], iovs1[4].iov_base, 412) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[6], iovs1[5].iov_base + 3, 300) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[7], iovs1[6].iov_base, 212) == true);
+
+ read_len = ut_readv(data_offset, 128, dif_iovs, 8);
+ CU_ASSERT(read_len == 128);
+
+ rc = spdk_dif_generate_stream(iovs1, 7, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 2nd read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 8, iovs1, 7,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 8);
+ CU_ASSERT(mapped_len == 384 + 512 * 3);
+ CU_ASSERT(_iov_check(&dif_iovs[0], iovs1[0].iov_base + 128, 384) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], iovs1[0].iov_base + 512 + 8, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], iovs1[1].iov_base, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], iovs1[2].iov_base, 256) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[4], iovs1[3].iov_base, 100) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[5], iovs1[4].iov_base, 412) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[6], iovs1[5].iov_base + 3, 300) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[7], iovs1[6].iov_base, 212) == true);
+
+ read_len = ut_readv(data_offset, 383, dif_iovs, 8);
+ CU_ASSERT(read_len == 383);
+
+ rc = spdk_dif_generate_stream(iovs1, 7, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 3rd read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 8, iovs1, 7,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 8);
+ CU_ASSERT(mapped_len == 1 + 512 * 3);
+ CU_ASSERT(_iov_check(&dif_iovs[0], iovs1[0].iov_base + 511, 1) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], iovs1[0].iov_base + 512 + 8, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], iovs1[1].iov_base, 128) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], iovs1[2].iov_base, 256) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[4], iovs1[3].iov_base, 100) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[5], iovs1[4].iov_base, 412) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[6], iovs1[5].iov_base + 3, 300) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[7], iovs1[6].iov_base, 212) == true);
+
+ read_len = ut_readv(data_offset, 1 + 512 * 2 + 128, dif_iovs, 8);
+ CU_ASSERT(read_len == 1 + 512 * 2 + 128);
+
+ rc = spdk_dif_generate_stream(iovs1, 7, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ data_len -= read_len;
+
+ /* 4th read */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 8, iovs1, 7,
+ data_offset, data_len, &mapped_len, &ctx);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(mapped_len == 384);
+ CU_ASSERT(_iov_check(&dif_iovs[0], iovs1[5].iov_base + 3 + 128, 172) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], iovs1[6].iov_base, 212) == true);
+
+ read_len = ut_readv(data_offset, 384, dif_iovs, 8);
+ CU_ASSERT(read_len == 384);
+
+ rc = spdk_dif_generate_stream(iovs1, 7, data_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ data_offset += read_len;
+ CU_ASSERT(data_offset == 512 * 4);
+ data_len -= read_len;
+ CU_ASSERT(data_len == 0);
+
+ /* The second SGL data buffer:
+ * - Set data pattern with a space for metadata for each block.
+ */
+ _iov_alloc_buf(&iovs2[0], 512 + 8 + 128);
+ _iov_alloc_buf(&iovs2[1], 128);
+ _iov_alloc_buf(&iovs2[2], 256 + 8);
+ _iov_alloc_buf(&iovs2[3], 100);
+ _iov_alloc_buf(&iovs2[4], 412 + 5);
+ _iov_alloc_buf(&iovs2[5], 3 + 300);
+ _iov_alloc_buf(&iovs2[6], 212 + 8);
+
+ rc = ut_data_pattern_generate(iovs2, 7, 512 + 8, 8, 4);
+ CU_ASSERT(rc == 0);
+ rc = spdk_dif_generate(iovs2, 7, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(iovs1, 7, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(iovs2, 7, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* Compare the first and the second SGL data buffer by byte. */
+ for (i = 0; i < 7; i++) {
+ rc = memcmp(iovs1[i].iov_base, iovs2[i].iov_base,
+ iovs1[i].iov_len);
+ CU_ASSERT(rc == 0);
+ }
+
+ for (i = 0; i < 7; i++) {
+ _iov_free_buf(&iovs1[i]);
+ _iov_free_buf(&iovs2[i]);
+ }
+}
+
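+/*
+ * Generate DIF over a 5-block buffer in arbitrarily sized chunks with
+ * spdk_dif_generate_stream(); a chunk extending past the end of the data must
+ * fail with -ERANGE, and the completed buffer must pass spdk_dif_verify().
+ */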
+static void
+dif_generate_stream_test(void)
+{
+ struct iovec iov;
+ struct spdk_dif_ctx ctx;
+ struct spdk_dif_error err_blk;
+ uint32_t dif_flags;
+ int rc;
+
+ _iov_alloc_buf(&iov, (512 + 8) * 5);
+
+ rc = ut_data_pattern_generate(&iov, 1, 512 + 8, 8, 5);
+ CU_ASSERT(rc == 0);
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1, dif_flags,
+ 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 0, 511, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 511, 1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 512, 256, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 768, 512, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 1280, 1024, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 2304, 256, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate_stream(&iov, 1, 2560, 512, &ctx);
+ CU_ASSERT(rc == -ERANGE);
+
+ rc = spdk_dif_verify(&iov, 1, 5, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(&iov, 1, 512 + 8, 8, 5);
+ CU_ASSERT(rc == 0);
+
+ _iov_free_buf(&iov);
+}
+
+static void
+set_md_interleave_iovs_alignment_test(void)
+{
+ struct iovec iovs[3], dif_iovs[5] = {};
+ uint32_t mapped_len = 0;
+ int rc;
+ struct spdk_dif_ctx ctx;
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1,
+ 0, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ /* The case that buffer size is smaller than necessary. */
+ _iov_set_buf(&iovs[0], (uint8_t *)0xDEADBEEF, 1024);
+ _iov_set_buf(&iovs[1], (uint8_t *)0xFEEDBEEF, 1024);
+ _iov_set_buf(&iovs[2], (uint8_t *)0xC0FFEE, 24);
+
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 0, 2048, &mapped_len, &ctx);
+ CU_ASSERT(rc == -ERANGE);
+
+ /* The following are the normal cases. */
+ _iov_set_buf(&iovs[2], (uint8_t *)0xC0FFEE, 32);
+
+ /* The data length is smaller than one data block. */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 0, 500, &mapped_len, &ctx);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(mapped_len == 500);
+ CU_ASSERT(_iov_check(&dif_iovs[0], (void *)0xDEADBEEF, 500) == true);
+
+ /* Pass a sufficient number of iovecs. */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 500, 1000, &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 1000);
+ CU_ASSERT(_iov_check(&dif_iovs[0], (void *)(0xDEADBEEF + 500), 12) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], (void *)(0xDEADBEEF + 520), 504) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], (void *)0xFEEDBEEF, 8) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], (void *)(0xFEEDBEEF + 16), 476) == true);
+
+ /* Pass an iovec array smaller than necessary. */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 3, iovs, 3, 500, 1000, &mapped_len, &ctx);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(mapped_len == 524);
+ CU_ASSERT(_iov_check(&dif_iovs[0], (void *)(0xDEADBEEF + 500), 12) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], (void *)(0xDEADBEEF + 520), 504) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], (void *)0xFEEDBEEF, 8) == true);
+
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 1500, 500, &mapped_len, &ctx);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(mapped_len == 500);
+ CU_ASSERT(_iov_check(&dif_iovs[0], (void *)(0xFEEDBEEF + 492), 36) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], (void *)(0xFEEDBEEF + 536), 464) == true);
+
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 2000, 48, &mapped_len, &ctx);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(mapped_len == 48);
+ CU_ASSERT(_iov_check(&dif_iovs[0], (void *)0xFEEDBEEF + 1000, 24) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], (void *)0xC0FFEE, 24) == true);
+}
+
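+/*
+ * Drive the internal _dif_generate_split() helper directly in 1000-, 3000- and
+ * (96 + 128)-byte pieces, check the running guard against spdk_crc16_t10dif()
+ * over the same ranges, and confirm the result matches what dif_generate()
+ * produces for an identical buffer.
+ */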
+static void
+_dif_generate_split_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct iovec iov;
+ uint8_t *buf1, *buf2;
+ struct _dif_sgl sgl;
+ uint16_t guard = 0, prev_guard;
+ uint32_t dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 0, 0, 0, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ buf1 = calloc(1, 4096 + 128);
+ SPDK_CU_ASSERT_FATAL(buf1 != NULL);
+ _iov_set_buf(&iov, buf1, 4096 + 128);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ guard = GUARD_SEED;
+ prev_guard = GUARD_SEED;
+
+ guard = _dif_generate_split(&sgl, 0, 1000, guard, 0, &ctx);
+ CU_ASSERT(sgl.iov_offset == 1000);
+ CU_ASSERT(guard == spdk_crc16_t10dif(prev_guard, buf1, 1000));
+
+ prev_guard = guard;
+
+ guard = _dif_generate_split(&sgl, 1000, 3000, guard, 0, &ctx);
+ CU_ASSERT(sgl.iov_offset == 4000);
+ CU_ASSERT(guard == spdk_crc16_t10dif(prev_guard, buf1 + 1000, 3000));
+
+ guard = _dif_generate_split(&sgl, 4000, 96 + 128, guard, 0, &ctx);
+ CU_ASSERT(guard == GUARD_SEED);
+ CU_ASSERT(sgl.iov_offset == 0);
+ CU_ASSERT(sgl.iovcnt == 0);
+
+ rc = ut_data_pattern_verify(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ rc = dif_verify(&sgl, 1, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ buf2 = calloc(1, 4096 + 128);
+ SPDK_CU_ASSERT_FATAL(buf2 != NULL);
+ _iov_set_buf(&iov, buf2, 4096 + 128);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ dif_generate(&sgl, 1, &ctx);
+
+ rc = ut_data_pattern_verify(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ rc = dif_verify(&sgl, 1, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = memcmp(buf1, buf2, 4096 + 128);
+ CU_ASSERT(rc == 0);
+
+ free(buf1);
+ free(buf2);
+}
+
+static void
+set_md_interleave_iovs_multi_segments_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct iovec iov1 = {}, iov2 = {}, dif_iovs[4] = {};
+ uint32_t dif_check_flags, data_len, read_len, data_offset, read_offset, mapped_len = 0;
+ uint8_t *buf1, *buf2;
+ int rc;
+
+ dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_check_flags, 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ /* The first data buffer:
+ * - The data buffer is split into multiple data segments.
+ * - For each data segment:
+ * - Create an iovec array that leaves a space for metadata for each block.
+ * - The vectored read is split, so the iovec array is re-created before every vectored read.
+ */
+ buf1 = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf1 != NULL);
+ _iov_set_buf(&iov1, buf1, (4096 + 128) * 4);
+
+ /* 1st data segment */
+ data_offset = 0;
+ data_len = 1024;
+
+ spdk_dif_ctx_set_data_offset(&ctx, data_offset);
+
+ read_offset = 0;
+
+ /* 1st read in 1st data segment */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ read_offset, data_len - read_offset,
+ &mapped_len, &ctx);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(mapped_len == 1024);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1, 1024) == true);
+
+ read_len = ut_readv(data_offset + read_offset, 1024, dif_iovs, 4);
+ CU_ASSERT(read_len == 1024);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, read_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ read_offset += read_len;
+ CU_ASSERT(read_offset == data_len);
+
+ /* 2nd data segment */
+ data_offset += data_len;
+ data_len = 3072 + 4096 * 2 + 512;
+
+ spdk_dif_ctx_set_data_offset(&ctx, data_offset);
+ _iov_set_buf(&iov1, buf1 + 1024, 3072 + 128 + (4096 + 128) * 3 + 512);
+
+ read_offset = 0;
+
+ /* 1st read in 2nd data segment */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ read_offset, data_len - read_offset,
+ &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 3072 + 4096 * 2 + 512);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + 1024, 3072) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], buf1 + 4096 + 128, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], buf1 + (4096 + 128) * 2, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], buf1 + (4096 + 128) * 3, 512) == true);
+
+ read_len = ut_readv(data_offset + read_offset, 3071, dif_iovs, 4);
+ CU_ASSERT(read_len == 3071);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, read_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ read_offset += read_len;
+
+ /* 2nd read in 2nd data segment */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ read_offset, data_len - read_offset,
+ &mapped_len, &ctx);
+ CU_ASSERT(rc == 4);
+ CU_ASSERT(mapped_len == 1 + 4096 * 2 + 512);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + 4095, 1) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[1], buf1 + 4096 + 128, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[2], buf1 + (4096 + 128) * 2, 4096) == true);
+ CU_ASSERT(_iov_check(&dif_iovs[3], buf1 + (4096 + 128) * 3, 512) == true);
+
+ read_len = ut_readv(data_offset + read_offset, 1 + 4096 * 2 + 512, dif_iovs, 4);
+ CU_ASSERT(read_len == 1 + 4096 * 2 + 512);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, read_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ read_offset += read_len;
+ CU_ASSERT(read_offset == data_len);
+
+ /* 3rd data segment */
+ data_offset += data_len;
+ data_len = 3584;
+
+ spdk_dif_ctx_set_data_offset(&ctx, data_offset);
+ _iov_set_buf(&iov1, buf1 + (4096 + 128) * 3 + 512, 3584 + 128);
+
+ read_offset = 0;
+
+ /* 1st read in 3rd data segment */
+ rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 4, &iov1, 1,
+ read_offset, data_len - read_offset,
+ &mapped_len, &ctx);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(mapped_len == 3584);
+ CU_ASSERT(_iov_check(&dif_iovs[0], buf1 + (4096 + 128) * 3 + 512, 3584) == true);
+
+ read_len = ut_readv(data_offset + read_offset, 3584, dif_iovs, 1);
+ CU_ASSERT(read_len == 3584);
+
+ rc = spdk_dif_generate_stream(&iov1, 1, read_offset, read_len, &ctx);
+ CU_ASSERT(rc == 0);
+
+ read_offset += read_len;
+ CU_ASSERT(read_offset == data_len);
+ data_offset += data_len;
+ CU_ASSERT(data_offset == 4096 * 4);
+
+ spdk_dif_ctx_set_data_offset(&ctx, 0);
+ _iov_set_buf(&iov1, buf1, (4096 + 128) * 4);
+
+ /* The second data buffer:
+ * - Set data pattern with a space for metadata for each block.
+ */
+ buf2 = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf2 != NULL);
+ _iov_set_buf(&iov2, buf2, (4096 + 128) * 4);
+
+ rc = ut_data_pattern_generate(&iov2, 1, 4096 + 128, 128, 4);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(&iov2, 1, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(&iov1, 1, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(&iov2, 1, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* Compare the first and the second data buffer by byte. */
+ rc = memcmp(buf1, buf2, (4096 + 128) * 4);
+ CU_ASSERT(rc == 0);
+
+ free(buf1);
+ free(buf2);
+}
+
+static void
+_dif_verify_split_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct iovec iov;
+ uint8_t *buf;
+ struct _dif_sgl sgl;
+ uint16_t guard = 0, prev_guard = 0;
+ uint32_t dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 0, 0, 0, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ buf = calloc(1, 4096 + 128);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ _iov_set_buf(&iov, buf, 4096 + 128);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ dif_generate(&sgl, 1, &ctx);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ guard = GUARD_SEED;
+ prev_guard = GUARD_SEED;
+
+ rc = _dif_verify_split(&sgl, 0, 1000, &guard, 0, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(guard == spdk_crc16_t10dif(prev_guard, buf, 1000));
+ CU_ASSERT(sgl.iov_offset == 1000);
+
+ prev_guard = guard;
+
+ rc = _dif_verify_split(&sgl, 1000, 3000, &guard, 0, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(guard == spdk_crc16_t10dif(prev_guard, buf + 1000, 3000));
+ CU_ASSERT(sgl.iov_offset == 4000);
+
+ rc = _dif_verify_split(&sgl, 4000, 96 + 128, &guard, 0, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(guard == GUARD_SEED);
+ CU_ASSERT(sgl.iov_offset == 0);
+ CU_ASSERT(sgl.iovcnt == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ rc = dif_verify(&sgl, 1, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ free(buf);
+}
+
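+/*
+ * Verify a 4-block DIF-interleaved buffer as three separate data segments with
+ * spdk_dif_verify_stream(), adjusting the context data offset per segment, and
+ * then once more as a whole with spdk_dif_verify().
+ */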
+static void
+dif_verify_stream_multi_segments_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct spdk_dif_error err_blk = {};
+ struct iovec iov = {};
+ uint8_t *buf;
+ uint32_t dif_flags;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ buf = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ _iov_set_buf(&iov, buf, (4096 + 128) * 4);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 4);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(&iov, 1, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* 1st data segment */
+ _iov_set_buf(&iov, buf, 1024);
+ spdk_dif_ctx_set_data_offset(&ctx, 0);
+
+ rc = spdk_dif_verify_stream(&iov, 1, 0, 1024, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* 2nd data segment */
+ _iov_set_buf(&iov, buf + 1024, (3072 + 128) + (4096 + 128) * 2 + 512);
+ spdk_dif_ctx_set_data_offset(&ctx, 1024);
+
+ rc = spdk_dif_verify_stream(&iov, 1, 0, 3072 + 4096 * 2 + 512, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* 3rd data segment */
+ _iov_set_buf(&iov, buf + (4096 + 128) * 3 + 512, 3584 + 128);
+ spdk_dif_ctx_set_data_offset(&ctx, 4096 * 3);
+
+ rc = spdk_dif_verify_stream(&iov, 1, 0, 3584, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ /* Verify all data segments in a single call. */
+ _iov_set_buf(&iov, buf, (4096 + 128) * 4);
+ spdk_dif_ctx_set_data_offset(&ctx, 0);
+
+ rc = spdk_dif_verify(&iov, 1, 4, &ctx, &err_blk);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(&iov, 1, 4096 + 128, 128, 4);
+ CU_ASSERT(rc == 0);
+
+ free(buf);
+}
+
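+/* Initial CRC32C value passed to the spdk_dif_update_crc32c*() calls below. */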
+#define UT_CRC32C_XOR 0xffffffffUL
+
+static void
+update_crc32c_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct iovec iovs[7];
+ uint32_t crc32c1, crc32c2, crc32c3, crc32c4;
+ uint32_t dif_flags;
+ int i, rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ /* data[0][255:0] */
+ _iov_alloc_buf(&iovs[0], 256);
+
+ /* data[0][511:256], md[0][0] */
+ _iov_alloc_buf(&iovs[1], 256 + 1);
+
+ /* md[0][4:1] */
+ _iov_alloc_buf(&iovs[2], 4);
+
+ /* md[0][7:5], data[1][122:0] */
+ _iov_alloc_buf(&iovs[3], 3 + 123);
+
+ /* data[1][511:123], md[1][5:0] */
+ _iov_alloc_buf(&iovs[4], 389 + 6);
+
+ /* md[1][7:6], data[2][511:0], md[2][7:0], data[3][431:0] */
+ _iov_alloc_buf(&iovs[5], 2 + 512 + 8 + 432);
+
+ /* data[3][511:432], md[3][7:0] */
+ _iov_alloc_buf(&iovs[6], 80 + 8);
+
+ rc = ut_data_pattern_generate(iovs, 7, 512 + 8, 8, 4);
+ CU_ASSERT(rc == 0);
+
+ crc32c1 = UT_CRC32C_XOR;
+
+ rc = spdk_dif_update_crc32c(iovs, 7, 4, &crc32c1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* Test that DIF generation does not affect the CRC for the split case. */
+ rc = spdk_dif_generate(iovs, 7, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ crc32c2 = UT_CRC32C_XOR;
+
+ rc = spdk_dif_update_crc32c(iovs, 7, 4, &crc32c2, &ctx);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(crc32c1 == crc32c2);
+
+ for (i = 0; i < 7; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+
+ /* Test that the CRC is the same regardless of splitting. */
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 512 + 8);
+ }
+
+ rc = ut_data_pattern_generate(iovs, 4, 512 + 8, 8, 4);
+ CU_ASSERT(rc == 0);
+
+ crc32c3 = UT_CRC32C_XOR;
+
+ rc = spdk_dif_update_crc32c(iovs, 4, 4, &crc32c3, &ctx);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(crc32c1 == crc32c3);
+
+ /* Test that DIF generation does not affect the CRC for the non-split case. */
+ rc = spdk_dif_generate(iovs, 4, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ crc32c4 = UT_CRC32C_XOR;
+
+ rc = spdk_dif_update_crc32c(iovs, 4, 4, &crc32c4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(crc32c1 == crc32c4);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+_dif_update_crc32c_split_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct iovec iov;
+ uint8_t *buf;
+ struct _dif_sgl sgl;
+ uint32_t dif_flags, crc32c, prev_crc32c;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 0, 0, 0, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ buf = calloc(1, 4096 + 128);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ _iov_set_buf(&iov, buf, 4096 + 128);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 1);
+ CU_ASSERT(rc == 0);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ dif_generate(&sgl, 1, &ctx);
+
+ _dif_sgl_init(&sgl, &iov, 1);
+
+ crc32c = _dif_update_crc32c_split(&sgl, 0, 1000, UT_CRC32C_XOR, &ctx);
+ CU_ASSERT(crc32c == spdk_crc32c_update(buf, 1000, UT_CRC32C_XOR));
+
+ prev_crc32c = crc32c;
+
+ crc32c = _dif_update_crc32c_split(&sgl, 1000, 3000, prev_crc32c, &ctx);
+ CU_ASSERT(crc32c == spdk_crc32c_update(buf + 1000, 3000, prev_crc32c));
+
+ prev_crc32c = crc32c;
+
+ crc32c = _dif_update_crc32c_split(&sgl, 4000, 96 + 128, prev_crc32c, &ctx);
+ CU_ASSERT(crc32c == spdk_crc32c_update(buf + 4000, 96, prev_crc32c));
+
+ CU_ASSERT(crc32c == spdk_crc32c_update(buf, 4096, UT_CRC32C_XOR));
+
+ free(buf);
+}
+
+static void
+dif_update_crc32c_stream_multi_segments_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ struct iovec iov = {};
+ uint8_t *buf;
+ uint32_t dif_flags, crc32c1, crc32c2;
+ int rc;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 0xFFFF, 0x22, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ buf = calloc(1, (4096 + 128) * 4);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ _iov_set_buf(&iov, buf, (4096 + 128) * 4);
+
+ rc = ut_data_pattern_generate(&iov, 1, 4096 + 128, 128, 4);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(&iov, 1, 4, &ctx);
+ CU_ASSERT(rc == 0);
+
+ crc32c1 = UT_CRC32C_XOR;
+ crc32c2 = UT_CRC32C_XOR;
+
+ /* 1st data segment */
+ _iov_set_buf(&iov, buf, 1024);
+ spdk_dif_ctx_set_data_offset(&ctx, 0);
+
+ rc = spdk_dif_update_crc32c_stream(&iov, 1, 0, 1024, &crc32c1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* 2nd data segment */
+ _iov_set_buf(&iov, buf + 1024, (3072 + 128) + (4096 + 128) * 2 + 512);
+ spdk_dif_ctx_set_data_offset(&ctx, 1024);
+
+ rc = spdk_dif_update_crc32c_stream(&iov, 1, 0, 3072 + 4096 * 2 + 512, &crc32c1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* 3rd data segment */
+ _iov_set_buf(&iov, buf + (4096 + 128) * 3 + 512, 3584 + 128);
+ spdk_dif_ctx_set_data_offset(&ctx, 4096 * 3);
+
+ rc = spdk_dif_update_crc32c_stream(&iov, 1, 0, 3584, &crc32c1, &ctx);
+ CU_ASSERT(rc == 0);
+
+ /* Update the CRC32C for all data segments in a single call. */
+ _iov_set_buf(&iov, buf, (4096 + 128) * 4);
+ spdk_dif_ctx_set_data_offset(&ctx, 0);
+
+ rc = spdk_dif_update_crc32c(&iov, 1, 4, &crc32c2, &ctx);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(crc32c1 == crc32c2);
+
+ free(buf);
+}
+
+static void
+get_range_with_md_test(void)
+{
+ struct spdk_dif_ctx ctx = {};
+ uint32_t buf_offset, buf_len;
+ int rc;
+
+ rc = spdk_dif_ctx_init(&ctx, 4096 + 128, 128, true, false, 0, 0, 0, 0, 0, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ spdk_dif_get_range_with_md(0, 2048, &buf_offset, &buf_len, &ctx);
+ CU_ASSERT(buf_offset == 0);
+ CU_ASSERT(buf_len == 2048);
+
+ spdk_dif_get_range_with_md(2048, 4096, &buf_offset, &buf_len, &ctx);
+ CU_ASSERT(buf_offset == 2048);
+ CU_ASSERT(buf_len == 4096 + 128);
+
+ spdk_dif_get_range_with_md(4096, 10240, &buf_offset, &buf_len, &ctx);
+ CU_ASSERT(buf_offset == 4096 + 128);
+ CU_ASSERT(buf_len == 10240 + 256);
+
+ spdk_dif_get_range_with_md(10240, 2048, &buf_offset, &buf_len, &ctx);
+ CU_ASSERT(buf_offset == 10240 + 256);
+ CU_ASSERT(buf_len == 2048 + 128);
+
+ buf_len = spdk_dif_get_length_with_md(6144, &ctx);
+ CU_ASSERT(buf_len == 6144 + 128);
+}
+
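+/*
+ * Helper for the reference tag remap tests: generate DIF with init_ref_tag,
+ * remap it to remapped_init_ref_tag with spdk_dif_remap_ref_tag(), and confirm
+ * the buffer verifies against a context initialized with the remapped tag.
+ */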
+static void
+dif_generate_remap_and_verify(struct iovec *iovs, int iovcnt,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint32_t remapped_init_ref_tag,
+ uint16_t apptag_mask, uint16_t app_tag)
+{
+ struct spdk_dif_ctx ctx = {};
+ int rc;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc, dif_type, dif_flags,
+ init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_generate(iovs, iovcnt, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ spdk_dif_ctx_set_remapped_init_ref_tag(&ctx, remapped_init_ref_tag);
+
+ rc = spdk_dif_remap_ref_tag(iovs, iovcnt, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, true, dif_loc, dif_type, dif_flags,
+ remapped_init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_verify(iovs, iovcnt, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size, md_size, num_blocks);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dif_sec_4096_md_128_prchk_7_multi_iovs_remap_test(void)
+{
+ struct iovec iovs[4];
+ int i, num_blocks;
+ uint32_t dif_flags;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], (512 + 8) * (i + 1));
+ num_blocks += i + 1;
+ }
+
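+ /* Same remap check (ref tag 22 -> 99) for both settings of dif_loc. */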
+ dif_generate_remap_and_verify(iovs, 4, 512 + 8, 8, num_blocks, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 99, 0xFFFF, 0x22);
+
+ dif_generate_remap_and_verify(iovs, 4, 512 + 8, 8, num_blocks, true, SPDK_DIF_TYPE1,
+ dif_flags, 22, 99, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
+static void
+dif_sec_4096_md_128_prchk_7_multi_iovs_complex_splits_remap_test(void)
+{
+ struct iovec iovs[11];
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
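+ /* Two 4096+128 blocks spread across 11 iovecs, split so that the guard, app tag,
+  * and ref tag fields each straddle an iovec boundary. */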
+ /* data[0][999:0] */
+ _iov_alloc_buf(&iovs[0], 1000);
+
+ /* data[0][4095:1000], guard[0][0] */
+ _iov_alloc_buf(&iovs[1], 3096 + 1);
+
+ /* guard[0][1], apptag[0][0] */
+ _iov_alloc_buf(&iovs[2], 1 + 1);
+
+ /* apptag[0][1], reftag[0][0] */
+ _iov_alloc_buf(&iovs[3], 1 + 1);
+
+ /* reftag[0][3:1], ignore[0][59:0] */
+ _iov_alloc_buf(&iovs[4], 3 + 60);
+
+ /* ignore[119:60], data[1][3050:0] */
+ _iov_alloc_buf(&iovs[5], 60 + 3051);
+
+ /* data[1][4095:3051], guard[1][0] */
+ _iov_alloc_buf(&iovs[6], 1045 + 1);
+
+ /* guard[1][1], apptag[1][0] */
+ _iov_alloc_buf(&iovs[7], 1 + 1);
+
+ /* apptag[1][1], reftag[1][0] */
+ _iov_alloc_buf(&iovs[8], 1 + 1);
+
+ /* reftag[1][3:1], ignore[1][9:0] */
+ _iov_alloc_buf(&iovs[9], 3 + 10);
+
+ /* ignore[1][127:9] */
+ _iov_alloc_buf(&iovs[10], 118);
+
+ dif_generate_remap_and_verify(iovs, 11, 4096 + 128, 128, 2, false, SPDK_DIF_TYPE1, dif_flags,
+ 22, 99, 0xFFFF, 0x22);
+ dif_generate_remap_and_verify(iovs, 11, 4096 + 128, 128, 2, true, SPDK_DIF_TYPE1, dif_flags,
+ 22, 99, 0xFFFF, 0x22);
+
+ for (i = 0; i < 11; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+}
+
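+/* Same generate/remap/verify flow as the DIF helper above, except that DIX keeps the
+ * protection metadata in a separate md_iov instead of interleaving it with the data. */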
+static void
+dix_generate_remap_and_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
+ uint32_t block_size, uint32_t md_size, uint32_t num_blocks,
+ bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint32_t remapped_init_ref_tag,
+ uint16_t apptag_mask, uint16_t app_tag)
+{
+ struct spdk_dif_ctx ctx;
+ int rc;
+
+ rc = ut_data_pattern_generate(iovs, iovcnt, block_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, false, dif_loc, dif_type, dif_flags,
+ init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_generate(iovs, iovcnt, md_iov, num_blocks, &ctx);
+ CU_ASSERT(rc == 0);
+
+ spdk_dif_ctx_set_remapped_init_ref_tag(&ctx, remapped_init_ref_tag);
+
+ rc = spdk_dix_remap_ref_tag(md_iov, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dif_ctx_init(&ctx, block_size, md_size, false, dif_loc, dif_type, dif_flags,
+ remapped_init_ref_tag, apptag_mask, app_tag, 0, GUARD_SEED);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_dix_verify(iovs, iovcnt, md_iov, num_blocks, &ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = ut_data_pattern_verify(iovs, iovcnt, block_size, 0, num_blocks);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+dix_sec_4096_md_128_prchk_7_multi_iovs_remap(void)
+{
+ struct iovec iovs[4], md_iov;
+ uint32_t dif_flags;
+ int i, num_blocks;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
+ num_blocks = 0;
+
+ for (i = 0; i < 4; i++) {
+ _iov_alloc_buf(&iovs[i], 4096 * (i + 1));
+ num_blocks += i + 1;
+ }
+ _iov_alloc_buf(&md_iov, 128 * num_blocks);
+
+ dix_generate_remap_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 99, 0xFFFF, 0x22);
+ dix_generate_remap_and_verify(iovs, 4, &md_iov, 4096, 128, num_blocks, true, SPDK_DIF_TYPE1,
+ dif_flags, 22, 99, 0xFFFF, 0x22);
+
+ for (i = 0; i < 4; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
+static void
+dix_sec_512_md_8_prchk_7_multi_iovs_complex_splits_remap(void)
+{
+ struct iovec iovs[6], md_iov;
+ uint32_t dif_flags;
+ int i;
+
+ dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
+ SPDK_DIF_FLAGS_REFTAG_CHECK;
+
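+ /* Four 512-byte data blocks spread over six misaligned iovecs; the 8 bytes of metadata
+  * per block all live in the single 32-byte md_iov. */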
+ /* data[0][255:0] */
+ _iov_alloc_buf(&iovs[0], 256);
+
+ /* data[0][511:256], data[1][255:0] */
+ _iov_alloc_buf(&iovs[1], 256 + 256);
+
+ /* data[1][383:256] */
+ _iov_alloc_buf(&iovs[2], 128);
+
+ /* data[1][384] */
+ _iov_alloc_buf(&iovs[3], 1);
+
+ /* data[1][510:385] */
+ _iov_alloc_buf(&iovs[4], 126);
+
+ /* data[1][511], data[2][511:0], data[3][511:0] */
+ _iov_alloc_buf(&iovs[5], 1 + 512 * 2);
+
+ _iov_alloc_buf(&md_iov, 8 * 4);
+
+ dix_generate_remap_and_verify(iovs, 6, &md_iov, 512, 8, 4, false, SPDK_DIF_TYPE1,
+ dif_flags, 22, 99, 0xFFFF, 0x22);
+
+ for (i = 0; i < 6; i++) {
+ _iov_free_buf(&iovs[i]);
+ }
+ _iov_free_buf(&md_iov);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("dif", NULL, NULL);
+
+ CU_ADD_TEST(suite, dif_generate_and_verify_test);
+ CU_ADD_TEST(suite, dif_disable_check_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_0_error_test);
+ CU_ADD_TEST(suite, dif_guard_seed_test);
+ CU_ADD_TEST(suite, dif_disable_sec_512_md_8_single_iov_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_0_single_iov_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_0_1_2_4_multi_iovs_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_prchk_7_multi_iovs_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_split_data_and_md_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_split_data_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_split_guard_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_split_apptag_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_split_reftag_test);
+ CU_ADD_TEST(suite, dif_sec_512_md_8_prchk_7_multi_iovs_complex_splits_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_prchk_7_multi_iovs_complex_splits_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_data_and_md_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_data_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_guard_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_apptag_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_reftag_test);
+ CU_ADD_TEST(suite, dif_copy_sec_512_md_8_prchk_0_single_iov);
+ CU_ADD_TEST(suite, dif_copy_sec_512_md_8_prchk_0_1_2_4_multi_iovs);
+ CU_ADD_TEST(suite, dif_copy_sec_4096_md_128_prchk_7_multi_iovs);
+ CU_ADD_TEST(suite, dif_copy_sec_512_md_8_prchk_7_multi_iovs_split_data);
+ CU_ADD_TEST(suite, dif_copy_sec_512_md_8_prchk_7_multi_iovs_complex_splits);
+ CU_ADD_TEST(suite, dif_copy_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test);
+ CU_ADD_TEST(suite, dif_copy_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_test);
+ CU_ADD_TEST(suite, dix_sec_512_md_0_error);
+ CU_ADD_TEST(suite, dix_sec_512_md_8_prchk_0_single_iov);
+ CU_ADD_TEST(suite, dix_sec_512_md_8_prchk_0_1_2_4_multi_iovs);
+ CU_ADD_TEST(suite, dix_sec_4096_md_128_prchk_7_multi_iovs);
+ CU_ADD_TEST(suite, dix_sec_512_md_8_prchk_7_multi_iovs_split_data);
+ CU_ADD_TEST(suite, dix_sec_512_md_8_prchk_7_multi_iovs_complex_splits);
+ CU_ADD_TEST(suite, dix_sec_4096_md_128_inject_1_2_4_8_multi_iovs_test);
+ CU_ADD_TEST(suite, dix_sec_4096_md_128_inject_1_2_4_8_multi_iovs_split_test);
+ CU_ADD_TEST(suite, set_md_interleave_iovs_test);
+ CU_ADD_TEST(suite, set_md_interleave_iovs_split_test);
+ CU_ADD_TEST(suite, dif_generate_stream_test);
+ CU_ADD_TEST(suite, set_md_interleave_iovs_alignment_test);
+ CU_ADD_TEST(suite, _dif_generate_split_test);
+ CU_ADD_TEST(suite, set_md_interleave_iovs_multi_segments_test);
+ CU_ADD_TEST(suite, _dif_verify_split_test);
+ CU_ADD_TEST(suite, dif_verify_stream_multi_segments_test);
+ CU_ADD_TEST(suite, update_crc32c_test);
+ CU_ADD_TEST(suite, _dif_update_crc32c_split_test);
+ CU_ADD_TEST(suite, dif_update_crc32c_stream_multi_segments_test);
+ CU_ADD_TEST(suite, get_range_with_md_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_prchk_7_multi_iovs_remap_test);
+ CU_ADD_TEST(suite, dif_sec_4096_md_128_prchk_7_multi_iovs_complex_splits_remap_test);
+ CU_ADD_TEST(suite, dix_sec_4096_md_128_prchk_7_multi_iovs_remap);
+ CU_ADD_TEST(suite, dix_sec_512_md_8_prchk_7_multi_iovs_complex_splits_remap);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/iov.c/.gitignore b/src/spdk/test/unit/lib/util/iov.c/.gitignore
new file mode 100644
index 000000000..94d8d9621
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/iov.c/.gitignore
@@ -0,0 +1 @@
+iov_ut
diff --git a/src/spdk/test/unit/lib/util/iov.c/Makefile b/src/spdk/test/unit/lib/util/iov.c/Makefile
new file mode 100644
index 000000000..c7b4ccd5a
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/iov.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = iov_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/iov.c/iov_ut.c b/src/spdk/test/unit/lib/util/iov.c/iov_ut.c
new file mode 100644
index 000000000..248ab91ff
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/iov.c/iov_ut.c
@@ -0,0 +1,249 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/iov.c"
+
+static int
+_check_val(void *buf, size_t len, uint8_t val)
+{
+ size_t i;
+ uint8_t *data = buf;
+
+ for (i = 0; i < len; i++) {
+ if (data[i] != val) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void
+test_single_iov(void)
+{
+ struct iovec siov[1];
+ struct iovec diov[1];
+ uint8_t sdata[64];
+ uint8_t ddata[64];
+ ssize_t rc;
+
+ /* Simplest cases- 1 element in each iovec. */
+
+ /* Same size. */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ siov[0].iov_base = sdata;
+ siov[0].iov_len = sizeof(sdata);
+ diov[0].iov_base = ddata;
+ diov[0].iov_len = sizeof(ddata);
+
+ rc = spdk_iovcpy(siov, 1, diov, 1);
+ CU_ASSERT(rc == sizeof(sdata));
+ CU_ASSERT(_check_val(ddata, 64, 1) == 0);
+
+ /* Source smaller than dest */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ siov[0].iov_base = sdata;
+ siov[0].iov_len = 48;
+ diov[0].iov_base = ddata;
+ diov[0].iov_len = sizeof(ddata);
+
+ rc = spdk_iovcpy(siov, 1, diov, 1);
+ CU_ASSERT(rc == 48);
+ CU_ASSERT(_check_val(ddata, 48, 1) == 0);
+ CU_ASSERT(_check_val(&ddata[48], 16, 0) == 0);
+
+ /* Dest smaller than source */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ siov[0].iov_base = sdata;
+ siov[0].iov_len = sizeof(sdata);
+ diov[0].iov_base = ddata;
+ diov[0].iov_len = 48;
+
+ rc = spdk_iovcpy(siov, 1, diov, 1);
+ CU_ASSERT(rc == 48);
+ CU_ASSERT(_check_val(ddata, 48, 1) == 0);
+ CU_ASSERT(_check_val(&ddata[48], 16, 0) == 0);
+}
+
+static void
+test_simple_iov(void)
+{
+ struct iovec siov[4];
+ struct iovec diov[4];
+ uint8_t sdata[64];
+ uint8_t ddata[64];
+ ssize_t rc;
+ int i;
+
+ /* Simple cases with 4 iov elements */
+
+ /* Same size. */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ for (i = 0; i < 4; i++) {
+ siov[i].iov_base = sdata + (16 * i);
+ siov[i].iov_len = 16;
+ diov[i].iov_base = ddata + (16 * i);
+ diov[i].iov_len = 16;
+ }
+
+ rc = spdk_iovcpy(siov, 4, diov, 4);
+ CU_ASSERT(rc == sizeof(sdata));
+ CU_ASSERT(_check_val(ddata, 64, 1) == 0);
+
+ /* Source smaller than dest */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ for (i = 0; i < 4; i++) {
+ siov[i].iov_base = sdata + (8 * i);
+ siov[i].iov_len = 8;
+ diov[i].iov_base = ddata + (16 * i);
+ diov[i].iov_len = 16;
+ }
+
+ rc = spdk_iovcpy(siov, 4, diov, 4);
+ CU_ASSERT(rc == 32);
+ CU_ASSERT(_check_val(ddata, 32, 1) == 0);
+ CU_ASSERT(_check_val(&ddata[32], 32, 0) == 0);
+
+ /* Dest smaller than source */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ for (i = 0; i < 4; i++) {
+ siov[i].iov_base = sdata + (16 * i);
+ siov[i].iov_len = 16;
+ diov[i].iov_base = ddata + (8 * i);
+ diov[i].iov_len = 8;
+ }
+
+ rc = spdk_iovcpy(siov, 4, diov, 4);
+ CU_ASSERT(rc == 32);
+ CU_ASSERT(_check_val(ddata, 32, 1) == 0);
+ CU_ASSERT(_check_val(&ddata[32], 32, 0) == 0);
+}
+
+static void
+test_complex_iov(void)
+{
+ struct iovec siov[4];
+ struct iovec diov[4];
+ uint8_t sdata[64];
+ uint8_t ddata[64];
+ ssize_t rc;
+ int i;
+
+ /* More source elements */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ for (i = 0; i < 4; i++) {
+ siov[i].iov_base = sdata + (16 * i);
+ siov[i].iov_len = 16;
+ }
+ diov[0].iov_base = ddata;
+ diov[0].iov_len = sizeof(ddata);
+
+ rc = spdk_iovcpy(siov, 4, diov, 1);
+ CU_ASSERT(rc == sizeof(sdata));
+ CU_ASSERT(_check_val(ddata, 64, 1) == 0);
+
+ /* More dest elements */
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ for (i = 0; i < 4; i++) {
+ diov[i].iov_base = ddata + (16 * i);
+ diov[i].iov_len = 16;
+ }
+ siov[0].iov_base = sdata;
+ siov[0].iov_len = sizeof(sdata);
+
+ rc = spdk_iovcpy(siov, 1, diov, 4);
+ CU_ASSERT(rc == sizeof(sdata));
+ CU_ASSERT(_check_val(ddata, 64, 1) == 0);
+
+ /* Build one by hand that's really terrible */
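+ /* Source split as 1/13/6/44 bytes and destination as 31/9/1/23, so no element boundaries line up. */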
+ memset(sdata, 1, sizeof(sdata));
+ memset(ddata, 0, sizeof(ddata));
+ siov[0].iov_base = sdata;
+ siov[0].iov_len = 1;
+ siov[1].iov_base = siov[0].iov_base + siov[0].iov_len;
+ siov[1].iov_len = 13;
+ siov[2].iov_base = siov[1].iov_base + siov[1].iov_len;
+ siov[2].iov_len = 6;
+ siov[3].iov_base = siov[2].iov_base + siov[2].iov_len;
+ siov[3].iov_len = 44;
+
+ diov[0].iov_base = ddata;
+ diov[0].iov_len = 31;
+ diov[1].iov_base = diov[0].iov_base + diov[0].iov_len;
+ diov[1].iov_len = 9;
+ diov[2].iov_base = diov[1].iov_base + diov[1].iov_len;
+ diov[2].iov_len = 1;
+ diov[3].iov_base = diov[2].iov_base + diov[2].iov_len;
+ diov[3].iov_len = 23;
+
+ rc = spdk_iovcpy(siov, 4, diov, 4);
+ CU_ASSERT(rc == 64);
+ CU_ASSERT(_check_val(ddata, 64, 1) == 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("iov", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_single_iov);
+ CU_ADD_TEST(suite, test_simple_iov);
+ CU_ADD_TEST(suite, test_complex_iov);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/math.c/.gitignore b/src/spdk/test/unit/lib/util/math.c/.gitignore
new file mode 100644
index 000000000..e51846f2b
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/math.c/.gitignore
@@ -0,0 +1 @@
+math_ut
diff --git a/src/spdk/test/unit/lib/util/math.c/Makefile b/src/spdk/test/unit/lib/util/math.c/Makefile
new file mode 100644
index 000000000..e8b20c6be
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/math.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+TEST_FILE = math_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/math.c/math_ut.c b/src/spdk/test/unit/lib/util/math.c/math_ut.c
new file mode 100644
index 000000000..66e063e12
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/math.c/math_ut.c
@@ -0,0 +1,81 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/math.c"
+
+static void
+test_serial_number_arithmetic(void)
+{
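+ /* spdk_sn32_add/gt/lt use wrap-around (serial number) comparisons, so a value that has
+  * wrapped past UINT32_MAX still compares as greater, e.g. spdk_sn32_gt(0, UINT32_MAX). */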
+ CU_ASSERT(spdk_sn32_add(0, 1) == 1);
+ CU_ASSERT(spdk_sn32_add(1, 1) == 2);
+ CU_ASSERT(spdk_sn32_add(1, 2) == 3);
+ CU_ASSERT(spdk_sn32_add(1, UINT32_MAX) == 0);
+ CU_ASSERT(spdk_sn32_add(UINT32_MAX, UINT32_MAX) == UINT32_MAX - 1);
+ CU_ASSERT(spdk_sn32_gt(1, 0) == true);
+ CU_ASSERT(spdk_sn32_gt(2, 1) == true);
+ CU_ASSERT(spdk_sn32_gt(UINT32_MAX, UINT32_MAX - 1) == true);
+ CU_ASSERT(spdk_sn32_gt(0, UINT32_MAX) == true);
+ CU_ASSERT(spdk_sn32_gt(100, UINT32_MAX - 100) == true);
+ CU_ASSERT(spdk_sn32_lt(1, 0) == false);
+ CU_ASSERT(spdk_sn32_lt(2, 1) == false);
+ CU_ASSERT(spdk_sn32_lt(UINT32_MAX, UINT32_MAX - 1) == false);
+ CU_ASSERT(spdk_sn32_lt(0, UINT32_MAX) == false);
+ CU_ASSERT(spdk_sn32_lt(100, UINT32_MAX - 100) == false);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("math", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_serial_number_arithmetic);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/pipe.c/.gitignore b/src/spdk/test/unit/lib/util/pipe.c/.gitignore
new file mode 100644
index 000000000..493aa5572
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/pipe.c/.gitignore
@@ -0,0 +1 @@
+pipe_ut
diff --git a/src/spdk/test/unit/lib/util/pipe.c/Makefile b/src/spdk/test/unit/lib/util/pipe.c/Makefile
new file mode 100644
index 000000000..99592cfb4
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/pipe.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = pipe_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/pipe.c/pipe_ut.c b/src/spdk/test/unit/lib/util/pipe.c/pipe_ut.c
new file mode 100644
index 000000000..8ac76dfe9
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/pipe.c/pipe_ut.c
@@ -0,0 +1,653 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/pipe.c"
+#include "common/lib/test_env.c"
+
+static void
+test_create_destroy(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
+ spdk_pipe_destroy(pipe);
+}
+
+static void
+test_write_get_buffer(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+ struct iovec iovs[2];
+ int rc;
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
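+ /* The pipe is built on 10 bytes but keeps one byte in reserve to tell a full pipe from
+  * an empty one, so at most 9 bytes are ever writable (see the 10-byte request below). */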
+ /* Get some available memory. */
+ rc = spdk_pipe_writer_get_buffer(pipe, 5, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 5);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get 0 bytes. */
+ rc = spdk_pipe_writer_get_buffer(pipe, 0, iovs);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iovs[0].iov_base == NULL);
+ CU_ASSERT(iovs[0].iov_len == 0);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get all available memory */
+ rc = spdk_pipe_writer_get_buffer(pipe, 9, iovs);
+ CU_ASSERT(rc == 9);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 9);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get the full size of the data buffer backing the pipe, which isn't allowed */
+ rc = spdk_pipe_writer_get_buffer(pipe, 10, iovs);
+ CU_ASSERT(rc == 9);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 9);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Advance the write pointer 7 bytes in. */
+ pipe->write = 7;
+
+ /* Get all of the available memory. */
+ rc = spdk_pipe_writer_get_buffer(pipe, 2, iovs);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 2);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more than the available memory */
+ rc = spdk_pipe_writer_get_buffer(pipe, 3, iovs);
+ CU_ASSERT(rc == 2);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 2);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Advance the read pointer 3 bytes in. */
+ pipe->read = 3;
+
+ /* Get all of the available memory. */
+ rc = spdk_pipe_writer_get_buffer(pipe, 5, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 3);
+ CU_ASSERT(iovs[1].iov_base == mem);
+ CU_ASSERT(iovs[1].iov_len == 2);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 3);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more than the available memory */
+ rc = spdk_pipe_writer_get_buffer(pipe, 6, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 3);
+ CU_ASSERT(iovs[1].iov_base == mem);
+ CU_ASSERT(iovs[1].iov_len == 2);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 3);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Advance the read pointer past the write pointer */
+ pipe->read = 9;
+
+ /* Get all of the available memory. */
+ rc = spdk_pipe_writer_get_buffer(pipe, 1, iovs);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 1);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 9);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more than the available memory */
+ rc = spdk_pipe_writer_get_buffer(pipe, 2, iovs);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 1);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 9);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Fill the pipe */
+ pipe->write = 8;
+
+ /* Get data while the pipe is full */
+ rc = spdk_pipe_writer_get_buffer(pipe, 1, iovs);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iovs[0].iov_base == NULL);
+ CU_ASSERT(iovs[0].iov_len == 0);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 8);
+ CU_ASSERT(pipe->read == 9);
+
+ spdk_pipe_destroy(pipe);
+}
+
+static void
+test_write_advance(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+ int rc;
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
+ /* Advance half way through the pipe */
+ rc = spdk_pipe_writer_advance(pipe, 5);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->write == 5);
+ CU_ASSERT(pipe->read == 0);
+
+ pipe->write = 0;
+
+ /* Advance to the end of the pipe */
+ rc = spdk_pipe_writer_advance(pipe, 9);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->write == 9);
+ CU_ASSERT(pipe->read == 0);
+
+ pipe->write = 0;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_writer_advance(pipe, 10);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 0);
+
+ /* Move the read pointer forward */
+ pipe->write = 0;
+ pipe->read = 5;
+
+ /* Advance to the end of the pipe */
+ rc = spdk_pipe_writer_advance(pipe, 4);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->write == 4);
+ CU_ASSERT(pipe->read == 5);
+
+ pipe->write = 0;
+ pipe->read = 5;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_writer_advance(pipe, 5);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 5);
+
+ /* Test wrap around */
+ pipe->write = 7;
+ pipe->read = 3;
+
+ /* Advance to the end of the pipe */
+ rc = spdk_pipe_writer_advance(pipe, 5);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->write == 2);
+ CU_ASSERT(pipe->read == 3);
+
+ pipe->write = 7;
+ pipe->read = 3;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_writer_advance(pipe, 6);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->write == 7);
+ CU_ASSERT(pipe->read == 3);
+
+ spdk_pipe_destroy(pipe);
+}
+
+static void
+test_read_get_buffer(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+ struct iovec iovs[2];
+ int rc;
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
+ /* Set the write pointer to the end, making all data available. */
+ pipe->write = 9;
+
+ /* Get half the available memory. */
+ rc = spdk_pipe_reader_get_buffer(pipe, 5, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 5);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 9);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get 0 bytes. */
+ rc = spdk_pipe_reader_get_buffer(pipe, 0, iovs);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iovs[0].iov_base == NULL);
+ CU_ASSERT(iovs[0].iov_len == 0);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 9);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get all available memory */
+ rc = spdk_pipe_reader_get_buffer(pipe, 9, iovs);
+ CU_ASSERT(rc == 9);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 9);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 9);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more bytes than exist */
+ rc = spdk_pipe_reader_get_buffer(pipe, 10, iovs);
+ CU_ASSERT(rc == 9);
+ CU_ASSERT(iovs[0].iov_base == mem);
+ CU_ASSERT(iovs[0].iov_len == 9);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 9);
+ CU_ASSERT(pipe->read == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Advance the read pointer 5 bytes in. */
+ pipe->read = 5;
+ pipe->write = 0;
+
+ /* Get all of the available memory. */
+ rc = spdk_pipe_reader_get_buffer(pipe, 5, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (mem + 5));
+ CU_ASSERT(iovs[0].iov_len == 5);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 5);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more than the available memory */
+ rc = spdk_pipe_reader_get_buffer(pipe, 6, iovs);
+ CU_ASSERT(rc == 5);
+ CU_ASSERT(iovs[0].iov_base == (mem + 5));
+ CU_ASSERT(iovs[0].iov_len == 5);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 0);
+ CU_ASSERT(pipe->read == 5);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Invert the write and read pointers */
+ pipe->read = 7;
+ pipe->write = 3;
+
+ /* Get all of the available memory. */
+ rc = spdk_pipe_reader_get_buffer(pipe, 6, iovs);
+ CU_ASSERT(rc == 6);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 3);
+ CU_ASSERT(iovs[1].iov_base == mem);
+ CU_ASSERT(iovs[1].iov_len == 3);
+ CU_ASSERT(pipe->write == 3);
+ CU_ASSERT(pipe->read == 7);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get more than the available memory */
+ rc = spdk_pipe_reader_get_buffer(pipe, 7, iovs);
+ CU_ASSERT(rc == 6);
+ CU_ASSERT(iovs[0].iov_base == (mem + 7));
+ CU_ASSERT(iovs[0].iov_len == 3);
+ CU_ASSERT(iovs[1].iov_base == mem);
+ CU_ASSERT(iovs[1].iov_len == 3);
+ CU_ASSERT(pipe->write == 3);
+ CU_ASSERT(pipe->read == 7);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Empty the pipe */
+ pipe->read = 8;
+ pipe->write = 8;
+
+ /* Get data while the pipe is empty */
+ rc = spdk_pipe_reader_get_buffer(pipe, 1, iovs);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iovs[0].iov_base == NULL);
+ CU_ASSERT(iovs[0].iov_len == 0);
+ CU_ASSERT(iovs[1].iov_base == NULL);
+ CU_ASSERT(iovs[1].iov_len == 0);
+ CU_ASSERT(pipe->write == 8);
+ CU_ASSERT(pipe->read == 8);
+
+ spdk_pipe_destroy(pipe);
+}
+
+static void
+test_read_advance(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+ int rc;
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
+ pipe->read = 0;
+ pipe->write = 9;
+
+ /* Advance half way through the pipe */
+ rc = spdk_pipe_reader_advance(pipe, 5);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->read == 5);
+ CU_ASSERT(pipe->write == 9);
+
+ pipe->read = 0;
+ pipe->write = 9;
+
+ /* Advance to the end of the pipe */
+ rc = spdk_pipe_reader_advance(pipe, 9);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->read == 9);
+ CU_ASSERT(pipe->write == 9);
+
+ pipe->read = 0;
+ pipe->write = 9;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_reader_advance(pipe, 10);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->read == 0);
+ CU_ASSERT(pipe->write == 9);
+
+ /* Move the write pointer forward */
+ pipe->read = 0;
+ pipe->write = 5;
+
+ /* Advance to the end of the pipe */
+ rc = spdk_pipe_reader_advance(pipe, 5);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->write == 5);
+ CU_ASSERT(pipe->read == 5);
+
+ pipe->read = 0;
+ pipe->write = 5;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_reader_advance(pipe, 6);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->read == 0);
+ CU_ASSERT(pipe->write == 5);
+
+ /* Test wrap around */
+ pipe->read = 7;
+ pipe->write = 3;
+
+ /* Advance to the end of the pipe */
+ rc = spdk_pipe_reader_advance(pipe, 6);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(pipe->read == 3);
+ CU_ASSERT(pipe->write == 3);
+
+ pipe->read = 7;
+ pipe->write = 3;
+
+ /* Advance beyond the end */
+ rc = spdk_pipe_reader_advance(pipe, 7);
+ CU_ASSERT(rc == -EINVAL);
+ CU_ASSERT(pipe->read == 7);
+ CU_ASSERT(pipe->write == 3);
+
+ spdk_pipe_destroy(pipe);
+}
+
+static void
+test_data(void)
+{
+ struct spdk_pipe *pipe;
+ uint8_t mem[10];
+ struct iovec iovs[2];
+ uint8_t *data;
+ int rc;
+ size_t i;
+
+ memset(mem, 0, sizeof(mem));
+ memset(iovs, 0, sizeof(iovs));
+
+ pipe = spdk_pipe_create(mem, sizeof(mem));
+ SPDK_CU_ASSERT_FATAL(pipe != NULL);
+
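+ /* End-to-end check: bytes written via writer_get_buffer()/writer_advance() appear in the
+  * backing memory and come back via reader_get_buffer()/reader_advance(), across the wrap. */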
+ /* Place 1 byte in the pipe */
+ rc = spdk_pipe_writer_get_buffer(pipe, 1, iovs);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_base != NULL);
+ CU_ASSERT(iovs[0].iov_len == 1);
+
+ memset(iovs[0].iov_base, 'A', 1);
+
+ rc = spdk_pipe_writer_advance(pipe, 1);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(mem[0] == 'A');
+ CU_ASSERT(mem[1] == 0);
+ CU_ASSERT(mem[2] == 0);
+ CU_ASSERT(mem[3] == 0);
+ CU_ASSERT(mem[4] == 0);
+ CU_ASSERT(mem[5] == 0);
+ CU_ASSERT(mem[6] == 0);
+ CU_ASSERT(mem[7] == 0);
+ CU_ASSERT(mem[8] == 0);
+ CU_ASSERT(mem[9] == 0);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get 1 byte from the pipe */
+ CU_ASSERT(spdk_pipe_reader_bytes_available(pipe) == 1);
+ rc = spdk_pipe_reader_get_buffer(pipe, 10, iovs);
+ CU_ASSERT(rc == 1);
+
+ data = iovs[0].iov_base;
+ CU_ASSERT(*data == 'A');
+
+ spdk_pipe_reader_advance(pipe, 1);
+
+ /* Put 9 more bytes in the pipe, so every byte has
+ * been written */
+ rc = spdk_pipe_writer_get_buffer(pipe, 9, iovs);
+ CU_ASSERT(rc == 9);
+ CU_ASSERT(iovs[0].iov_len == 9);
+ CU_ASSERT(iovs[1].iov_len == 0);
+
+ memset(iovs[0].iov_base, 'B', iovs[0].iov_len);
+
+ rc = spdk_pipe_writer_advance(pipe, 9);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(mem[0] == 'A');
+ CU_ASSERT(mem[1] == 'B');
+ CU_ASSERT(mem[2] == 'B');
+ CU_ASSERT(mem[3] == 'B');
+ CU_ASSERT(mem[4] == 'B');
+ CU_ASSERT(mem[5] == 'B');
+ CU_ASSERT(mem[6] == 'B');
+ CU_ASSERT(mem[7] == 'B');
+ CU_ASSERT(mem[8] == 'B');
+ CU_ASSERT(mem[9] == 'B');
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get 7 bytes of the previously written 9. */
+ CU_ASSERT(spdk_pipe_reader_bytes_available(pipe) == 9);
+ rc = spdk_pipe_reader_get_buffer(pipe, 7, iovs);
+ CU_ASSERT(rc == 7);
+
+ CU_ASSERT(iovs[0].iov_len == 7);
+ data = iovs[0].iov_base;
+ for (i = 0; i < iovs[0].iov_len; i++) {
+ CU_ASSERT(data[i] == 'B');
+ }
+
+ spdk_pipe_reader_advance(pipe, 7);
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Put 1 more byte in the pipe, overwriting the original 'A' */
+ rc = spdk_pipe_writer_get_buffer(pipe, 1, iovs);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(iovs[0].iov_len == 1);
+ CU_ASSERT(iovs[1].iov_len == 0);
+
+ memset(iovs[0].iov_base, 'C', iovs[0].iov_len);
+
+ rc = spdk_pipe_writer_advance(pipe, 1);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(mem[0] == 'C');
+ CU_ASSERT(mem[1] == 'B');
+ CU_ASSERT(mem[2] == 'B');
+ CU_ASSERT(mem[3] == 'B');
+ CU_ASSERT(mem[4] == 'B');
+ CU_ASSERT(mem[5] == 'B');
+ CU_ASSERT(mem[6] == 'B');
+ CU_ASSERT(mem[7] == 'B');
+ CU_ASSERT(mem[8] == 'B');
+ CU_ASSERT(mem[9] == 'B');
+
+ memset(iovs, 0, sizeof(iovs));
+
+ /* Get all of the data out of the pipe */
+ CU_ASSERT(spdk_pipe_reader_bytes_available(pipe) == 3);
+ rc = spdk_pipe_reader_get_buffer(pipe, 3, iovs);
+ CU_ASSERT(rc == 3);
+ CU_ASSERT(iovs[0].iov_len == 2);
+ CU_ASSERT(iovs[1].iov_len == 1);
+
+ data = iovs[0].iov_base;
+ CU_ASSERT(data[0] == 'B');
+ CU_ASSERT(data[1] == 'B');
+ data = iovs[1].iov_base;
+ CU_ASSERT(data[0] == 'C');
+
+ spdk_pipe_reader_advance(pipe, 3);
+
+ spdk_pipe_destroy(pipe);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("pipe", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_create_destroy);
+ CU_ADD_TEST(suite, test_write_get_buffer);
+ CU_ADD_TEST(suite, test_write_advance);
+ CU_ADD_TEST(suite, test_read_get_buffer);
+ CU_ADD_TEST(suite, test_read_advance);
+ CU_ADD_TEST(suite, test_data);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/string.c/.gitignore b/src/spdk/test/unit/lib/util/string.c/.gitignore
new file mode 100644
index 000000000..5d85d4d93
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/string.c/.gitignore
@@ -0,0 +1 @@
+string_ut
diff --git a/src/spdk/test/unit/lib/util/string.c/Makefile b/src/spdk/test/unit/lib/util/string.c/Makefile
new file mode 100644
index 000000000..016fb07e9
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/string.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = string_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/string.c/string_ut.c b/src/spdk/test/unit/lib/util/string.c/string_ut.c
new file mode 100644
index 000000000..d61c62536
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/string.c/string_ut.c
@@ -0,0 +1,407 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/string.c"
+
+static void
+test_parse_ip_addr(void)
+{
+ int rc;
+ char *host;
+ char *port;
+ char ip[255];
+
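+ /* spdk_parse_ip_addr() only separates host and port (and strips IPv6 brackets); it does
+  * not validate the address, which is why "123.456.789.0" below is accepted. */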
+ /* IPv4 */
+ snprintf(ip, 255, "%s", "192.168.0.1");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "192.168.0.1") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 11);
+ CU_ASSERT_EQUAL(port, NULL);
+
+ /* IPv4 with port */
+ snprintf(ip, 255, "%s", "123.456.789.0:5520");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "123.456.789.0") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 13);
+ SPDK_CU_ASSERT_FATAL(port != NULL);
+ CU_ASSERT(strcmp(port, "5520") == 0);
+ CU_ASSERT_EQUAL(strlen(port), 4);
+
+ /* IPv6 */
+ snprintf(ip, 255, "%s", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "2001:db8:85a3:8d3:1319:8a2e:370:7348") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 36);
+ CU_ASSERT_EQUAL(port, NULL);
+
+ /* IPv6 with port */
+ snprintf(ip, 255, "%s", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "2001:db8:85a3:8d3:1319:8a2e:370:7348") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 36);
+ SPDK_CU_ASSERT_FATAL(port != NULL);
+ CU_ASSERT(strcmp(port, "443") == 0);
+ CU_ASSERT_EQUAL(strlen(port), 3);
+
+ /* IPv6 dangling colon */
+ snprintf(ip, 255, "%s", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "2001:db8:85a3:8d3:1319:8a2e:370:7348") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 36);
+ CU_ASSERT_EQUAL(port, NULL);
+}
+
+static void
+test_str_chomp(void)
+{
+ char s[1024];
+
+ /* One \n newline */
+ snprintf(s, sizeof(s), "%s", "hello world\n");
+ CU_ASSERT(spdk_str_chomp(s) == 1);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* One \r\n newline */
+ snprintf(s, sizeof(s), "%s", "hello world\r\n");
+ CU_ASSERT(spdk_str_chomp(s) == 2);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* No newlines */
+ snprintf(s, sizeof(s), "%s", "hello world");
+ CU_ASSERT(spdk_str_chomp(s) == 0);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* Two newlines */
+ snprintf(s, sizeof(s), "%s", "hello world\n\n");
+ CU_ASSERT(spdk_str_chomp(s) == 2);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* Empty string */
+ snprintf(s, sizeof(s), "%s", "");
+ CU_ASSERT(spdk_str_chomp(s) == 0);
+ CU_ASSERT(strcmp(s, "") == 0);
+
+ /* One-character string with only \n */
+ snprintf(s, sizeof(s), "%s", "\n");
+ CU_ASSERT(spdk_str_chomp(s) == 1);
+ CU_ASSERT(strcmp(s, "") == 0);
+
+ /* One-character string without a newline */
+ snprintf(s, sizeof(s), "%s", "a");
+ CU_ASSERT(spdk_str_chomp(s) == 0);
+ CU_ASSERT(strcmp(s, "a") == 0);
+}
+
+static void
+test_parse_capacity(void)
+{
+ char str[128];
+ uint64_t cap;
+ int rc;
+ bool has_prefix = true;
+
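+ /* k/K/KB, M, and G suffixes are binary multipliers; parsing stops at the first character
+  * that is not part of the number or suffix (hence "100M 512k" and "12k8K" yield only the
+  * first value), and has_prefix reports whether a suffix was seen. */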
+ rc = spdk_parse_capacity("472", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 472);
+ CU_ASSERT(has_prefix == false);
+
+ snprintf(str, sizeof(str), "%"PRIu64, UINT64_MAX);
+ rc = spdk_parse_capacity(str, &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == UINT64_MAX);
+ CU_ASSERT(has_prefix == false);
+
+ rc = spdk_parse_capacity("12k", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("12K", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("12KB", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("100M", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 100 * 1024 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("128M", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 128 * 1024 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("4G", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 4ULL * 1024 * 1024 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("100M 512k", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 100ULL * 1024 * 1024);
+
+ rc = spdk_parse_capacity("12k8K", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ /* Non-number */
+ rc = spdk_parse_capacity("G", &cap, &has_prefix);
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_parse_capacity("darsto", &cap, &has_prefix);
+ CU_ASSERT(rc != 0);
+}
+
+static void
+test_sprintf_append_realloc(void)
+{
+ char *str1, *str2, *str3, *str4;
+
+ /* Test basic functionality. */
+ str1 = spdk_sprintf_alloc("hello world\ngood morning\n" \
+ "good afternoon\ngood evening\n");
+ SPDK_CU_ASSERT_FATAL(str1 != NULL);
+
+ str2 = spdk_sprintf_append_realloc(NULL, "hello world\n");
+ SPDK_CU_ASSERT_FATAL(str2);
+
+ str2 = spdk_sprintf_append_realloc(str2, "good morning\n");
+ SPDK_CU_ASSERT_FATAL(str2);
+
+ str2 = spdk_sprintf_append_realloc(str2, "good afternoon\n");
+ SPDK_CU_ASSERT_FATAL(str2);
+
+ str2 = spdk_sprintf_append_realloc(str2, "good evening\n");
+ SPDK_CU_ASSERT_FATAL(str2);
+
+ CU_ASSERT(strcmp(str1, str2) == 0);
+
+ free(str1);
+ free(str2);
+
+ /* Test doubling buffer size. */
+ str3 = spdk_sprintf_append_realloc(NULL, "aaaaaaaaaa\n");
+ str3 = spdk_sprintf_append_realloc(str3, "bbbbbbbbbb\n");
+ str3 = spdk_sprintf_append_realloc(str3, "cccccccccc\n");
+
+ str4 = malloc(33 + 1);
+ memset(&str4[0], 'a', 10);
+ str4[10] = '\n';
+ memset(&str4[11], 'b', 10);
+ str4[21] = '\n';
+ memset(&str4[22], 'c', 10);
+ str4[32] = '\n';
+ str4[33] = 0;
+
+ CU_ASSERT(strcmp(str3, str4) == 0);
+
+ free(str3);
+ free(str4);
+}
+
+static void
+test_strtol(void)
+{
+ long int val;
+
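+ /* Errors are folded into the return value: -EINVAL for malformed input or an invalid base,
+  * -ERANGE for anything that will not fit as a non-negative result, so even in-range
+  * negative strings like LONG_MIN are rejected below; "-0" still parses to 0. */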
+ const char *val1 = "no_digits";
+ /* LLONG_MIN - 1 */
+ const char *val2 = "-9223372036854775809";
+ /* LONG_MIN */
+ const char *val3 = "-9223372036854775808";
+ /* LONG_MIN + 1 */
+ const char *val4 = "-9223372036854775807";
+ /* LONG_MAX - 1 */
+ const char *val5 = "9223372036854775806";
+ /* LONG_MAX */
+ const char *val6 = "9223372036854775807";
+ /* LONG_MAX + 1 */
+ const char *val7 = "9223372036854775808";
+ /* digits + chars */
+ const char *val8 = "10_is_ten";
+ /* chars + digits */
+ const char *val9 = "ten_is_10";
+ /* all zeroes */
+ const char *val10 = "00000000";
+ /* leading minus sign, but not negative */
+ const char *val11 = "-0";
+
+ val = spdk_strtol(val1, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtol(val2, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtol(val3, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtol(val4, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtol(val5, 10);
+ CU_ASSERT(val == LONG_MAX - 1);
+
+ val = spdk_strtol(val6, 10);
+ CU_ASSERT(val == LONG_MAX);
+
+ val = spdk_strtol(val7, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtol(val8, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtol(val9, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtol(val10, 10);
+ CU_ASSERT(val == 0);
+
+ /* Invalid base */
+ val = spdk_strtol(val10, 1);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtol(val11, 10);
+ CU_ASSERT(val == 0);
+}
+
+static void
+test_strtoll(void)
+{
+ long long int val;
+
+ const char *val1 = "no_digits";
+ /* LLONG_MIN - 1 */
+ const char *val2 = "-9223372036854775809";
+ /* LLONG_MIN */
+ const char *val3 = "-9223372036854775808";
+ /* LLONG_MIN + 1 */
+ const char *val4 = "-9223372036854775807";
+ /* LLONG_MAX - 1 */
+ const char *val5 = "9223372036854775806";
+ /* LLONG_MAX */
+ const char *val6 = "9223372036854775807";
+ /* LLONG_MAX + 1 */
+ const char *val7 = "9223372036854775808";
+ /* digits + chars */
+ const char *val8 = "10_is_ten";
+ /* chars + digits */
+ const char *val9 = "ten_is_10";
+ /* all zeroes */
+ const char *val10 = "00000000";
+ /* leading minus sign, but not negative */
+ const char *val11 = "-0";
+
+ val = spdk_strtoll(val1, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtoll(val2, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtoll(val3, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtoll(val4, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtoll(val5, 10);
+ CU_ASSERT(val == LLONG_MAX - 1);
+
+ val = spdk_strtoll(val6, 10);
+ CU_ASSERT(val == LLONG_MAX);
+
+ val = spdk_strtoll(val7, 10);
+ CU_ASSERT(val == -ERANGE);
+
+ val = spdk_strtoll(val8, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtoll(val9, 10);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtoll(val10, 10);
+ CU_ASSERT(val == 0);
+
+ /* Invalid base */
+ val = spdk_strtoll(val10, 1);
+ CU_ASSERT(val == -EINVAL);
+
+ val = spdk_strtoll(val11, 10);
+ CU_ASSERT(val == 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("string", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_parse_ip_addr);
+ CU_ADD_TEST(suite, test_str_chomp);
+ CU_ADD_TEST(suite, test_parse_capacity);
+ CU_ADD_TEST(suite, test_sprintf_append_realloc);
+ CU_ADD_TEST(suite, test_strtol);
+ CU_ADD_TEST(suite, test_strtoll);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/vhost/Makefile b/src/spdk/test/unit/lib/vhost/Makefile
new file mode 100644
index 000000000..0f569f6d2
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = vhost.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/vhost/vhost.c/.gitignore b/src/spdk/test/unit/lib/vhost/vhost.c/.gitignore
new file mode 100644
index 000000000..16cead8f9
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/vhost.c/.gitignore
@@ -0,0 +1 @@
+vhost_ut
diff --git a/src/spdk/test/unit/lib/vhost/vhost.c/Makefile b/src/spdk/test/unit/lib/vhost/vhost.c/Makefile
new file mode 100644
index 000000000..23438ec4d
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/vhost.c/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/config.mk
+
+ifeq ($(CONFIG_VHOST_INTERNAL_LIB),y)
+CFLAGS += -I$(SPDK_ROOT_DIR)/lib/rte_vhost
+endif
+
+CFLAGS += $(ENV_CFLAGS)
+TEST_FILE = vhost_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c b/src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c
new file mode 100644
index 000000000..a62c7666f
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c
@@ -0,0 +1,547 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_cunit.h"
+#include "spdk/thread.h"
+#include "spdk_internal/mock.h"
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+#include "vhost/vhost.c"
+
+DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
+ uint16_t last_avail_idx, uint16_t last_used_idx), 0);
+DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
+ uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
+DEFINE_STUB_V(vhost_session_install_rte_compat_hooks,
+ (struct spdk_vhost_session *vsession));
+DEFINE_STUB(vhost_register_unix_socket, int, (const char *path, const char *name,
+ uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features), 0);
+DEFINE_STUB(vhost_driver_unregister, int, (const char *path), 0);
+DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
+DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
+DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
+DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
+ uint64_t offset, uint64_t len));
+
+DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
+DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
+DEFINE_STUB(rte_vhost_get_vhost_vring, int,
+ (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
+DEFINE_STUB(rte_vhost_enable_guest_notification, int,
+ (int vid, uint16_t queue_id, int enable), 0);
+DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
+DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
+DEFINE_STUB(rte_vhost_driver_callback_register, int,
+ (const char *path, struct vhost_device_ops const *const ops), 0);
+DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
+DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
+DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
+DEFINE_STUB(vhost_nvme_admin_passthrough, int, (int vid, void *cmd, void *cqe, void *buf), 0);
+DEFINE_STUB(vhost_nvme_set_cq_call, int, (int vid, uint16_t qid, int fd), 0);
+DEFINE_STUB(vhost_nvme_set_bar_mr, int, (int vid, void *bar, uint64_t bar_size), 0);
+DEFINE_STUB(vhost_nvme_get_cap, int, (int vid, uint64_t *cap), 0);
+
+void *
+spdk_call_unaffinitized(void *cb(void *arg), void *arg)
+{
+ return cb(arg);
+}
+
+static struct spdk_vhost_dev_backend g_vdev_backend;
+
+static int
+test_setup(void)
+{
+ return 0;
+}
+
+static int
+alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
+{
+ struct spdk_vhost_dev *vdev = NULL;
+ int rc;
+
+ /* spdk_vhost_dev must be allocated on a cache line boundary. */
+ rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(vdev != NULL);
+ memset(vdev, 0, sizeof(*vdev));
+ rc = vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
+ if (rc == 0) {
+ *vdev_p = vdev;
+ } else {
+ free(vdev);
+ *vdev_p = NULL;
+ }
+
+ return rc;
+}
+
+static void
+start_vdev(struct spdk_vhost_dev *vdev)
+{
+ struct rte_vhost_memory *mem;
+ struct spdk_vhost_session *vsession = NULL;
+ int rc;
+
+ mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
+ SPDK_CU_ASSERT_FATAL(mem != NULL);
+ mem->nregions = 2;
+ mem->regions[0].guest_phys_addr = 0;
+ mem->regions[0].size = 0x400000; /* 4 MB */
+ mem->regions[0].host_user_addr = 0x1000000;
+ mem->regions[1].guest_phys_addr = 0x400000;
+ mem->regions[1].size = 0x400000; /* 4 MB */
+ mem->regions[1].host_user_addr = 0x2000000;
+
+ assert(TAILQ_EMPTY(&vdev->vsessions));
+	/* spdk_vhost_session must be allocated on a cache line boundary. */
+ rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(vsession != NULL);
+ vsession->started = true;
+ vsession->vid = 0;
+ vsession->mem = mem;
+ TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
+}
+
+static void
+stop_vdev(struct spdk_vhost_dev *vdev)
+{
+ struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);
+
+ TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
+ free(vsession->mem);
+ free(vsession);
+}
+
+static void
+cleanup_vdev(struct spdk_vhost_dev *vdev)
+{
+ if (!TAILQ_EMPTY(&vdev->vsessions)) {
+ stop_vdev(vdev);
+ }
+ vhost_dev_unregister(vdev);
+ free(vdev);
+}
+
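+/*
+ * The fake session built by start_vdev() maps guest physical addresses
+ * [0, 4MB) to host address 0x1000000 and [4MB, 8MB) to host address
+ * 0x2000000, so a descriptor address translates as
+ * host_user_addr + (desc.addr - guest_phys_addr). For example (derived from
+ * the region layout above, not an extra test case), desc.addr 0x110000 lands
+ * in region 0 and maps to iov_base 0x1110000.
+ */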
+static void
+desc_to_iov_test(void)
+{
+ struct spdk_vhost_dev *vdev;
+ struct spdk_vhost_session *vsession;
+ struct iovec iov[SPDK_VHOST_IOVS_MAX];
+ uint16_t iov_index;
+ struct vring_desc desc;
+ int rc;
+
+ spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);
+
+ rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
+ start_vdev(vdev);
+
+ vsession = TAILQ_FIRST(&vdev->vsessions);
+
+ /* Test simple case where iov falls fully within a 2MB page. */
+ desc.addr = 0x110000;
+ desc.len = 0x1000;
+ iov_index = 0;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == 1);
+ CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
+ CU_ASSERT(iov[0].iov_len == 0x1000);
+ /*
+ * Always memset the iov to ensure each test validates data written by its call
+ * to the function under test.
+ */
+ memset(iov, 0, sizeof(iov));
+
+ /* Same test, but ensure it respects the non-zero starting iov_index. */
+ iov_index = SPDK_VHOST_IOVS_MAX - 1;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
+ memset(iov, 0, sizeof(iov));
+
+ /* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
+ iov_index = SPDK_VHOST_IOVS_MAX;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc != 0);
+ memset(iov, 0, sizeof(iov));
+
+ /* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
+ desc.addr = 0x1F0000;
+ desc.len = 0x20000;
+ iov_index = 0;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == 1);
+ CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
+ CU_ASSERT(iov[0].iov_len == 0x20000);
+ memset(iov, 0, sizeof(iov));
+
+ /* Same test, but ensure it respects the non-zero starting iov_index. */
+ iov_index = SPDK_VHOST_IOVS_MAX - 1;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
+ memset(iov, 0, sizeof(iov));
+
+ /* Test case where iov spans a vhost memory region. */
+ desc.addr = 0x3F0000;
+ desc.len = 0x20000;
+ iov_index = 0;
+ rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == 2);
+ CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
+ CU_ASSERT(iov[0].iov_len == 0x10000);
+ CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
+ CU_ASSERT(iov[1].iov_len == 0x10000);
+ memset(iov, 0, sizeof(iov));
+
+ cleanup_vdev(vdev);
+
+ CU_ASSERT(true);
+}
+
+static void
+create_controller_test(void)
+{
+ struct spdk_vhost_dev *vdev, *vdev2;
+ int ret;
+ char long_name[PATH_MAX];
+
+ spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);
+
+ /* Create device with no name */
+ ret = alloc_vdev(&vdev, NULL, "0x1");
+ CU_ASSERT(ret != 0);
+
+ /* Create device with incorrect cpumask */
+ ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
+ CU_ASSERT(ret != 0);
+
+ /* Create device with too long name and path */
+ memset(long_name, 'x', sizeof(long_name));
+ long_name[PATH_MAX - 1] = 0;
+ snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
+ ret = alloc_vdev(&vdev, long_name, "0x1");
+ CU_ASSERT(ret != 0);
+ dev_dirname[0] = 0;
+
+ /* Create device when device name is already taken */
+ ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
+ ret = alloc_vdev(&vdev2, "vdev_name_0", "0x1");
+ CU_ASSERT(ret != 0);
+ cleanup_vdev(vdev);
+}
+
+static void
+session_find_by_vid_test(void)
+{
+ struct spdk_vhost_dev *vdev;
+ struct spdk_vhost_session *vsession;
+ struct spdk_vhost_session *tmp;
+ int rc;
+
+ rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
+ start_vdev(vdev);
+
+ vsession = TAILQ_FIRST(&vdev->vsessions);
+
+ tmp = vhost_session_find_by_vid(vsession->vid);
+ CU_ASSERT(tmp == vsession);
+
+ /* Search for a device with incorrect vid */
+ tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
+ CU_ASSERT(tmp == NULL);
+
+ cleanup_vdev(vdev);
+}
+
+static void
+remove_controller_test(void)
+{
+ struct spdk_vhost_dev *vdev;
+ int ret;
+
+ ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
+
+ /* Remove device when controller is in use */
+ start_vdev(vdev);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
+ ret = vhost_dev_unregister(vdev);
+ CU_ASSERT(ret != 0);
+
+ cleanup_vdev(vdev);
+}
+
+static void
+vq_avail_ring_get_test(void)
+{
+ struct spdk_vhost_virtqueue vq;
+ uint16_t avail_mem[34];
+ uint16_t reqs[32];
+ uint16_t reqs_len, ret, i;
+
+	/* Basic case: reap all available requests */
+ vq.vring.avail = (struct vring_avail *)avail_mem;
+ vq.vring.size = 32;
+ vq.last_avail_idx = 24;
+ vq.vring.avail->idx = 29;
+ reqs_len = 6;
+
+ for (i = 0; i < 32; i++) {
+ vq.vring.avail->ring[i] = i;
+ }
+
+ ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
+ CU_ASSERT(ret == 5);
+ CU_ASSERT(vq.last_avail_idx == 29);
+ for (i = 0; i < ret; i++) {
+ CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
+ }
+
+	/* Basic case: reap only some of the available requests */
+ vq.last_avail_idx = 20;
+ vq.vring.avail->idx = 29;
+ reqs_len = 6;
+
+ ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
+ CU_ASSERT(ret == reqs_len);
+ CU_ASSERT(vq.last_avail_idx == 26);
+ for (i = 0; i < ret; i++) {
+ CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
+ }
+
+	/* Invalid case: avail->idx is further ahead of last_avail_idx than the ring size allows */
+ vq.last_avail_idx = 20;
+ vq.vring.avail->idx = 156;
+ reqs_len = 6;
+
+ ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
+ CU_ASSERT(ret == 0);
+
+	/* Test overflow in the avail->idx variable: in uint16_t arithmetic
+	 * 4 - 65535 == 5, so five descriptors (ring slots 31, 0, 1, 2, 3)
+	 * are reaped.
+	 */
+ vq.last_avail_idx = 65535;
+ vq.vring.avail->idx = 4;
+ reqs_len = 6;
+ ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
+ CU_ASSERT(ret == 5);
+ CU_ASSERT(vq.last_avail_idx == 4);
+ CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
+ for (i = 1; i < ret; i++) {
+ CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
+ }
+}
+
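+/*
+ * The helpers below emulate the guest side of a packed virtqueue for
+ * vq_packed_ring_test(): the guest marks a descriptor available by setting
+ * VRING_DESC_F_AVAIL and clearing VRING_DESC_F_USED (or the inverse once its
+ * wrap counter has flipped), and recognizes a completed descriptor when the
+ * host has set VRING_DESC_F_USED to match the guest's used phase.
+ */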
+static bool
+vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
+ int16_t guest_used_phase)
+{
+ return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
+ !!guest_used_phase);
+}
+
+static void
+vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
+ int16_t *guest_avail_phase)
+{
+ if (*guest_avail_phase) {
+ vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
+ vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
+ } else {
+ vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
+ vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
+ }
+
+ if (++(*guest_last_avail_idx) >= vq->vring.size) {
+ *guest_last_avail_idx -= vq->vring.size;
+ *guest_avail_phase = !(*guest_avail_phase);
+ }
+}
+
+static int16_t
+vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
+ int16_t *guest_used_phase)
+{
+ int16_t buffer_id = -1;
+
+ if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
+ buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
+ if (++(*guest_last_used_idx) >= vq->vring.size) {
+ *guest_last_used_idx -= vq->vring.size;
+ *guest_used_phase = !(*guest_used_phase);
+ }
+
+ return buffer_id;
+ }
+
+ return -1;
+}
+
+static void
+vq_packed_ring_test(void)
+{
+ struct spdk_vhost_session vs = {};
+ struct spdk_vhost_virtqueue vq = {};
+ struct vring_packed_desc descs[4];
+ uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
+ uint16_t guest_avail_phase = 1, guest_used_phase = 1;
+ int i;
+ int16_t chain_num;
+
+ vq.vring.desc_packed = descs;
+ vq.vring.size = 4;
+
+	/* The avail and used wrap counters are initialized to 1 */
+ vq.packed.avail_phase = 1;
+ vq.packed.used_phase = 1;
+ vq.packed.packed_ring = true;
+ memset(descs, 0, sizeof(descs));
+
+ CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);
+
+	/* Guest sends requests */
+ for (i = 0; i < vq.vring.size; i++) {
+ descs[guest_last_avail_idx].id = i;
+ /* Set the desc available */
+ vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
+ }
+ CU_ASSERT(guest_last_avail_idx == 0);
+ CU_ASSERT(guest_avail_phase == 0);
+
+	/* Host handles the available descs */
+ CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
+ i = 0;
+ while (vhost_vq_packed_ring_is_avail(&vq)) {
+ CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
+ CU_ASSERT(chain_num == 1);
+ }
+
+	/* Host completes them out of order: 1, 0, 2. */
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1);
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1);
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1);
+
+	/* Host has consumed all the available requests but has completed only three of them */
+ CU_ASSERT(vq.last_avail_idx == 0);
+ CU_ASSERT(vq.packed.avail_phase == 0);
+ CU_ASSERT(vq.last_used_idx == 3);
+ CU_ASSERT(vq.packed.used_phase == 1);
+
+	/* Guest handles the completed requests */
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
+ CU_ASSERT(guest_last_used_idx == 3);
+ CU_ASSERT(guest_used_phase == 1);
+
+	/* Three descs are available again, so the guest can send three more requests */
+ for (i = 0; i < 3; i++) {
+ descs[guest_last_avail_idx].id = 2 - i;
+ /* Set the desc available */
+ vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
+ }
+
+	/* Host handles the available descs */
+ CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
+ i = 2;
+ while (vhost_vq_packed_ring_is_avail(&vq)) {
+ CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
+ CU_ASSERT(chain_num == 1);
+ }
+
+	/* The host now holds four requests: the three new ones plus the one left uncompleted */
+ CU_ASSERT(vq.last_avail_idx == 3);
+	/* The available wrap counter should have flipped */
+ CU_ASSERT(vq.packed.avail_phase == 0);
+
+	/* Host completes all the requests */
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1);
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1);
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1);
+ vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1);
+
+ CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
+ CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);
+
+	/* Guest handles the completed requests */
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
+ CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
+
+ CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
+ CU_ASSERT(guest_avail_phase == guest_used_phase);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("vhost_suite", test_setup, NULL);
+
+ CU_ADD_TEST(suite, desc_to_iov_test);
+ CU_ADD_TEST(suite, create_controller_test);
+ CU_ADD_TEST(suite, session_find_by_vid_test);
+ CU_ADD_TEST(suite, remove_controller_test);
+ CU_ADD_TEST(suite, vq_avail_ring_get_test);
+ CU_ADD_TEST(suite, vq_packed_ring_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/unittest.sh b/src/spdk/test/unit/unittest.sh
new file mode 100755
index 000000000..39bfdbb4a
--- /dev/null
+++ b/src/spdk/test/unit/unittest.sh
@@ -0,0 +1,253 @@
+#!/usr/bin/env bash
+#
+# Environment variables:
+#  $valgrind    Specify the valgrind command line; if not set,
+#               a default command line is used.
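+#
+# For example, to run the suite under a custom valgrind invocation (the exact
+# options here are only an illustration):
+#   valgrind='valgrind --tool=memcheck --leak-check=full' ./test/unit/unittest.sh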
+
+set -xe
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $(dirname $0)/../..)
+source "$rootdir/test/common/autotest_common.sh"
+
+cd "$rootdir"
+
+function unittest_bdev() {
+ $valgrind $testdir/lib/bdev/bdev.c/bdev_ut
+ $valgrind $testdir/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut
+ $valgrind $testdir/lib/bdev/raid/bdev_raid.c/bdev_raid_ut
+ $valgrind $testdir/lib/bdev/bdev_zone.c/bdev_zone_ut
+ $valgrind $testdir/lib/bdev/gpt/gpt.c/gpt_ut
+ $valgrind $testdir/lib/bdev/part.c/part_ut
+ $valgrind $testdir/lib/bdev/scsi_nvme.c/scsi_nvme_ut
+ $valgrind $testdir/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut
+ $valgrind $testdir/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut
+ $valgrind $testdir/lib/bdev/mt/bdev.c/bdev_ut
+}
+
+function unittest_blob() {
+ $valgrind $testdir/lib/blob/blob.c/blob_ut
+ $valgrind $testdir/lib/blobfs/tree.c/tree_ut
+ $valgrind $testdir/lib/blobfs/blobfs_async_ut/blobfs_async_ut
+ # blobfs_sync_ut hangs when run under valgrind, so don't use $valgrind
+ $testdir/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut
+ $valgrind $testdir/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut
+}
+
+function unittest_event() {
+ $valgrind $testdir/lib/event/subsystem.c/subsystem_ut
+ $valgrind $testdir/lib/event/app.c/app_ut
+ $valgrind $testdir/lib/event/reactor.c/reactor_ut
+}
+
+function unittest_ftl() {
+ $valgrind $testdir/lib/ftl/ftl_ppa/ftl_ppa_ut
+ $valgrind $testdir/lib/ftl/ftl_band.c/ftl_band_ut
+ $valgrind $testdir/lib/ftl/ftl_reloc.c/ftl_reloc_ut
+ $valgrind $testdir/lib/ftl/ftl_wptr/ftl_wptr_ut
+ $valgrind $testdir/lib/ftl/ftl_md/ftl_md_ut
+ $valgrind $testdir/lib/ftl/ftl_io.c/ftl_io_ut
+}
+
+function unittest_iscsi() {
+ $valgrind $testdir/lib/iscsi/conn.c/conn_ut
+ $valgrind $testdir/lib/iscsi/param.c/param_ut
+ $valgrind $testdir/lib/iscsi/tgt_node.c/tgt_node_ut $testdir/lib/iscsi/tgt_node.c/tgt_node.conf
+ $valgrind $testdir/lib/iscsi/iscsi.c/iscsi_ut
+ $valgrind $testdir/lib/iscsi/init_grp.c/init_grp_ut $testdir/lib/iscsi/init_grp.c/init_grp.conf
+ $valgrind $testdir/lib/iscsi/portal_grp.c/portal_grp_ut $testdir/lib/iscsi/portal_grp.c/portal_grp.conf
+}
+
+function unittest_json() {
+ $valgrind $testdir/lib/json/json_parse.c/json_parse_ut
+ $valgrind $testdir/lib/json/json_util.c/json_util_ut
+ $valgrind $testdir/lib/json/json_write.c/json_write_ut
+ $valgrind $testdir/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut
+}
+
+function unittest_nvme() {
+ $valgrind $testdir/lib/nvme/nvme.c/nvme_ut
+ $valgrind $testdir/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut
+ $valgrind $testdir/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut
+ $valgrind $testdir/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut
+ $valgrind $testdir/lib/nvme/nvme_ns.c/nvme_ns_ut
+ $valgrind $testdir/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut
+ $valgrind $testdir/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut
+ $valgrind $testdir/lib/nvme/nvme_qpair.c/nvme_qpair_ut
+ $valgrind $testdir/lib/nvme/nvme_pcie.c/nvme_pcie_ut
+ $valgrind $testdir/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut
+ $valgrind $testdir/lib/nvme/nvme_quirks.c/nvme_quirks_ut
+ $valgrind $testdir/lib/nvme/nvme_tcp.c/nvme_tcp_ut
+ $valgrind $testdir/lib/nvme/nvme_uevent.c/nvme_uevent_ut
+}
+
+function unittest_nvmf() {
+ $valgrind $testdir/lib/nvmf/ctrlr.c/ctrlr_ut
+ $valgrind $testdir/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut
+ $valgrind $testdir/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut
+ $valgrind $testdir/lib/nvmf/subsystem.c/subsystem_ut
+ $valgrind $testdir/lib/nvmf/tcp.c/tcp_ut
+}
+
+function unittest_scsi() {
+ $valgrind $testdir/lib/scsi/dev.c/dev_ut
+ $valgrind $testdir/lib/scsi/lun.c/lun_ut
+ $valgrind $testdir/lib/scsi/scsi.c/scsi_ut
+ $valgrind $testdir/lib/scsi/scsi_bdev.c/scsi_bdev_ut
+ $valgrind $testdir/lib/scsi/scsi_pr.c/scsi_pr_ut
+}
+
+function unittest_sock() {
+ $valgrind $testdir/lib/sock/sock.c/sock_ut
+ $valgrind $testdir/lib/sock/posix.c/posix_ut
+ # Check whether uring is configured
+ if grep -q '#define SPDK_CONFIG_URING 1' $rootdir/include/spdk/config.h; then
+ $valgrind $testdir/lib/sock/uring.c/uring_ut
+ fi
+}
+
+function unittest_util() {
+ $valgrind $testdir/lib/util/base64.c/base64_ut
+ $valgrind $testdir/lib/util/bit_array.c/bit_array_ut
+ $valgrind $testdir/lib/util/cpuset.c/cpuset_ut
+ $valgrind $testdir/lib/util/crc16.c/crc16_ut
+ $valgrind $testdir/lib/util/crc32_ieee.c/crc32_ieee_ut
+ $valgrind $testdir/lib/util/crc32c.c/crc32c_ut
+ $valgrind $testdir/lib/util/string.c/string_ut
+ $valgrind $testdir/lib/util/dif.c/dif_ut
+ $valgrind $testdir/lib/util/iov.c/iov_ut
+ $valgrind $testdir/lib/util/math.c/math_ut
+ $valgrind $testdir/lib/util/pipe.c/pipe_ut
+}
+
+# If ASAN is enabled, use it. Otherwise fall back to valgrind if it is
+# installed, but allow the env variable to override the default shown below.
+if [ -z ${valgrind+x} ]; then
+ if grep -q '#undef SPDK_CONFIG_ASAN' $rootdir/include/spdk/config.h && hash valgrind; then
+ valgrind='valgrind --leak-check=full --error-exitcode=2'
+ else
+ valgrind=''
+ fi
+fi
+
+# set up local unit test coverage if lcov is available
+if hash lcov && grep -q '#define SPDK_CONFIG_COVERAGE 1' $rootdir/include/spdk/config.h; then
+ cov_avail="yes"
+else
+ cov_avail="no"
+fi
+if [ "$cov_avail" = "yes" ]; then
+ # set unit test output dir if not specified in env var
+ if [ -z ${UT_COVERAGE+x} ]; then
+ UT_COVERAGE="ut_coverage"
+ fi
+ mkdir -p $UT_COVERAGE
+ export LCOV_OPTS="
+ --rc lcov_branch_coverage=1
+ --rc lcov_function_coverage=1
+ --rc genhtml_branch_coverage=1
+ --rc genhtml_function_coverage=1
+ --rc genhtml_legend=1
+ --rc geninfo_all_blocks=1
+ "
+ export LCOV="lcov $LCOV_OPTS --no-external"
+ # zero out coverage data
+ $LCOV -q -c -i -d . -t "Baseline" -o $UT_COVERAGE/ut_cov_base.info
+fi
+
+# workaround for valgrind v3.13 on arm64
+if [ $(uname -m) = "aarch64" ]; then
+ export LD_HWCAP_MASK=1
+fi
+
+run_test "unittest_include" $valgrind $testdir/include/spdk/histogram_data.h/histogram_ut
+run_test "unittest_bdev" unittest_bdev
+if grep -q '#define SPDK_CONFIG_CRYPTO 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_bdev_crypto" $valgrind $testdir/lib/bdev/crypto.c/crypto_ut
+fi
+
+if grep -q '#define SPDK_CONFIG_REDUCE 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_bdev_reduce" $valgrind $testdir/lib/bdev/compress.c/compress_ut
+fi
+
+if grep -q '#define SPDK_CONFIG_PMDK 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_bdev_pmem" $valgrind $testdir/lib/bdev/pmem/bdev_pmem_ut
+fi
+
+if grep -q '#define SPDK_CONFIG_RAID5 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_bdev_raid5" $valgrind $testdir/lib/bdev/raid/raid5.c/raid5_ut
+fi
+
+run_test "unittest_blob_blobfs" unittest_blob
+run_test "unittest_event" unittest_event
+if [ $(uname -s) = Linux ]; then
+ run_test "unittest_ftl" unittest_ftl
+fi
+
+run_test "unittest_ioat" $valgrind $testdir/lib/ioat/ioat.c/ioat_ut
+if grep -q '#define SPDK_CONFIG_IDXD 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_idxd" $valgrind $testdir/lib/idxd/idxd.c/idxd_ut
+fi
+run_test "unittest_iscsi" unittest_iscsi
+run_test "unittest_json" unittest_json
+run_test "unittest_notify" $valgrind $testdir/lib/notify/notify.c/notify_ut
+run_test "unittest_nvme" unittest_nvme
+run_test "unittest_log" $valgrind $testdir/lib/log/log.c/log_ut
+run_test "unittest_lvol" $valgrind $testdir/lib/lvol/lvol.c/lvol_ut
+if grep -q '#define SPDK_CONFIG_RDMA 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_nvme_rdma" $valgrind $testdir/lib/nvme/nvme_rdma.c/nvme_rdma_ut
+fi
+
+run_test "unittest_nvmf" unittest_nvmf
+if grep -q '#define SPDK_CONFIG_FC 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_nvmf_fc" $valgrind $testdir/lib/nvmf/fc.c/fc_ut
+ run_test "unittest_nvmf_fc_ls" $valgrind $testdir/lib/nvmf/fc_ls.c/fc_ls_ut
+fi
+
+if grep -q '#define SPDK_CONFIG_RDMA 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_nvmf_rdma" $valgrind $testdir/lib/nvmf/rdma.c/rdma_ut
+fi
+
+run_test "unittest_scsi" unittest_scsi
+run_test "unittest_sock" unittest_sock
+run_test "unittest_thread" $valgrind $testdir/lib/thread/thread.c/thread_ut
+run_test "unittest_util" unittest_util
+if grep -q '#define SPDK_CONFIG_VHOST 1' $rootdir/include/spdk/config.h; then
+ run_test "unittest_vhost" $valgrind $testdir/lib/vhost/vhost.c/vhost_ut
+fi
+
+# local unit test coverage
+if [ "$cov_avail" = "yes" ]; then
+ $LCOV -q -d . -c -t "$(hostname)" -o $UT_COVERAGE/ut_cov_test.info
+ $LCOV -q -a $UT_COVERAGE/ut_cov_base.info -a $UT_COVERAGE/ut_cov_test.info -o $UT_COVERAGE/ut_cov_total.info
+ $LCOV -q -a $UT_COVERAGE/ut_cov_total.info -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/app/*" -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/dpdk/*" -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/examples/*" -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/include/*" -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/lib/vhost/rte_vhost/*" -o $UT_COVERAGE/ut_cov_unit.info
+ $LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/test/*" -o $UT_COVERAGE/ut_cov_unit.info
+ rm -f $UT_COVERAGE/ut_cov_base.info $UT_COVERAGE/ut_cov_test.info
+ genhtml $UT_COVERAGE/ut_cov_unit.info --output-directory $UT_COVERAGE
+ # git -C option not used for compatibility reasons
+ (cd $rootdir && git clean -f "*.gcda")
+fi
+
+set +x
+
+echo
+echo
+echo "====================="
+echo "All unit tests passed"
+echo "====================="
+if [ "$cov_avail" = "yes" ]; then
+ echo "Note: coverage report is here: $rootdir/$UT_COVERAGE"
+else
+ echo "WARN: lcov not installed or SPDK built without coverage!"
+fi
+if grep -q '#undef SPDK_CONFIG_ASAN' $rootdir/include/spdk/config.h && [ "$valgrind" = "" ]; then
+ echo "WARN: neither valgrind nor ASAN is enabled!"
+fi
+
+echo
+echo
diff --git a/src/spdk/test/vhost/common.sh b/src/spdk/test/vhost/common.sh
new file mode 100644
index 000000000..33c8e0953
--- /dev/null
+++ b/src/spdk/test/vhost/common.sh
@@ -0,0 +1,1266 @@
+: ${SPDK_VHOST_VERBOSE=false}
+: ${VHOST_DIR="$HOME/vhost_test"}
+: ${QEMU_BIN="qemu-system-x86_64"}
+: ${QEMU_IMG_BIN="qemu-img"}
+
+TEST_DIR=$(readlink -f $rootdir/..)
+VM_DIR=$VHOST_DIR/vms
+TARGET_DIR=$VHOST_DIR/vhost
+VM_PASSWORD="root"
+
+#TODO: Move vhost_vm_image.qcow2 into VHOST_DIR on test systems.
+VM_IMAGE=$HOME/vhost_vm_image.qcow2
+
+if ! hash $QEMU_IMG_BIN $QEMU_BIN; then
+ error 'QEMU is not installed on this system. Unable to run vhost tests.'
+ exit 1
+fi
+
+mkdir -p $VHOST_DIR
+mkdir -p $VM_DIR
+mkdir -p $TARGET_DIR
+
+#
+# Source config describing QEMU and VHOST cores and NUMA
+#
+source $rootdir/test/vhost/common/autotest.config
+
+function vhosttestinit() {
+ if [ "$TEST_MODE" == "iso" ]; then
+ $rootdir/scripts/setup.sh
+
+ # Look for the VM image
+ if [[ ! -f $VM_IMAGE ]]; then
+ echo "VM image not found at $VM_IMAGE"
+ echo "Download to $HOME? [yn]"
+ read -r download
+ if [ "$download" = "y" ]; then
+ curl https://ci.spdk.io/download/test_resources/vhost_vm_image.tar.gz | tar xz -C $HOME
+ fi
+ fi
+ fi
+
+ # Look for the VM image
+ if [[ ! -f $VM_IMAGE ]]; then
+ error "VM image not found at $VM_IMAGE"
+ exit 1
+ fi
+}
+
+function vhosttestfini() {
+ if [ "$TEST_MODE" == "iso" ]; then
+ $rootdir/scripts/setup.sh reset
+ fi
+}
+
+function message() {
+ local verbose_out
+ if ! $SPDK_VHOST_VERBOSE; then
+ verbose_out=""
+ elif [[ ${FUNCNAME[2]} == "source" ]]; then
+ verbose_out=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
+ else
+ verbose_out=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
+ fi
+
+ local msg_type="$1"
+ shift
+ echo -e "${msg_type}${verbose_out}: $*"
+}
+
+function fail() {
+ echo "===========" >&2
+ message "FAIL" "$@" >&2
+ echo "===========" >&2
+ exit 1
+}
+
+function error() {
+ echo "===========" >&2
+ message "ERROR" "$@" >&2
+ echo "===========" >&2
+	# Don't 'return 1' here - for reasons that are unclear it makes the stack trace incomplete (the upper command goes missing).
+ false
+}
+
+function warning() {
+ message "WARN" "$@" >&2
+}
+
+function notice() {
+ message "INFO" "$@"
+}
+
+function check_qemu_packedring_support() {
+ qemu_version=$($QEMU_BIN -version | grep -Po "(?<=version )\d+.\d+.\d+")
+ if [[ "$qemu_version" < "4.2.0" ]]; then
+ error "This qemu binary does not support packed ring"
+ fi
+}
+
+function get_vhost_dir() {
+ local vhost_name="$1"
+
+ if [[ -z "$vhost_name" ]]; then
+ error "vhost name must be provided to get_vhost_dir"
+ return 1
+ fi
+
+ echo "$TARGET_DIR/${vhost_name}"
+}
+
+function vhost_run() {
+ local vhost_name="$1"
+ local run_gen_nvme=true
+
+ if [[ -z "$vhost_name" ]]; then
+ error "vhost name must be provided to vhost_run"
+ return 1
+ fi
+ shift
+
+ if [[ "$1" == "--no-gen-nvme" ]]; then
+		notice "Skipping gen_nvme.sh NVMe bdev configuration"
+ run_gen_nvme=false
+ shift
+ fi
+
+ local vhost_dir
+ vhost_dir="$(get_vhost_dir $vhost_name)"
+ local vhost_app="$SPDK_BIN_DIR/vhost"
+ local vhost_log_file="$vhost_dir/vhost.log"
+ local vhost_pid_file="$vhost_dir/vhost.pid"
+ local vhost_socket="$vhost_dir/usvhost"
+ notice "starting vhost app in background"
+ [[ -r "$vhost_pid_file" ]] && vhost_kill $vhost_name
+ [[ -d $vhost_dir ]] && rm -f $vhost_dir/*
+ mkdir -p $vhost_dir
+
+ if [[ ! -x $vhost_app ]]; then
+ error "application not found: $vhost_app"
+ return 1
+ fi
+
+ local cmd="$vhost_app -r $vhost_dir/rpc.sock $*"
+
+	notice "Logging to: $vhost_log_file"
+ notice "Socket: $vhost_socket"
+ notice "Command: $cmd"
+
+ timing_enter vhost_start
+ cd $vhost_dir
+ $cmd &
+ vhost_pid=$!
+ echo $vhost_pid > $vhost_pid_file
+
+ notice "waiting for app to run..."
+ waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"
+	# Do not generate NVMe bdevs if PCI access is disabled
+ if [[ "$cmd" != *"--no-pci"* ]] && [[ "$cmd" != *"-u"* ]] && $run_gen_nvme; then
+ $rootdir/scripts/gen_nvme.sh "--json" | $rootdir/scripts/rpc.py -s $vhost_dir/rpc.sock load_subsystem_config
+ fi
+
+ notice "vhost started - pid=$vhost_pid"
+ timing_exit vhost_start
+}
+
+function vhost_kill() {
+ local rc=0
+ local vhost_name="$1"
+
+ if [[ -z "$vhost_name" ]]; then
+ error "Must provide vhost name to vhost_kill"
+ return 0
+ fi
+
+ local vhost_dir
+ vhost_dir="$(get_vhost_dir $vhost_name)"
+ local vhost_pid_file="$vhost_dir/vhost.pid"
+
+ if [[ ! -r $vhost_pid_file ]]; then
+ warning "no vhost pid file found"
+ return 0
+ fi
+
+ timing_enter vhost_kill
+ local vhost_pid
+ vhost_pid="$(cat $vhost_pid_file)"
+ notice "killing vhost (PID $vhost_pid) app"
+
+ if kill -INT $vhost_pid > /dev/null; then
+ notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
+ for ((i = 0; i < 60; i++)); do
+ if kill -0 $vhost_pid; then
+ echo "."
+ sleep 1
+ else
+ break
+ fi
+ done
+ if kill -0 $vhost_pid; then
+ error "ERROR: vhost was NOT killed - sending SIGABRT"
+ kill -ABRT $vhost_pid
+ rm $vhost_pid_file
+ rc=1
+ else
+ while kill -0 $vhost_pid; do
+ echo "."
+ done
+ fi
+ elif kill -0 $vhost_pid; then
+ error "vhost NOT killed - you need to kill it manually"
+ rc=1
+ else
+ notice "vhost was not running"
+ fi
+
+ timing_exit vhost_kill
+ if [[ $rc == 0 ]]; then
+ rm $vhost_pid_file
+ fi
+
+ rm -rf "$vhost_dir"
+
+ return $rc
+}
+
+function vhost_rpc() {
+ local vhost_name="$1"
+
+ if [[ -z "$vhost_name" ]]; then
+ error "vhost name must be provided to vhost_rpc"
+ return 1
+ fi
+ shift
+
+ $rootdir/scripts/rpc.py -s $(get_vhost_dir $vhost_name)/rpc.sock "$@"
+}
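+
+# Typical use (the RPC name and vhost instance here are just an example):
+#   vhost_rpc 0 bdev_get_bdevs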
+
+###
+# Mgmt functions
+###
+
+function assert_number() {
+ [[ "$1" =~ [0-9]+ ]] && return 0
+
+	error "Invalid or missing parameter: expected a number but got '$1'"
+ return 1
+}
+
+# Run command on vm with given password
+# First argument - vm number
+# Second argument - ssh password for vm
+#
+function vm_sshpass() {
+ vm_num_is_valid $1 || return 1
+
+ local ssh_cmd
+ ssh_cmd="sshpass -p $2 ssh \
+ -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ -o User=root \
+ -p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"
+
+ shift 2
+ $ssh_cmd "$@"
+}
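+
+# For example (the VM number is illustrative; "root" matches VM_PASSWORD above):
+#   vm_sshpass 0 root 'uname -r'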
+
+# Helper to validate VM number
+# param $1 VM number
+#
+function vm_num_is_valid() {
+ [[ "$1" =~ ^[0-9]+$ ]] && return 0
+
+	error "Invalid or missing parameter: vm number '$1'"
+ return 1
+}
+
+# Print network socket for given VM number
+# param $1 virtual machine number
+#
+function vm_ssh_socket() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+
+ cat $vm_dir/ssh_socket
+}
+
+function vm_fio_socket() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+
+ cat $vm_dir/fio_socket
+}
+
+# Execute command on given VM
+# param $1 virtual machine number
+#
+function vm_exec() {
+ vm_num_is_valid $1 || return 1
+
+ local vm_num="$1"
+ shift
+
+ sshpass -p "$VM_PASSWORD" ssh \
+ -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ -o User=root \
+ -p $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS 127.0.0.1 \
+ "$@"
+}
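+
+# For example, a command can be passed directly or piped in as a script
+# (the VM number and commands are illustrative):
+#   vm_exec 0 'uname -r'
+#   echo 'lsblk' | vm_exec 0 bash -s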
+
+# Execute scp command on given VM
+# param $1 virtual machine number
+#
+function vm_scp() {
+ vm_num_is_valid $1 || return 1
+
+ local vm_num="$1"
+ shift
+
+ sshpass -p "$VM_PASSWORD" scp \
+ -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ -o User=root \
+ -P $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS \
+ "$@"
+}
+
+# check if specified VM is running
+# param $1 VM num
+function vm_is_running() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+
+ if [[ ! -r $vm_dir/qemu.pid ]]; then
+ return 1
+ fi
+
+ local vm_pid
+ vm_pid="$(cat $vm_dir/qemu.pid)"
+
+ if /bin/kill -0 $vm_pid; then
+ return 0
+ else
+ if [[ $EUID -ne 0 ]]; then
+			warning "not root - assuming the VM is running since it can't be checked"
+ return 0
+ fi
+
+ # not running - remove pid file
+ rm $vm_dir/qemu.pid
+ return 1
+ fi
+}
+
+# check if specified VM is running
+# param $1 VM num
+function vm_os_booted() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+
+ if [[ ! -r $vm_dir/qemu.pid ]]; then
+ error "VM $1 is not running"
+ return 1
+ fi
+
+ if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2> /dev/null; then
+ # Shutdown existing master. Ignore errors as it might not exist.
+ VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2> /dev/null
+ return 1
+ fi
+
+ return 0
+}
+
+# Shutdown given VM
+# param $1 virtual machine number
+# return non-zero in case of error.
+function vm_shutdown() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+ if [[ ! -d "$vm_dir" ]]; then
+		error "VM$1 ($vm_dir) does not exist - set it up first"
+ return 1
+ fi
+
+ if ! vm_is_running $1; then
+ notice "VM$1 ($vm_dir) is not running"
+ return 0
+ fi
+
+ # Temporarily disabling exit flag for next ssh command, since it will
+ # "fail" due to shutdown
+ notice "Shutting down virtual machine $vm_dir"
+ set +e
+ vm_exec $1 "nohup sh -c 'shutdown -h -P now'" || true
+ notice "VM$1 is shutting down - wait a while to complete"
+ set -e
+}
+
+# Kill given VM
+# param $1 virtual machine number
+#
+function vm_kill() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+
+ if [[ ! -r $vm_dir/qemu.pid ]]; then
+ return 0
+ fi
+
+ local vm_pid
+ vm_pid="$(cat $vm_dir/qemu.pid)"
+
+ notice "Killing virtual machine $vm_dir (pid=$vm_pid)"
+	# Sending the signal should normally succeed; report an error only if the VM still appears to be running afterwards.
+ if /bin/kill $vm_pid; then
+ notice "process $vm_pid killed"
+ rm $vm_dir/qemu.pid
+ rm -rf $vm_dir
+ elif vm_is_running $1; then
+ error "Process $vm_pid NOT killed"
+ return 1
+ fi
+}
+
+# List all VM numbers in VM_DIR
+#
+function vm_list_all() {
+ local vms
+ vms="$(
+ shopt -s nullglob
+ echo $VM_DIR/[0-9]*
+ )"
+ if [[ -n "$vms" ]]; then
+ basename --multiple $vms
+ fi
+}
+
+# Kill all VMs in $VM_DIR
+#
+function vm_kill_all() {
+ local vm
+ for vm in $(vm_list_all); do
+ vm_kill $vm
+ done
+
+ rm -rf $VM_DIR
+}
+
+# Shut down all VMs in $VM_DIR
+#
+function vm_shutdown_all() {
+ # XXX: temporarily disable to debug shutdown issue
+ # xtrace_disable
+
+ local vms
+ vms=$(vm_list_all)
+ local vm
+
+ for vm in $vms; do
+ vm_shutdown $vm
+ done
+
+ notice "Waiting for VMs to shutdown..."
+ local timeo=30
+ while [[ $timeo -gt 0 ]]; do
+ local all_vms_down=1
+ for vm in $vms; do
+ if vm_is_running $vm; then
+ all_vms_down=0
+ break
+ fi
+ done
+
+ if [[ $all_vms_down == 1 ]]; then
+ notice "All VMs successfully shut down"
+ xtrace_restore
+ return 0
+ fi
+
+ ((timeo -= 1))
+ sleep 1
+ done
+
+ rm -rf $VM_DIR
+
+ xtrace_restore
+}
+
+function vm_setup() {
+ xtrace_disable
+ local OPTIND optchar vm_num
+
+ local os=""
+ local os_mode=""
+ local qemu_args=()
+ local disk_type_g=NOT_DEFINED
+ local read_only="false"
+	# List of disks, created from a string of names separated with ":"
+ local disks=()
+ local raw_cache=""
+ local vm_incoming=""
+ local vm_migrate_to=""
+ local force_vm=""
+ local guest_memory=1024
+ local queue_number=""
+ local vhost_dir
+ local packed=false
+ vhost_dir="$(get_vhost_dir 0)"
+ while getopts ':-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ os=*) os="${OPTARG#*=}" ;;
+ os-mode=*) os_mode="${OPTARG#*=}" ;;
+ qemu-args=*) qemu_args+=("${OPTARG#*=}") ;;
+ disk-type=*) disk_type_g="${OPTARG#*=}" ;;
+ read-only=*) read_only="${OPTARG#*=}" ;;
+ disks=*) IFS=":" read -ra disks <<< "${OPTARG#*=}" ;;
+ raw-cache=*) raw_cache=",cache${OPTARG#*=}" ;;
+ force=*) force_vm=${OPTARG#*=} ;;
+ memory=*) guest_memory=${OPTARG#*=} ;;
+ queue_num=*) queue_number=${OPTARG#*=} ;;
+ incoming=*) vm_incoming="${OPTARG#*=}" ;;
+ migrate-to=*) vm_migrate_to="${OPTARG#*=}" ;;
+ vhost-name=*) vhost_dir="$(get_vhost_dir ${OPTARG#*=})" ;;
+ spdk-boot=*) local boot_from="${OPTARG#*=}" ;;
+ packed) packed=true ;;
+ *)
+ error "unknown argument $OPTARG"
+ return 1
+ ;;
+ esac
+ ;;
+ *)
+ error "vm_create Unknown param $OPTARG"
+ return 1
+ ;;
+ esac
+ done
+
+ # Find next directory we can use
+ if [[ -n $force_vm ]]; then
+ vm_num=$force_vm
+
+ vm_num_is_valid $vm_num || return 1
+ local vm_dir="$VM_DIR/$vm_num"
+ [[ -d $vm_dir ]] && warning "removing existing VM in '$vm_dir'"
+ else
+ local vm_dir=""
+
+ set +x
+ for ((i = 0; i <= 256; i++)); do
+ local vm_dir="$VM_DIR/$i"
+ [[ ! -d $vm_dir ]] && break
+ done
+ xtrace_restore
+
+ vm_num=$i
+ fi
+
+ if [[ $vm_num -eq 256 ]]; then
+ error "no free VM found. do some cleanup (256 VMs created, are you insane?)"
+ return 1
+ fi
+
+ if [[ -n "$vm_migrate_to" && -n "$vm_incoming" ]]; then
+ error "'--incoming' and '--migrate-to' cannot be used together"
+ return 1
+ elif [[ -n "$vm_incoming" ]]; then
+ if [[ -n "$os_mode" || -n "$os" ]]; then
+ error "'--incoming' can't be used together with '--os' nor '--os-mode'"
+ return 1
+ fi
+
+ os_mode="original"
+ os="$VM_DIR/$vm_incoming/os.qcow2"
+ elif [[ -n "$vm_migrate_to" ]]; then
+ [[ "$os_mode" != "backing" ]] && warning "Using 'backing' mode for OS since '--migrate-to' is used"
+ os_mode=backing
+ fi
+
+ notice "Creating new VM in $vm_dir"
+ mkdir -p $vm_dir
+
+ if [[ "$os_mode" == "backing" ]]; then
+ notice "Creating backing file for OS image file: $os"
+ if ! $QEMU_IMG_BIN create -f qcow2 -b $os $vm_dir/os.qcow2; then
+ error "Failed to create OS backing file in '$vm_dir/os.qcow2' using '$os'"
+ return 1
+ fi
+
+ local os=$vm_dir/os.qcow2
+ elif [[ "$os_mode" == "original" ]]; then
+ warning "Using original OS image file: $os"
+ elif [[ "$os_mode" != "snapshot" ]]; then
+ if [[ -z "$os_mode" ]]; then
+ notice "No '--os-mode' parameter provided - using 'snapshot'"
+ os_mode="snapshot"
+ else
+ error "Invalid '--os-mode=$os_mode'"
+ return 1
+ fi
+ fi
+
+ local qemu_mask_param="VM_${vm_num}_qemu_mask"
+ local qemu_numa_node_param="VM_${vm_num}_qemu_numa_node"
+
+ if [[ -z "${!qemu_mask_param}" ]] || [[ -z "${!qemu_numa_node_param}" ]]; then
+ error "Parameters ${qemu_mask_param} or ${qemu_numa_node_param} not found in autotest.config file"
+ return 1
+ fi
+
+ local task_mask=${!qemu_mask_param}
+
+ notice "TASK MASK: $task_mask"
+ local cmd=(taskset -a -c "$task_mask" "$QEMU_BIN")
+ local vm_socket_offset=$((10000 + 100 * vm_num))
+
+ local ssh_socket=$((vm_socket_offset + 0))
+ local fio_socket=$((vm_socket_offset + 1))
+ local monitor_port=$((vm_socket_offset + 2))
+ local migration_port=$((vm_socket_offset + 3))
+ local gdbserver_socket=$((vm_socket_offset + 4))
+ local vnc_socket=$((100 + vm_num))
+ local qemu_pid_file="$vm_dir/qemu.pid"
+ local cpu_num=0
+
+ set +x
+ # cpu list for taskset can be comma separated or range
+ # or both at the same time, so first split on commas
+ cpu_list=$(echo $task_mask | tr "," "\n")
+ queue_number=0
+ for c in $cpu_list; do
+ # if range is detected - count how many cpus
+ if [[ $c =~ [0-9]+-[0-9]+ ]]; then
+ val=$((c - 1))
+ val=${val#-}
+ else
+ val=1
+ fi
+ cpu_num=$((cpu_num + val))
+ queue_number=$((queue_number + val))
+ done
+
+ if [ -z $queue_number ]; then
+ queue_number=$cpu_num
+ fi
+
+ xtrace_restore
+
+ local node_num=${!qemu_numa_node_param}
+ local boot_disk_present=false
+ notice "NUMA NODE: $node_num"
+ cmd+=(-m "$guest_memory" --enable-kvm -cpu host -smp "$cpu_num" -vga std -vnc ":$vnc_socket" -daemonize)
+ cmd+=(-object "memory-backend-file,id=mem,size=${guest_memory}M,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind")
+ [[ $os_mode == snapshot ]] && cmd+=(-snapshot)
+ [[ -n "$vm_incoming" ]] && cmd+=(-incoming "tcp:0:$migration_port")
+ cmd+=(-monitor "telnet:127.0.0.1:$monitor_port,server,nowait")
+ cmd+=(-numa "node,memdev=mem")
+ cmd+=(-pidfile "$qemu_pid_file")
+ cmd+=(-serial "file:$vm_dir/serial.log")
+ cmd+=(-D "$vm_dir/qemu.log")
+ cmd+=(-chardev "file,path=$vm_dir/seabios.log,id=seabios" -device "isa-debugcon,iobase=0x402,chardev=seabios")
+ cmd+=(-net "user,hostfwd=tcp::$ssh_socket-:22,hostfwd=tcp::$fio_socket-:8765")
+ cmd+=(-net nic)
+ if [[ -z "$boot_from" ]]; then
+ cmd+=(-drive "file=$os,if=none,id=os_disk")
+ cmd+=(-device "ide-hd,drive=os_disk,bootindex=0")
+ fi
+
+ if ((${#disks[@]} == 0)) && [[ $disk_type_g == virtio* ]]; then
+ disks=("default_virtio.img")
+ elif ((${#disks[@]} == 0)); then
+ error "No disks defined, aborting"
+ return 1
+ fi
+
+ for disk in "${disks[@]}"; do
+		# Each disk can define its type in the form disk_name,type. Any remaining parts
+		# of the string are dropped.
+ IFS="," read -r disk disk_type _ <<< "$disk"
+ [[ -z $disk_type ]] && disk_type=$disk_type_g
+
+ case $disk_type in
+ virtio)
+ local raw_name="RAWSCSI"
+ local raw_disk=$vm_dir/test.img
+
+ if [[ -n $disk ]]; then
+ [[ ! -b $disk ]] && touch $disk
+ local raw_disk
+ raw_disk=$(readlink -f $disk)
+ fi
+
+				# Create the disk file if it does not exist or is smaller than 1G
+ if { [[ -f $raw_disk ]] && [[ $(stat --printf="%s" $raw_disk) -lt $((1024 * 1024 * 1024)) ]]; } \
+ || [[ ! -e $raw_disk ]]; then
+ if [[ $raw_disk =~ /dev/.* ]]; then
+ error \
+						"ERROR: Virtio disk points to a missing device ($raw_disk) -\n" \
+ " this is probably not what you want."
+ return 1
+ fi
+
+				notice "Creating Virtio disk $raw_disk"
+ dd if=/dev/zero of=$raw_disk bs=1024k count=1024
+ else
+ notice "Using existing image $raw_disk"
+ fi
+
+ cmd+=(-device "virtio-scsi-pci,num_queues=$queue_number")
+ cmd+=(-device "scsi-hd,drive=hd$i,vendor=$raw_name")
+ cmd+=(-drive "if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache")
+ ;;
+ spdk_vhost_scsi)
+ notice "using socket $vhost_dir/naa.$disk.$vm_num"
+ cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
+ cmd+=(-device "vhost-user-scsi-pci,id=scsi_$disk,num_queues=$queue_number,chardev=char_$disk")
+ if [[ "$disk" == "$boot_from" ]]; then
+ cmd[-1]+=,bootindex=0
+ boot_disk_present=true
+ fi
+ ;;
+ spdk_vhost_blk)
+ notice "using socket $vhost_dir/naa.$disk.$vm_num"
+ cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
+ cmd+=(-device "vhost-user-blk-pci,num-queues=$queue_number,chardev=char_$disk")
+ if [[ "$disk" == "$boot_from" ]]; then
+ cmd[-1]+=,bootindex=0
+ boot_disk_present=true
+ fi
+
+ if $packed; then
+ check_qemu_packedring_support
+ notice "Enabling packed ring support for VM $vm_num, controller $vhost_dir/naa.$disk.$vm_num"
+ cmd[-1]+=,packed=on
+ fi
+ ;;
+ kernel_vhost)
+ if [[ -z $disk ]]; then
+ error "need WWN for $disk_type"
+ return 1
+ elif [[ ! $disk =~ ^[[:alpha:]]{3}[.][[:xdigit:]]+$ ]]; then
+				error "$disk_type - disk(wwn)=$disk does not look like a WWN number"
+ return 1
+ fi
+ notice "Using kernel vhost disk wwn=$disk"
+ cmd+=(-device "vhost-scsi-pci,wwpn=$disk,num_queues=$queue_number")
+ ;;
+ *)
+ error "unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk or kernel_vhost"
+ return 1
+ ;;
+ esac
+ done
+
+ if [[ -n $boot_from ]] && [[ $boot_disk_present == false ]]; then
+ error "Boot from $boot_from is selected but device is not present"
+ return 1
+ fi
+
+ ((${#qemu_args[@]})) && cmd+=("${qemu_args[@]}")
+ notice "Saving to $vm_dir/run.sh"
+ cat <<- RUN > "$vm_dir/run.sh"
+ #!/bin/bash
+ qemu_log () {
+ echo "=== qemu.log ==="
+ [[ -s $vm_dir/qemu.log ]] && cat $vm_dir/qemu.log
+ echo "=== qemu.log ==="
+ }
+
+ if [[ \$EUID -ne 0 ]]; then
+ echo "Go away user come back as root"
+ exit 1
+ fi
+
+ trap "qemu_log" EXIT
+
+ qemu_cmd=($(printf '%s\n' "${cmd[@]}"))
+ chmod +r $vm_dir/*
+ echo "Running VM in $vm_dir"
+ rm -f $qemu_pid_file
+ "\${qemu_cmd[@]}"
+
+ echo "Waiting for QEMU pid file"
+ sleep 1
+ [[ ! -f $qemu_pid_file ]] && sleep 1
+ [[ ! -f $qemu_pid_file ]] && echo "ERROR: no qemu pid file found" && exit 1
+ exit 0
+ # EOF
+ RUN
+ chmod +x $vm_dir/run.sh
+
+ # Save generated sockets redirection
+ echo $ssh_socket > $vm_dir/ssh_socket
+ echo $fio_socket > $vm_dir/fio_socket
+ echo $monitor_port > $vm_dir/monitor_port
+
+ rm -f $vm_dir/migration_port
+ [[ -z $vm_incoming ]] || echo $migration_port > $vm_dir/migration_port
+
+ echo $gdbserver_socket > $vm_dir/gdbserver_socket
+ echo $vnc_socket >> $vm_dir/vnc_socket
+
+ [[ -z $vm_incoming ]] || ln -fs $VM_DIR/$vm_incoming $vm_dir/vm_incoming
+ [[ -z $vm_migrate_to ]] || ln -fs $VM_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
+}
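+
+# A typical invocation looks roughly like the following (the disk name and VM
+# number are illustrative, not defaults enforced by this script):
+#   vm_setup --force=0 --disk-type=spdk_vhost_scsi --disks=Nvme0n1 --os=$VM_IMAGE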
+
+function vm_run() {
+ local OPTIND optchar vm
+ local run_all=false
+ local vms_to_run=""
+
+ while getopts 'a-:' optchar; do
+ case "$optchar" in
+ a) run_all=true ;;
+ *)
+ error "Unknown param $OPTARG"
+ return 1
+ ;;
+ esac
+ done
+
+ if $run_all; then
+ vms_to_run="$(vm_list_all)"
+ else
+ shift $((OPTIND - 1))
+ for vm in "$@"; do
+ vm_num_is_valid $1 || return 1
+ if [[ ! -x $VM_DIR/$vm/run.sh ]]; then
+			error "VM$vm is not defined - set it up first"
+ return 1
+ fi
+ vms_to_run+=" $vm"
+ done
+ fi
+
+ for vm in $vms_to_run; do
+ if vm_is_running $vm; then
+ warning "VM$vm ($VM_DIR/$vm) already running"
+ continue
+ fi
+
+ notice "running $VM_DIR/$vm/run.sh"
+ if ! $VM_DIR/$vm/run.sh; then
+ error "FAILED to run vm $vm"
+ return 1
+ fi
+ done
+}
+
+function vm_print_logs() {
+ vm_num=$1
+ warning "================"
+ warning "QEMU LOG:"
+ if [[ -r $VM_DIR/$vm_num/qemu.log ]]; then
+ cat $VM_DIR/$vm_num/qemu.log
+ else
+ warning "LOG qemu.log not found"
+ fi
+
+ warning "VM LOG:"
+ if [[ -r $VM_DIR/$vm_num/serial.log ]]; then
+ cat $VM_DIR/$vm_num/serial.log
+ else
+ warning "LOG serial.log not found"
+ fi
+
+ warning "SEABIOS LOG:"
+ if [[ -r $VM_DIR/$vm_num/seabios.log ]]; then
+ cat $VM_DIR/$vm_num/seabios.log
+ else
+ warning "LOG seabios.log not found"
+ fi
+ warning "================"
+}
+
+# Wait for all created VMs to boot.
+# param $1 max wait time
+function vm_wait_for_boot() {
+ assert_number $1
+
+ xtrace_disable
+
+ local all_booted=false
+ local timeout_time=$1
+ [[ $timeout_time -lt 10 ]] && timeout_time=10
+ local timeout_time
+ timeout_time=$(date -d "+$timeout_time seconds" +%s)
+
+ notice "Waiting for VMs to boot"
+ shift
+ if [[ "$*" == "" ]]; then
+ local vms_to_check="$VM_DIR/[0-9]*"
+ else
+ local vms_to_check=""
+ for vm in "$@"; do
+ vms_to_check+=" $VM_DIR/$vm"
+ done
+ fi
+
+ for vm in $vms_to_check; do
+ local vm_num
+ vm_num=$(basename $vm)
+ local i=0
+ notice "waiting for VM$vm_num ($vm)"
+ while ! vm_os_booted $vm_num; do
+ if ! vm_is_running $vm_num; then
+ warning "VM $vm_num is not running"
+ vm_print_logs $vm_num
+ xtrace_restore
+ return 1
+ fi
+
+ if [[ $(date +%s) -gt $timeout_time ]]; then
+ warning "timeout waiting for machines to boot"
+ vm_print_logs $vm_num
+ xtrace_restore
+ return 1
+ fi
+ if ((i > 30)); then
+ local i=0
+ echo
+ fi
+ echo -n "."
+ sleep 1
+ done
+ echo ""
+ notice "VM$vm_num ready"
+		# Change timeout for stopping services to prevent lengthy powerdowns
+		# Check that remote system is not Cygwin in case of Windows VMs
+ local vm_os
+ vm_os=$(vm_exec $vm_num "uname -o")
+ if [[ "$vm_os" != "Cygwin" ]]; then
+ vm_exec $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
+ fi
+ done
+
+ notice "all VMs ready"
+ xtrace_restore
+ return 0
+}
+
+function vm_start_fio_server() {
+ local OPTIND optchar
+ local readonly=''
+ local fio_bin=''
+ while getopts ':-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
+ readonly) local readonly="--readonly" ;;
+ *) error "Invalid argument '$OPTARG'" && return 1 ;;
+ esac
+ ;;
+ *) error "Invalid argument '$OPTARG'" && return 1 ;;
+ esac
+ done
+
+ shift $((OPTIND - 1))
+ for vm_num in "$@"; do
+ notice "Starting fio server on VM$vm_num"
+ if [[ $fio_bin != "" ]]; then
+ vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
+ vm_exec $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
+ else
+ vm_exec $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
+ fi
+ done
+}
+
+function vm_check_scsi_location() {
+	# Script to find the wanted disk
+ local script='shopt -s nullglob;
+ for entry in /sys/block/sd*; do
+ disk_type="$(cat $entry/device/vendor)";
+ if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then
+ fname=$(basename $entry);
+ echo -n " $fname";
+ fi;
+ done'
+
+ SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"
+
+ if [[ -z "$SCSI_DISK" ]]; then
+ error "no test disk found!"
+ return 1
+ fi
+}
+
+# Note: to use this function your VM should be run with
+# appropriate memory and with SPDK source already cloned
+# and compiled in /root/spdk.
+function vm_check_virtio_location() {
+ vm_exec $1 NRHUGE=512 /root/spdk/scripts/setup.sh
+ vm_exec $1 "cat > /root/bdev.conf" <<- EOF
+ [VirtioPci]
+ Enable Yes
+ EOF
+
+ vm_exec $1 "cat /root/bdev.conf"
+
+ vm_exec $1 "bash -s" <<- EOF
+ set -e
+ rootdir="/root/spdk"
+ source /root/spdk/test/common/autotest_common.sh
+ discover_bdevs /root/spdk /root/bdev.conf | jq -r '[.[].name] | join(" ")' > /root/fio_bdev_filenames
+ exit 0
+ EOF
+
+ SCSI_DISK=$(vm_exec $1 cat /root/fio_bdev_filenames)
+ if [[ -z "$SCSI_DISK" ]]; then
+ error "no virtio test disk found!"
+ return 1
+ fi
+}
+
+# Script to perform scsi device reset on all disks in VM
+# param $1 VM num
+# param $2..$n Disks to perform reset on
+function vm_reset_scsi_devices() {
+ for disk in "${@:2}"; do
+ notice "VM$1 Performing device reset on disk $disk"
+ vm_exec $1 sg_reset /dev/$disk -vNd
+ done
+}
+
+function vm_check_blk_location() {
+ local script='shopt -s nullglob; cd /sys/block; echo vd*'
+ SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"
+
+ if [[ -z "$SCSI_DISK" ]]; then
+ error "no blk test disk found!"
+ return 1
+ fi
+}
+
+function run_fio() {
+ local arg
+ local job_file=""
+ local fio_bin=""
+ local vms=()
+ local out=""
+ local vm
+ local run_server_mode=true
+ local run_plugin_mode=false
+ local fio_start_cmd
+ local fio_output_format="normal"
+ local fio_gtod_reduce=false
+ local wait_for_fio=true
+ local vm_exec_pids=()
+
+ for arg in "$@"; do
+ case "$arg" in
+ --job-file=*) local job_file="${arg#*=}" ;;
+ --fio-bin=*) local fio_bin="${arg#*=}" ;;
+ --vm=*) vms+=("${arg#*=}") ;;
+ --out=*)
+ local out="${arg#*=}"
+ mkdir -p $out
+ ;;
+ --local) run_server_mode=false ;;
+ --plugin)
+ notice "Using plugin mode. Disabling server mode."
+ run_plugin_mode=true
+ run_server_mode=false
+ ;;
+ --json) fio_output_format="json" ;;
+ --hide-results) hide_results=true ;;
+ --no-wait-for-fio) wait_for_fio=false ;;
+ --gtod-reduce) fio_gtod_reduce=true ;;
+ *)
+ error "Invalid argument '$arg'"
+ return 1
+ ;;
+ esac
+ done
+
+ if [[ -n "$fio_bin" && ! -r "$fio_bin" ]]; then
+ error "FIO binary '$fio_bin' does not exist"
+ return 1
+ fi
+
+ if [[ -z "$fio_bin" ]]; then
+ fio_bin="fio"
+ fi
+
+ if [[ ! -r "$job_file" ]]; then
+ error "Fio job '$job_file' does not exist"
+ return 1
+ fi
+
+ fio_start_cmd="$fio_bin --eta=never "
+
+ local job_fname
+ job_fname=$(basename "$job_file")
+ log_fname="${job_fname%%.*}.log"
+ fio_start_cmd+=" --output=$out/$log_fname --output-format=$fio_output_format "
+
+ # prepare job file for each VM
+ for vm in "${vms[@]}"; do
+ local vm_num=${vm%%:*}
+ local vmdisks=${vm#*:}
+
+ sed "s@filename=@filename=$vmdisks@" $job_file | vm_exec $vm_num "cat > /root/$job_fname"
+
+ if $fio_gtod_reduce; then
+ vm_exec $vm_num "echo 'gtod_reduce=1' >> /root/$job_fname"
+ fi
+
+ vm_exec $vm_num cat /root/$job_fname
+
+ if $run_server_mode; then
+ fio_start_cmd+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$job_fname "
+ fi
+
+ if ! $run_server_mode; then
+ if [[ -n "$fio_bin" ]]; then
+ if ! $run_plugin_mode; then
+ vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
+ vm_fio_bin="/root/fio"
+ else
+ vm_fio_bin="/usr/src/fio/fio"
+ fi
+ fi
+
+ notice "Running local fio on VM $vm_num"
+ vm_exec $vm_num "$vm_fio_bin --output=/root/$log_fname --output-format=$fio_output_format /root/$job_fname & echo \$! > /root/fio.pid" &
+ vm_exec_pids+=("$!")
+ fi
+ done
+
+ if ! $run_server_mode; then
+ if ! $wait_for_fio; then
+ return 0
+ fi
+ echo "Waiting for guest fio instances to finish.."
+ wait "${vm_exec_pids[@]}"
+
+ for vm in "${vms[@]}"; do
+ local vm_num=${vm%%:*}
+ vm_exec $vm_num cat /root/$log_fname > "$out/vm${vm_num}_${log_fname}"
+ done
+ return 0
+ fi
+
+ $fio_start_cmd
+ sleep 1
+
+ if [[ "$fio_output_format" == "json" ]]; then
+ # Fio in client-server mode produces a lot of "trash" output
+ # preceding the JSON structure, making it impossible to parse.
+ # Remove these lines from the file.
+ # shellcheck disable=SC2005
+ echo "$(grep -vP '^[<\w]' "$out/$log_fname")" > "$out/$log_fname"
+ fi
+
+ if [[ ! $hide_results ]]; then
+ cat $out/$log_fname
+ fi
+}
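+# Illustrative usage of run_fio() (a sketch based on the fiotest/fio.sh call;
+# the device path is an example only): run a job file against VM 0's /dev/sda in
+# fio client-server mode and store logs under $VHOST_DIR/fio_results.
+#
+#   run_fio --fio-bin=$fio_bin --job-file=$fio_job --out="$VHOST_DIR/fio_results" --vm="0:/dev/sda"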
+
+# Parsing fio results for json output and client-server mode only!
+function parse_fio_results() {
+ local fio_log_dir=$1
+ local fio_log_filename=$2
+ local fio_csv_filename
+
+ # Variables used in parsing loop
+ local log_file
+ local rwmode mixread mixwrite
+ local lat_key lat_divisor
+ local client_stats iops bw
+ local read_avg_lat read_min_lat read_max_lat
+ local write_avg_lat write_min_lat write_max_lat
+
+ declare -A results
+ results["iops"]=0
+ results["bw"]=0
+ results["avg_lat"]=0
+ results["min_lat"]=0
+ results["max_lat"]=0
+
+ # Loop over the log filename glob to pick up any other matching files,
+ # in case the fio test was run multiple times.
+ log_files=("$fio_log_dir/$fio_log_filename"*)
+ for log_file in "${log_files[@]}"; do
+ rwmode=$(jq -r '.["client_stats"][0]["job options"]["rw"]' "$log_file")
+ mixread=1
+ mixwrite=1
+ if [[ $rwmode = *"rw"* ]]; then
+ mixread=$(jq -r '.["client_stats"][0]["job options"]["rwmixread"]' "$log_file")
+ mixread=$(bc -l <<< "scale=3; $mixread/100")
+ mixwrite=$(bc -l <<< "scale=3; 1-$mixread")
+ fi
+
+ client_stats=$(jq -r '.["client_stats"][] | select(.jobname == "All clients")' "$log_file")
+
+ # Check latency unit and later normalize to microseconds
+ lat_key="lat_us"
+ lat_divisor=1
+ if jq -er '.read["lat_ns"]' &> /dev/null <<< $client_stats; then
+ lat_key="lat_ns"
+ lat_divisor=1000
+ fi
+
+ # Horrific bash floating point arithmetic operations below.
+ # Viewer discretion is advised.
+ iops=$(jq -r '[.read["iops"],.write["iops"]] | add' <<< $client_stats)
+ bw=$(jq -r '[.read["bw"],.write["bw"]] | add' <<< $client_stats)
+ read_avg_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["mean"]' <<< $client_stats)
+ read_min_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["min"]' <<< $client_stats)
+ read_max_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["max"]' <<< $client_stats)
+ write_avg_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["mean"]' <<< $client_stats)
+ write_min_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["min"]' <<< $client_stats)
+ write_max_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["max"]' <<< $client_stats)
+
+ results["iops"]=$(bc -l <<< "${results[iops]} + $iops")
+ results["bw"]=$(bc -l <<< "${results[bw]} + $bw")
+ results["avg_lat"]=$(bc -l <<< "${results[avg_lat]} + ($mixread*$read_avg_lat + $mixwrite*$write_avg_lat)/$lat_divisor")
+ results["min_lat"]=$(bc -l <<< "${results[min_lat]} + ($mixread*$read_min_lat + $mixwrite*$write_min_lat)/$lat_divisor")
+ results["max_lat"]=$(bc -l <<< "${results[max_lat]} + ($mixread*$read_max_lat + $mixwrite*$write_max_lat)/$lat_divisor")
+ done
+
+ results["iops"]=$(bc -l <<< "scale=3; ${results[iops]} / ${#log_files[@]}")
+ results["bw"]=$(bc -l <<< "scale=3; ${results[bw]} / ${#log_files[@]}")
+ results["avg_lat"]=$(bc -l <<< "scale=3; ${results[avg_lat]} / ${#log_files[@]}")
+ results["min_lat"]=$(bc -l <<< "scale=3; ${results[min_lat]} / ${#log_files[@]}")
+ results["max_lat"]=$(bc -l <<< "scale=3; ${results[max_lat]} / ${#log_files[@]}")
+
+ fio_csv_filename="${fio_log_filename%%.*}.csv"
+ cat <<- EOF > "$fio_log_dir/$fio_csv_filename"
+ iops,bw,avg_lat,min_lat,max_lat
+ ${results["iops"]},${results["bw"]},${results["avg_lat"]},${results["min_lat"]},${results["max_lat"]}
+ EOF
+}
+
+# Shut down or kill any running VMs and the SPDK app.
+#
+function at_app_exit() {
+ local vhost_name
+
+ notice "APP EXITING"
+ notice "killing all VMs"
+ vm_kill_all
+ # Kill vhost application
+ notice "killing vhost app"
+
+ for vhost_name in "$TARGET_DIR"/*; do
+ vhost_kill $vhost_name
+ done
+
+ notice "EXIT DONE"
+}
+
+function error_exit() {
+ trap - ERR
+ print_backtrace
+ set +e
+ error "Error on $1 $2"
+
+ at_app_exit
+ exit 1
+}
diff --git a/src/spdk/test/vhost/common/autotest.config b/src/spdk/test/vhost/common/autotest.config
new file mode 100644
index 000000000..96b0d08be
--- /dev/null
+++ b/src/spdk/test/vhost/common/autotest.config
@@ -0,0 +1,38 @@
+vhost_0_reactor_mask="[0]"
+vhost_0_master_core=0
+
+VM_0_qemu_mask=1-2
+VM_0_qemu_numa_node=0
+
+VM_1_qemu_mask=3-4
+VM_1_qemu_numa_node=0
+
+VM_2_qemu_mask=5-6
+VM_2_qemu_numa_node=0
+
+VM_3_qemu_mask=7-8
+VM_3_qemu_numa_node=0
+
+VM_4_qemu_mask=9-10
+VM_4_qemu_numa_node=0
+
+VM_5_qemu_mask=11-12
+VM_5_qemu_numa_node=0
+
+VM_6_qemu_mask=13-14
+VM_6_qemu_numa_node=1
+
+VM_7_qemu_mask=15-16
+VM_7_qemu_numa_node=1
+
+VM_8_qemu_mask=17-18
+VM_8_qemu_numa_node=1
+
+VM_9_qemu_mask=19-20
+VM_9_qemu_numa_node=1
+
+VM_10_qemu_mask=21-22
+VM_10_qemu_numa_node=1
+
+VM_11_qemu_mask=23-24
+VM_11_qemu_numa_node=1
diff --git a/src/spdk/test/vhost/common/fio_jobs/default_initiator.job b/src/spdk/test/vhost/common/fio_jobs/default_initiator.job
new file mode 100644
index 000000000..32c993bd2
--- /dev/null
+++ b/src/spdk/test/vhost/common/fio_jobs/default_initiator.job
@@ -0,0 +1,11 @@
+[global]
+thread=1
+group_reporting=1
+direct=1
+time_based=1
+do_verify=1
+verify=md5
+verify_backlog=1024
+fsync_on_close=1
+iodepth=32
+[job0]
diff --git a/src/spdk/test/vhost/common/fio_jobs/default_integrity.job b/src/spdk/test/vhost/common/fio_jobs/default_integrity.job
new file mode 100644
index 000000000..06398b506
--- /dev/null
+++ b/src/spdk/test/vhost/common/fio_jobs/default_integrity.job
@@ -0,0 +1,19 @@
+[global]
+blocksize_range=4k-512k
+iodepth=512
+iodepth_batch=128
+iodepth_low=256
+ioengine=libaio
+size=1G
+io_size=4G
+filename=
+group_reporting
+thread
+numjobs=1
+direct=1
+rw=randwrite
+do_verify=1
+verify=md5
+verify_backlog=1024
+fsync_on_close=1
+[nvme-host]
diff --git a/src/spdk/test/vhost/common/fio_jobs/default_integrity_nightly.job b/src/spdk/test/vhost/common/fio_jobs/default_integrity_nightly.job
new file mode 100644
index 000000000..097401780
--- /dev/null
+++ b/src/spdk/test/vhost/common/fio_jobs/default_integrity_nightly.job
@@ -0,0 +1,23 @@
+[global]
+ioengine=libaio
+runtime=10
+filename=
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_backlog=1024
+
+[randwrite]
+stonewall
+rw=randwrite
+bs=512k
+iodepth=256
+
+[randrw]
+stonewall
+rw=randrw
+bs=128k
+iodepth=64
diff --git a/src/spdk/test/vhost/common/fio_jobs/default_performance.job b/src/spdk/test/vhost/common/fio_jobs/default_performance.job
new file mode 100644
index 000000000..a51cb5eda
--- /dev/null
+++ b/src/spdk/test/vhost/common/fio_jobs/default_performance.job
@@ -0,0 +1,16 @@
+[global]
+blocksize_range=4k-512k
+iodepth=512
+iodepth_batch=128
+iodepth_low=256
+ioengine=libaio
+size=10G
+filename=
+ramp_time=10
+group_reporting
+thread
+numjobs=1
+direct=1
+rw=randread
+fsync_on_close=1
+[nvme-host]
diff --git a/src/spdk/test/vhost/fio/fio.sh b/src/spdk/test/vhost/fio/fio.sh
new file mode 100755
index 000000000..3d8bf6092
--- /dev/null
+++ b/src/spdk/test/vhost/fio/fio.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+set -e
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+MALLOC_BDEV_SIZE=128
+MALLOC_BLOCK_SIZE=512
+
+vhosttestinit
+
+#TODO: Both scsi and blk?
+
+timing_enter vhost_fio
+
+trap "at_app_exit; process_shm --id 0; exit 1" SIGINT SIGTERM EXIT
+
+vhost_run vhost0 "-m 0x1"
+
+# Create vhost scsi controller
+vhost_rpc vhost0 bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+vhost_rpc vhost0 vhost_create_scsi_controller naa.VhostScsi0.0
+vhost_rpc vhost0 vhost_scsi_controller_add_target naa.VhostScsi0.0 0 "Malloc0"
+
+# Create vhost blk controller
+vhost_rpc vhost0 bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
+vhost_rpc vhost0 vhost_create_blk_controller naa.Malloc1.1 Malloc1
+
+# Start qemu based VMs
+vm_setup --os="$VM_IMAGE" --disk-type=spdk_vhost_scsi --disks="VhostScsi0" --vhost-name=vhost0 --force=0
+vm_setup --os="$VM_IMAGE" --disk-type=spdk_vhost_blk --disks="Malloc1" --vhost-name=vhost0 --force=1
+
+vm_run 0
+vm_run 1
+
+vm_wait_for_boot 300 0
+vm_wait_for_boot 300 1
+sleep 5
+
+# Run the fio workload on the VM
+vm_scp 0 $testdir/vhost_fio.job 127.0.0.1:/root/vhost_fio.job
+vm_exec 0 "fio /root/vhost_fio.job"
+
+vm_scp 1 $testdir/vhost_fio.job 127.0.0.1:/root/vhost_fio.job
+vm_exec 1 "fio /root/vhost_fio.job"
+
+# Shut the VM down
+vm_shutdown_all
+
+# Shut vhost down
+vhost_kill vhost0
+
+trap - SIGINT SIGTERM EXIT
+
+vhosttestfini
+timing_exit vhost_fio
diff --git a/src/spdk/test/vhost/fio/vhost_fio.job b/src/spdk/test/vhost/fio/vhost_fio.job
new file mode 100644
index 000000000..350aa895e
--- /dev/null
+++ b/src/spdk/test/vhost/fio/vhost_fio.job
@@ -0,0 +1,19 @@
+[global]
+blocksize=4k-512k
+iodepth=128
+ioengine=libaio
+filename=/dev/sdb
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_fatal=1
+verify_dump=1
+verify_backlog=8
+
+[randwrite]
+rw=randwrite
+runtime=15
+time_based
diff --git a/src/spdk/test/vhost/fiotest/fio.sh b/src/spdk/test/vhost/fiotest/fio.sh
new file mode 100755
index 000000000..930948d6d
--- /dev/null
+++ b/src/spdk/test/vhost/fiotest/fio.sh
@@ -0,0 +1,288 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+dry_run=false
+no_shutdown=false
+fio_bin=""
+remote_fio_bin=""
+fio_jobs=""
+test_type=spdk_vhost_scsi
+reuse_vms=false
+vms=()
+used_vms=""
+x=""
+readonly=""
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help print help and exit"
+ echo " --test-type=TYPE Perform specified test:"
+ echo " virtio - test host virtio-scsi-pci using file as disk image"
+ echo " kernel_vhost - use kernel driver vhost-scsi"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo "-x set -x for script debug"
+ echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
+ echo " --fio-job= Fio config to use for test."
+ echo " All VMs will run the same fio job when FIO executes."
+ echo " (no unique jobs for specific VMs)"
+ echo " --dry-run Don't perform any tests, run only and wait for enter to terminate"
+ echo " --no-shutdown Don't shutdown at the end but leave envirionment working"
+ echo " --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
+ echo " NUM - VM number (mandatory)"
+ echo " OS - VM os disk path (optional)"
+ echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
+ echo " --readonly Use readonly for fio"
+ exit 0
+}
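+#
+# Illustrative invocation (a sketch; the job path and disk name are examples only):
+#
+#   ./fio.sh --test-type=spdk_vhost_scsi \
+#            --fio-job=$rootdir/test/vhost/common/fio_jobs/default_integrity.job \
+#            --vm=0,$VM_IMAGE,Nvme0n1p0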
+
+#default raw file is NVMe drive
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
+ fio-job=*) fio_job="${OPTARG#*=}" ;;
+ dry-run) dry_run=true ;;
+ no-shutdown) no_shutdown=true ;;
+ test-type=*) test_type="${OPTARG#*=}" ;;
+ vm=*) vms+=("${OPTARG#*=}") ;;
+ readonly) readonly="--readonly" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+shift $((OPTIND - 1))
+
+if [[ ! -r "$fio_job" ]]; then
+ fail "no fio job file specified"
+fi
+
+vhosttestinit
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
+
+vm_kill_all
+
+if [[ $test_type =~ "spdk_vhost" ]]; then
+ notice "==============="
+ notice ""
+ notice "running SPDK"
+ notice ""
+ vhost_run 0
+ rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+ $rpc_py bdev_split_create Nvme0n1 4
+ $rpc_py bdev_malloc_create -b Malloc0 128 4096
+ $rpc_py bdev_malloc_create -b Malloc1 128 4096
+ $rpc_py bdev_malloc_create -b Malloc2 64 512
+ $rpc_py bdev_malloc_create -b Malloc3 64 512
+ $rpc_py bdev_malloc_create -b Malloc4 64 512
+ $rpc_py bdev_malloc_create -b Malloc5 64 512
+ $rpc_py bdev_malloc_create -b Malloc6 64 512
+ $rpc_py bdev_raid_create -n RaidBdev0 -z 128 -r 0 -b "Malloc2 Malloc3"
+ $rpc_py bdev_raid_create -n RaidBdev1 -z 128 -r 0 -b "Nvme0n1p2 Malloc4"
+ $rpc_py bdev_raid_create -n RaidBdev2 -z 128 -r 0 -b "Malloc5 Malloc6"
+ $rpc_py vhost_create_scsi_controller --cpumask 0x1 vhost.0
+ $rpc_py vhost_scsi_controller_add_target vhost.0 0 Malloc0
+ $rpc_py vhost_create_blk_controller --cpumask 0x1 -r vhost.1 Malloc1
+ notice ""
+fi
+
+notice "==============="
+notice ""
+notice "Setting up VM"
+notice ""
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+for vm_conf in "${vms[@]}"; do
+ IFS=',' read -ra conf <<< "$vm_conf"
+ if [[ x"${conf[0]}" == x"" ]] || ! assert_number ${conf[0]}; then
+ fail "invalid VM configuration syntax $vm_conf"
+ fi
+
+ # Sanity check if VM is not defined twice
+ for vm_num in $used_vms; do
+ if [[ $vm_num -eq ${conf[0]} ]]; then
+ fail "VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
+ fi
+ done
+
+ used_vms+=" ${conf[0]}"
+
+ if [[ $test_type =~ "spdk_vhost" ]]; then
+
+ notice "Adding device via RPC ..."
+
+ while IFS=':' read -ra disks; do
+ for disk in "${disks[@]}"; do
+ notice "Create a lvol store on RaidBdev2 and then a lvol bdev on the lvol store"
+ if [[ $disk == "RaidBdev2" ]]; then
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore RaidBdev2 lvs_0 -c 4194304)
+ free_mb=$(get_lvs_free_mb "$ls_guid")
+ based_disk=$($rpc_py bdev_lvol_create -u $ls_guid lbd_0 $free_mb)
+ else
+ based_disk="$disk"
+ fi
+
+ if [[ "$test_type" == "spdk_vhost_blk" ]]; then
+ disk=${disk%%_*}
+ notice "Creating vhost block controller naa.$disk.${conf[0]} with device $disk"
+ $rpc_py vhost_create_blk_controller naa.$disk.${conf[0]} $based_disk
+ else
+ notice "Creating controller naa.$disk.${conf[0]}"
+ $rpc_py vhost_create_scsi_controller naa.$disk.${conf[0]}
+
+ notice "Adding device (0) to naa.$disk.${conf[0]}"
+ $rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
+ fi
+ done
+ done <<< "${conf[2]}"
+ unset IFS
+ $rpc_py vhost_get_controllers
+ fi
+
+ setup_cmd="vm_setup --force=${conf[0]} --disk-type=$test_type"
+ [[ x"${conf[1]}" != x"" ]] && setup_cmd+=" --os=${conf[1]}"
+ [[ x"${conf[2]}" != x"" ]] && setup_cmd+=" --disks=${conf[2]}"
+
+ $setup_cmd
+done
+
+# Run everything
+vm_run $used_vms
+vm_wait_for_boot 300 $used_vms
+
+if [[ $test_type == "spdk_vhost_scsi" ]]; then
+ for vm_conf in "${vms[@]}"; do
+ IFS=',' read -ra conf <<< "$vm_conf"
+ while IFS=':' read -ra disks; do
+ for disk in "${disks[@]}"; do
+ # For RaidBdev2, the lvol bdev on RaidBdev2 is being used.
+ if [[ $disk == "RaidBdev2" ]]; then
+ based_disk="lvs_0/lbd_0"
+ else
+ based_disk="$disk"
+ fi
+ notice "Hotdetach test. Trying to remove existing device from a controller naa.$disk.${conf[0]}"
+ $rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0
+
+ sleep 0.1
+
+ notice "Hotattach test. Re-adding device 0 to naa.$disk.${conf[0]}"
+ $rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
+ done
+ done <<< "${conf[2]}"
+ unset IFS
+ done
+fi
+
+sleep 0.1
+
+notice "==============="
+notice ""
+notice "Testing..."
+
+notice "Running fio jobs ..."
+
+# Check if all VMs have their disks in the same location
+DISK=""
+
+fio_disks=""
+for vm_num in $used_vms; do
+ qemu_mask_param="VM_${vm_num}_qemu_mask"
+
+ host_name="VM-$vm_num"
+ notice "Setting up hostname: $host_name"
+ vm_exec $vm_num "hostname $host_name"
+ vm_start_fio_server $fio_bin $readonly $vm_num
+
+ if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
+ vm_check_scsi_location $vm_num
+ #vm_reset_scsi_devices $vm_num $SCSI_DISK
+ elif [[ "$test_type" == "spdk_vhost_blk" ]]; then
+ vm_check_blk_location $vm_num
+ fi
+
+ fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
+done
+
+if $dry_run; then
+ read -r -p "Enter to kill evething" xx
+ sleep 3
+ at_app_exit
+ exit 0
+fi
+
+run_fio $fio_bin --job-file="$fio_job" --out="$VHOST_DIR/fio_results" $fio_disks
+
+if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
+ for vm_num in $used_vms; do
+ vm_reset_scsi_devices $vm_num $SCSI_DISK
+ done
+fi
+
+if ! $no_shutdown; then
+ notice "==============="
+ notice "APP EXITING"
+ notice "killing all VMs"
+ vm_shutdown_all
+ notice "waiting 2 seconds to let all VMs die"
+ sleep 2
+ if [[ $test_type =~ "spdk_vhost" ]]; then
+ notice "Removing vhost devices & controllers via RPC ..."
+ for vm_conf in "${vms[@]}"; do
+ IFS=',' read -ra conf <<< "$vm_conf"
+
+ while IFS=':' read -ra disks; do
+ for disk in "${disks[@]}"; do
+ disk=${disk%%_*}
+ notice "Removing all vhost devices from controller naa.$disk.${conf[0]}"
+ if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
+ $rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0
+ fi
+
+ $rpc_py vhost_delete_controller naa.$disk.${conf[0]}
+ if [[ $disk == "RaidBdev2" ]]; then
+ notice "Removing lvol bdev and lvol store"
+ $rpc_py bdev_lvol_delete lvs_0/lbd_0
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_0
+ fi
+ done
+ done <<< "${conf[2]}"
+ done
+ fi
+ notice "Testing done -> shutting down"
+ notice "killing vhost app"
+ vhost_kill 0
+
+ notice "EXIT DONE"
+ notice "==============="
+else
+ notice "==============="
+ notice ""
+ notice "Leaving environment working!"
+ notice ""
+ notice "==============="
+fi
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/fuzz/fuzz.sh b/src/spdk/test/vhost/fuzz/fuzz.sh
new file mode 100755
index 000000000..7502f1976
--- /dev/null
+++ b/src/spdk/test/vhost/fuzz/fuzz.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+set -e
+
+rootdir=$(readlink -f $(dirname $0))/../../..
+source $rootdir/test/common/autotest_common.sh
+source "$rootdir/scripts/common.sh"
+
+VHOST_APP+=(-p 0)
+FUZZ_RPC_SOCK="/var/tmp/spdk_fuzz.sock"
+VHOST_FUZZ_APP+=(-r "$FUZZ_RPC_SOCK" -g --wait-for-rpc)
+
+vhost_rpc_py="$rootdir/scripts/rpc.py"
+fuzz_generic_rpc_py="$rootdir/scripts/rpc.py -s $FUZZ_RPC_SOCK"
+fuzz_specific_rpc_py="$rootdir/test/app/fuzz/common/fuzz_rpc.py -s $FUZZ_RPC_SOCK"
+
+"${VHOST_APP[@]}" > "$output_dir/vhost_fuzz_tgt_output.txt" 2>&1 &
+vhostpid=$!
+waitforlisten $vhostpid
+
+trap 'killprocess $vhostpid; exit 1' SIGINT SIGTERM exit
+
+"${VHOST_FUZZ_APP[@]}" -t 10 2> "$output_dir/vhost_fuzz_output1.txt" &
+fuzzpid=$!
+waitforlisten $fuzzpid $FUZZ_RPC_SOCK
+
+trap 'killprocess $vhostpid; killprocess $fuzzpid; exit 1' SIGINT SIGTERM exit
+
+$vhost_rpc_py bdev_malloc_create -b Malloc0 64 512
+$vhost_rpc_py vhost_create_blk_controller Vhost.1 Malloc0
+
+$vhost_rpc_py bdev_malloc_create -b Malloc1 64 512
+$vhost_rpc_py vhost_create_scsi_controller naa.VhostScsi0.1
+$vhost_rpc_py vhost_scsi_controller_add_target naa.VhostScsi0.1 0 Malloc1
+
+$vhost_rpc_py bdev_malloc_create -b Malloc2 64 512
+$vhost_rpc_py vhost_create_scsi_controller naa.VhostScsi0.2
+$vhost_rpc_py vhost_scsi_controller_add_target naa.VhostScsi0.2 0 Malloc2
+
+# test the vhost blk controller with valid data buffers.
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/Vhost.1 -b -v
+# test the vhost scsi I/O queue with valid data buffers on a valid lun.
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.1 -l -v
+# test the vhost scsi management queue with valid data buffers.
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.2 -v -m
+# The test won't actually begin until this option is passed in.
+$fuzz_generic_rpc_py framework_start_init
+
+wait $fuzzpid
+
+"${VHOST_FUZZ_APP[@]}" -j "$rootdir/test/app/fuzz/vhost_fuzz/example.json" 2> "$output_dir/vhost_fuzz_output2.txt" &
+fuzzpid=$!
+waitforlisten $fuzzpid $FUZZ_RPC_SOCK
+
+# re-evaluate fuzzpid
+trap 'killprocess $vhostpid; killprocess $fuzzpid; exit 1' SIGINT SIGTERM exit
+
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/Vhost.1 -b -v
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.1 -l -v
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.2 -v -m
+$fuzz_generic_rpc_py framework_start_init
+
+wait $fuzzpid
+
+trap - SIGINT SIGTERM exit
+
+killprocess $vhostpid
diff --git a/src/spdk/test/vhost/hotplug/blk_hotremove.sh b/src/spdk/test/vhost/hotplug/blk_hotremove.sh
new file mode 100644
index 000000000..d0edab83a
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/blk_hotremove.sh
@@ -0,0 +1,235 @@
+# Vhost blk hot remove tests
+#
+# Objective
+# The purpose of these tests is to verify that SPDK vhost remains stable during
+# hot-remove operations performed on SCSI and BLK controller devices.
+# Hot-remove is a scenario where an NVMe device is removed while already in use.
+#
+# Test cases description
+# 1. FIO I/O traffic is run during hot-remove operations.
+# By default FIO uses default_integrity*.job config files located in
+# test/vhost/hotplug/fio_jobs directory.
+# 2. FIO mode of operation is random write (randwrite) with verification enabled
+# which results in also performing read operations.
+# 3. In each test case the fio status is checked after every run to see if any errors occurred.
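+#
+# The test cases below share a common pattern (sketch built from the helpers in
+# test/vhost/hotplug/common.sh):
+#
+#   $run_fio &                      # start verified fio I/O in the background
+#   last_pid=$!
+#   delete_nvme "HotInNvme0"        # hot remove the NVMe bdev while I/O is running
+#   retcode=0
+#   wait_for_finish $last_pid || retcode=$?
+#   check_fio_retcode "<tc description>" 1 $retcode   # non-zero fio return code expected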
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_blk_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_2discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_2discs.job "
+ rm $tmp_detach_job
+ done
+}
+
+function vhost_delete_controllers() {
+ $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p3.1
+}
+
+# Vhost blk hot remove test cases
+#
+# Test Case 1
+function blk_hotremove_tc1() {
+ echo "Blk hotremove test case 1"
+ traddr=""
+ # 1. Run the command to hot remove NVMe disk.
+ get_traddr "Nvme0"
+ delete_nvme "Nvme0"
+ # 2. If vhost had crashed then tests would stop running
+ sleep 1
+ add_nvme "HotInNvme0" "$traddr"
+ sleep 1
+}
+
+# Test Case 2
+function blk_hotremove_tc2() {
+ echo "Blk hotremove test case 2"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme0n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
+ # 2. Run two VMs and attach every VM to two blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic with verification enabled on NVMe disk.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove NVMe disk.
+ delete_nvme "HotInNvme0"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 5. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 2: Iteration 1." 1 $retcode
+
+ # 6. Reboot VM
+ reboot_all_and_prepare "0"
+ # 7. Run FIO I/O traffic with verification enabled on NVMe disk.
+ $run_fio &
+ local retcode=0
+ wait_for_finish $! || retcode=$?
+ # 8. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 2: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme1" "$traddr"
+ sleep 1
+}
+
+# ## Test Case 3
+function blk_hotremove_tc3() {
+ echo "Blk hotremove test case 3"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme1n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme1n1p1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
+ # 2. Run two VMs and attach every VM to two blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic with verification enabled on first NVMe disk.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme1"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 6. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 3: Iteration 1." 1 $retcode
+
+ # 7. Reboot VM
+ reboot_all_and_prepare "0"
+ local retcode=0
+ # 8. Run FIO I/O traffic with verification enabled on removed NVMe disk.
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 9. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 3: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme2" "$traddr"
+ sleep 1
+}
+
+# Test Case 4
+function blk_hotremove_tc4() {
+ echo "Blk hotremove test case 4"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme2n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme2n1p1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
+ # 2. Run two VMs attached to blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic on first VM with verification enabled on both NVMe disks.
+ $run_fio &
+ local last_pid_vm0=$!
+
+ prepare_fio_cmd_tc1 "1"
+ # 4. Run FIO I/O traffic on second VM with verification enabled on both NVMe disks.
+ $run_fio &
+ local last_pid_vm1=$!
+
+ sleep 3
+ prepare_fio_cmd_tc1 "0 1"
+ # 5. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme2"
+ local retcode_vm0=0
+ local retcode_vm1=0
+ wait_for_finish $last_pid_vm0 || retcode_vm0=$?
+ wait_for_finish $last_pid_vm1 || retcode_vm1=$?
+ # 6. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 4: Iteration 1." 1 $retcode_vm0
+ check_fio_retcode "Blk hotremove test case 4: Iteration 2." 1 $retcode_vm1
+
+ # 7. Reboot all VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on removed NVMe disk.
+ $run_fio &
+ local retcode=0
+ wait_for_finish $! || retcode=$?
+ # 9. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 4: Iteration 3." 1 $retcode
+
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme3" "$traddr"
+ sleep 1
+}
+
+# Test Case 5
+function blk_hotremove_tc5() {
+ echo "Blk hotremove test case 5"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme3n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
+ # 2. Run two VMs attached to blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic on first VM with verification enabled on both NVMe disks.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme3"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 5. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 5: Iteration 1." 1 $retcode
+
+ # 6. Reboot VM.
+ reboot_all_and_prepare "0"
+ local retcode=0
+ # 7. Run FIO I/O traffic with verification enabled on removed NVMe disk.
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 8. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 5: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme4" "$traddr"
+ sleep 1
+}
+
+vms_setup
+blk_hotremove_tc1
+blk_hotremove_tc2
+blk_hotremove_tc3
+blk_hotremove_tc4
+blk_hotremove_tc5
diff --git a/src/spdk/test/vhost/hotplug/common.sh b/src/spdk/test/vhost/hotplug/common.sh
new file mode 100644
index 000000000..b7b05ee74
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/common.sh
@@ -0,0 +1,230 @@
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+dry_run=false
+no_shutdown=false
+fio_bin="fio"
+fio_jobs="$testdir/fio_jobs/"
+test_type=spdk_vhost_scsi
+reuse_vms=false
+vms=()
+used_vms=""
+disk_split=""
+x=""
+scsi_hot_remove_test=0
+blk_hot_remove_test=0
+readonly=""
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated hotattach/hotdetach test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help print help and exit"
+ echo " --test-type=TYPE Perform specified test:"
+ echo " virtio - test host virtio-scsi-pci using file as disk image"
+ echo " kernel_vhost - use kernel driver vhost-scsi"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo "-x set -x for script debug"
+ echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
+ echo " --fio-jobs= Fio configs to use for tests. Can point to a directory or"
+ echo " --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
+ echo " NUM - VM number (mandatory)"
+ echo " OS - VM os disk path (optional)"
+ echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
+ echo " --scsi-hotremove-test Run scsi hotremove tests"
+ echo " --readonly Use readonly for fio"
+ exit 0
+}
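+#
+# Illustrative invocation (a sketch; the disk name is an example only), typically
+# through the top-level scsi_hotplug.sh wrapper that sources this file:
+#
+#   ./scsi_hotplug.sh --vm=0,$VM_IMAGE,Nvme0n1p0 --scsi-hotremove-test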
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ fio-bin=*) fio_bin="${OPTARG#*=}" ;;
+ fio-jobs=*) fio_jobs="${OPTARG#*=}" ;;
+ test-type=*) test_type="${OPTARG#*=}" ;;
+ vm=*) vms+=("${OPTARG#*=}") ;;
+ scsi-hotremove-test) scsi_hot_remove_test=1 ;;
+ blk-hotremove-test) blk_hot_remove_test=1 ;;
+ readonly) readonly="--readonly" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+shift $((OPTIND - 1))
+
+fio_job=$testdir/fio_jobs/default_integrity.job
+tmp_attach_job=$testdir/fio_jobs/fio_attach.job.tmp
+tmp_detach_job=$testdir/fio_jobs/fio_detach.job.tmp
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+function print_test_fio_header() {
+ notice "==============="
+ notice ""
+ notice "Testing..."
+
+ notice "Running fio jobs ..."
+ if [ $# -gt 0 ]; then
+ echo $1
+ fi
+}
+
+function vms_setup() {
+ for vm_conf in "${vms[@]}"; do
+ IFS=',' read -ra conf <<< "$vm_conf"
+ if [[ x"${conf[0]}" == x"" ]] || ! assert_number ${conf[0]}; then
+ fail "invalid VM configuration syntax $vm_conf"
+ fi
+
+ # Sanity check if VM is not defined twice
+ for vm_num in $used_vms; do
+ if [[ $vm_num -eq ${conf[0]} ]]; then
+ fail "VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
+ fi
+ done
+
+ used_vms+=" ${conf[0]}"
+
+ setup_cmd="vm_setup --disk-type=$test_type --force=${conf[0]}"
+ [[ x"${conf[1]}" != x"" ]] && setup_cmd+=" --os=${conf[1]}"
+ [[ x"${conf[2]}" != x"" ]] && setup_cmd+=" --disks=${conf[2]}"
+ $setup_cmd
+ done
+}
+
+function vm_run_with_arg() {
+ local vms_to_run="$*"
+ vm_run $vms_to_run
+ vm_wait_for_boot 300 $vms_to_run
+}
+
+function vms_setup_and_run() {
+ local vms_to_run="$*"
+ vms_setup
+ vm_run_with_arg $vms_to_run
+}
+
+function vms_prepare() {
+ for vm_num in $1; do
+ qemu_mask_param="VM_${vm_num}_qemu_mask"
+
+ host_name="VM-${vm_num}-${!qemu_mask_param}"
+ notice "Setting up hostname: $host_name"
+ vm_exec $vm_num "hostname $host_name"
+ vm_start_fio_server --fio-bin=$fio_bin $readonly $vm_num
+ done
+}
+
+function vms_reboot_all() {
+ notice "Rebooting all vms "
+ for vm_num in $1; do
+ vm_exec $vm_num "reboot" || true
+ while vm_os_booted $vm_num; do
+ sleep 0.5
+ done
+ done
+
+ vm_wait_for_boot 300 $1
+}
+
+function check_fio_retcode() {
+ local fio_retcode=$3
+ echo $1
+ local retcode_expected=$2
+ if [ $retcode_expected == 0 ]; then
+ if [ $fio_retcode != 0 ]; then
+ error " Fio test ended with error."
+ else
+ notice " Fio test ended with success."
+ fi
+ else
+ if [ $fio_retcode != 0 ]; then
+ notice " Fio test ended with expected error."
+ else
+ error " Fio test ended with unexpected success."
+ fi
+ fi
+}
+
+function wait_for_finish() {
+ local wait_for_pid=$1
+ local sequence=${2:-30}
+ for i in $(seq 1 $sequence); do
+ if kill -0 $wait_for_pid; then
+ sleep 0.5
+ continue
+ else
+ break
+ fi
+ done
+ if kill -0 $wait_for_pid; then
+ error "Timeout for fio command"
+ fi
+
+ wait $wait_for_pid
+}
+
+function reboot_all_and_prepare() {
+ vms_reboot_all "$1"
+ vms_prepare "$1"
+}
+
+function post_test_case() {
+ vm_shutdown_all
+ vhost_kill 0
+}
+
+function on_error_exit() {
+ set +e
+ echo "Error on $1 - $2"
+ post_test_case
+ print_backtrace
+ exit 1
+}
+
+function check_disks() {
+ if [ "$1" == "$2" ]; then
+ echo "Disk has not been deleted"
+ exit 1
+ fi
+}
+
+function get_traddr() {
+ local nvme_name=$1
+ local nvme
+ nvme="$($rootdir/scripts/gen_nvme.sh)"
+ while read -r line; do
+ if [[ $line == *"TransportID"* ]] && [[ $line == *$nvme_name* ]]; then
+ local word_array=($line)
+ for word in "${word_array[@]}"; do
+ if [[ $word == *"traddr"* ]]; then
+ traddr=$(echo $word | sed 's/traddr://' | sed 's/"//')
+ fi
+ done
+ fi
+ done <<< "$nvme"
+}
+
+function delete_nvme() {
+ $rpc_py bdev_nvme_detach_controller $1
+}
+
+function add_nvme() {
+ $rpc_py bdev_nvme_attach_controller -b $1 -t PCIe -a $2
+}
diff --git a/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job b/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job
new file mode 100644
index 000000000..136fe9029
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job
@@ -0,0 +1,16 @@
+[global]
+blocksize=4k
+iodepth=512
+iodepth_batch=128
+iodepth_low=256
+ioengine=libaio
+group_reporting
+thread
+numjobs=1
+direct=1
+rw=randwrite
+do_verify=1
+verify=md5
+verify_backlog=1024
+time_based=1
+runtime=10
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotattach.sh b/src/spdk/test/vhost/hotplug/scsi_hotattach.sh
new file mode 100755
index 000000000..4b9e26ab8
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotattach.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/vhost/hotplug/common.sh
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_attach_job
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_attach_job
+ echo "filename=/dev/$disk" >> $tmp_attach_job
+ done
+ vm_scp $vm_num $tmp_attach_job 127.0.0.1:/root/default_integrity_discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket ${vm_num}) --remote-config /root/default_integrity_discs.job "
+ rm $tmp_attach_job
+ done
+}
+
+# Check if fio test passes on device attached to first controller.
+function hotattach_tc1() {
+ notice "Hotattach test case 1"
+
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 Nvme0n1p0
+
+ sleep 3
+ prepare_fio_cmd_tc1 "0"
+ $run_fio
+ check_fio_retcode "Hotattach test case 1: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached device.
+# During test attach another device to first controller and check fio status.
+function hotattach_tc2() {
+ notice "Hotattach test case 2"
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 1 Nvme0n1p1
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 2: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached devices.
+# During test attach another device to second controller and check fio status.
+function hotattach_tc3() {
+ notice "Hotattach test case 3"
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Nvme0n1p2
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 3: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached devices.
+# During test attach another device to third controller (VM2) and check fio status.
+# At the end after rebooting VMs run fio test for all devices and check fio status.
+function hotattach_tc4() {
+ notice "Hotattach test case 4"
+
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 Nvme0n1p3
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 4: Iteration 1." 0 $?
+
+ prepare_fio_cmd_tc1 "0 1"
+ $run_fio
+ check_fio_retcode "Hotattach test case 4: Iteration 2." 0 $?
+
+ reboot_all_and_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0 1"
+ $run_fio
+ check_fio_retcode "Hotattach test case 4: Iteration 3." 0 $?
+}
+
+function cleanup_after_tests() {
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p0.0 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p0.0 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p2.1 0
+}
+
+hotattach_tc1
+hotattach_tc2
+hotattach_tc3
+hotattach_tc4
+cleanup_after_tests
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh b/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh
new file mode 100755
index 000000000..8a7cb264f
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh
@@ -0,0 +1,212 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/vhost/hotplug/common.sh
+
+function get_first_disk() {
+ vm_check_scsi_location $1
+ disk_array=($SCSI_DISK)
+ eval "$2=${disk_array[0]}"
+}
+
+function check_disks() {
+ if [ "$1" == "$2" ]; then
+ fail "Disk has not been deleted"
+ fi
+}
+
+function prepare_fio_cmd_tc1_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_4discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_4discs.job "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc2_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_scsi_location $vm_num
+ disk_array=($SCSI_DISK)
+ disk=${disk_array[0]}
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity.job "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc2_iter2() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ if [ $vm_num == 2 ]; then
+ vm_job_name=default_integrity_3discs.job
+ else
+ vm_job_name=default_integrity_4discs.job
+ fi
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/${vm_job_name} "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc3_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ if [ $vm_num == 2 ]; then
+ vm_job_name=default_integrity_3discs.job
+ else
+ vm_job_name=default_integrity_4discs.job
+ fi
+ vm_check_scsi_location $vm_num
+ j=1
+ for disk in $SCSI_DISK; do
+ if [ $vm_num == 2 ]; then
+ if [ $j == 1 ]; then
+ ((j++))
+ continue
+ fi
+ fi
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ ((j++))
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$vm_job_name "
+ rm $tmp_detach_job
+ done
+}
+
+# During a fio test on all devices, remove the first device from the fifth controller and check that fio fails.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc1() {
+ notice "Hotdetach test case 1"
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc1_iter1 "2 3"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 1: Iteration 1." 1 $?
+ set -xe
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# During a fio test on a device from the third VM, remove the first device from the fifth controller and check that fio fails.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc2() {
+ notice "Hotdetach test case 2"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc2_iter1 "2"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 2: Iteration 1." 1 $?
+ set -xe
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# Run fio test for all devices except one, then remove this device and check if fio passes.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc3() {
+ notice "Hotdetach test case 3"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc3_iter1 "2 3"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 3: Iteration 1." 0 $?
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# Run fio test for all devices except one and run separate fio test for this device.
+# Check if first fio test passes and second fio test fails.
+# Also check that the disk has been removed from the VM.
+# After reboot run fio test for remaining devices and check if fio passes.
+function hotdetach_tc4() {
+ notice "Hotdetach test case 4"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc2_iter1 "2"
+ $run_fio &
+ first_fio_pid=$!
+ prepare_fio_cmd_tc3_iter1 "2 3"
+ $run_fio &
+ second_fio_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $first_fio_pid
+ check_fio_retcode "Hotdetach test case 4: Iteration 1." 1 $?
+ set -xe
+ wait $second_fio_pid
+ check_fio_retcode "Hotdetach test case 4: Iteration 2." 0 $?
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+
+ reboot_all_and_prepare "2 3"
+ sleep 2
+ prepare_fio_cmd_tc2_iter2 "2 3"
+ $run_fio
+ check_fio_retcode "Hotdetach test case 4: Iteration 3." 0 $?
+ clear_after_tests
+}
+
+function clear_after_tests() {
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p4.2 0 Nvme0n1p8
+}
+
+hotdetach_tc1
+hotdetach_tc2
+hotdetach_tc3
+hotdetach_tc4
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotplug.sh b/src/spdk/test/vhost/hotplug/scsi_hotplug.sh
new file mode 100755
index 000000000..40132ab8a
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotplug.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/vhost/hotplug/common.sh
+
+if [[ $scsi_hot_remove_test == 1 ]] && [[ $blk_hot_remove_test == 1 ]]; then
+ notice "Vhost-scsi and vhost-blk hotremove tests cannot be run together"
+fi
+
+# Run spdk by calling run_vhost from hotplug/common.sh.
+# Then prepare vhost with rpc calls and set up and run 4 VMs.
+function pre_hot_attach_detach_test_case() {
+ used_vms=""
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p3.1
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p4.2
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p5.2
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p6.3
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p7.3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p4.2 0 Nvme0n1p8
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p4.2 1 Nvme0n1p9
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p5.2 0 Nvme0n1p10
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p5.2 1 Nvme0n1p11
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p6.3 0 Nvme0n1p12
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p6.3 1 Nvme0n1p13
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p7.3 0 Nvme0n1p14
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p7.3 1 Nvme0n1p15
+ vms_setup_and_run "0 1 2 3"
+ vms_prepare "0 1 2 3"
+}
+
+function clear_vhost_config() {
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p5.2 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p5.2 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p6.3 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p6.3 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p7.3 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p7.3 1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p3.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p4.2
+ $rpc_py vhost_delete_controller naa.Nvme0n1p5.2
+ $rpc_py vhost_delete_controller naa.Nvme0n1p6.3
+ $rpc_py vhost_delete_controller naa.Nvme0n1p7.3
+}
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
+# Hotremove/hotattach/hotdetach test cases prerequisites
+# Run vhost with 2 NVMe disks.
+
+notice "==============="
+notice ""
+notice "running SPDK"
+notice ""
+vhost_run 0
+$rpc_py bdev_nvme_set_hotplug -e
+$rpc_py bdev_split_create Nvme0n1 16
+$rpc_py bdev_malloc_create 128 512 -b Malloc
+$rpc_py bdev_split_create Malloc 4
+$rpc_py bdev_split_create HotInNvme0n1 2
+$rpc_py bdev_split_create HotInNvme1n1 2
+$rpc_py bdev_split_create HotInNvme2n1 2
+$rpc_py bdev_split_create HotInNvme3n1 2
+$rpc_py bdev_get_bdevs
+
+if [[ $scsi_hot_remove_test == 0 ]] && [[ $blk_hot_remove_test == 0 ]]; then
+ pre_hot_attach_detach_test_case
+ $testdir/scsi_hotattach.sh --fio-bin=$fio_bin &
+ first_script=$!
+ $testdir/scsi_hotdetach.sh --fio-bin=$fio_bin &
+ second_script=$!
+ wait $first_script
+ wait $second_script
+ vm_shutdown_all
+ clear_vhost_config
+fi
+if [[ $scsi_hot_remove_test == 1 ]]; then
+ source $testdir/scsi_hotremove.sh
+fi
+if [[ $blk_hot_remove_test == 1 ]]; then
+ source $testdir/blk_hotremove.sh
+fi
+post_test_case
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotremove.sh b/src/spdk/test/vhost/hotplug/scsi_hotremove.sh
new file mode 100644
index 000000000..1dee4ac7f
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotremove.sh
@@ -0,0 +1,233 @@
+set -xe
+
+# Vhost SCSI hotremove tests
+#
+# # Objective
+# The purpose of these tests is to verify that SPDK vhost remains stable during
+# hot-remove operations performed on SCSI controller devices.
+# Hot-remove is a scenario where an NVMe device is removed while already in use.
+# Tests consist of 4 test cases.
+#
+# # Test cases description
+# 1. FIO I/O traffic is run during hot-remove operations.
+# By default FIO uses default_integrity*.job config files located in
+# test/vhost/hotplug/fio_jobs directory.
+# 2. FIO mode of operation is random write (randwrite) with verification enabled
+# which results in also performing read operations.
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ cat <<- EOL >> $tmp_detach_job
+ [nvme-host$disk]
+ filename=/dev/$disk
+ size=100%
+ EOL
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_2discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_2discs.job "
+ rm $tmp_detach_job
+ done
+}
+
+# Vhost SCSI hot-remove test cases.
+
+# Test Case 1
+function scsi_hotremove_tc1() {
+ echo "Scsi hotremove test case 1"
+ traddr=""
+ get_traddr "Nvme0"
+ # 1. Run the command to hot remove NVMe disk.
+ delete_nvme "Nvme0"
+ # 2. If vhost had crashed then tests would stop running
+ sleep 1
+ add_nvme "HotInNvme0" "$traddr"
+}
+
+# Test Case 2
+function scsi_hotremove_tc2() {
+ echo "Scsi hotremove test case 2"
+ # 1. Attach split NVMe bdevs to scsi controller.
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme0n1p0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Mallocp0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme0n1p1
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p3.1 0 Mallocp1
+
+ # 2. Run two VMs, attached to scsi controllers.
+ vms_setup
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+
+ vm_check_scsi_location "0"
+ local disks="$SCSI_DISK"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0 1"
+ # 3. Run FIO I/O traffic with verification enabled on both NVMe disks in the VM.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove NVMe disk.
+ delete_nvme "HotInNvme0"
+
+ # 5. Check that the fio job run on the hot-removed device stopped on the VM.
+ # Expected: Fio should return error message and return code != 0.
+ wait_for_finish $last_pid || retcode=$?
+ check_fio_retcode "Scsi hotremove test case 2: Iteration 1." 1 $retcode
+
+ # 6. Check if removed devices are gone from VM.
+ vm_check_scsi_location "0"
+ local new_disks="$SCSI_DISK"
+ check_disks "$disks" "$new_disks"
+ # 7. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on both VMs.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 9. Check that the fio job run on the hot-removed device stopped on both VMs.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Scsi hotremove test case 2: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme1" "$traddr"
+ sleep 1
+}
+
+# Test Case 3
+function scsi_hotremove_tc3() {
+ echo "Scsi hotremove test case 3"
+ # 1. Attach added NVMe bdev to scsi controller.
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme1n1p0
+ # 2. Run two VMs attached to scsi controllers.
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+ vm_check_scsi_location "0"
+ local disks="$SCSI_DISK"
+ traddr=""
+ get_traddr "Nvme0"
+ # 3. Run FIO I/O traffic with verification enabled on both NVMe disks in the VMs.
+ prepare_fio_cmd_tc1 "0"
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove NVMe disk.
+ delete_nvme "HotInNvme1"
+ # 5. Check that the fio job run on the hot-removed device stopped on the first VM.
+ # Expected: Fio should return error message and return code != 0.
+ wait_for_finish $last_pid || retcode=$?
+ check_fio_retcode "Scsi hotremove test case 3: Iteration 1." 1 $retcode
+ # 6. Check if removed devices are gone from lsblk.
+ vm_check_scsi_location "0"
+ local new_disks="$SCSI_DISK"
+ check_disks "$disks" "$new_disks"
+ # 7. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on both VMs.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 9. Check that the fio job run on the hot-removed device stopped on both VMs.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Scsi hotremove test case 3: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme2" "$traddr"
+ sleep 1
+}
+
+# Test Case 4
+function scsi_hotremove_tc4() {
+ echo "Scsi hotremove test case 4"
+ # 1. Attach NVMe bdevs to scsi controllers.
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme2n1p0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme2n1p1
+ # 2. Run two VMs, attached to the scsi controllers.
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+
+ # 3. Run FIO I/O traffic with verification enabled on first VM.
+ vm_check_scsi_location "0"
+ local disks_vm0="$SCSI_DISK"
+ # 4. Run FIO I/O traffic with verification enabled on second VM.
+ prepare_fio_cmd_tc1 "0"
+ $run_fio &
+ local last_pid_vm0=$!
+
+ vm_check_scsi_location "1"
+ local disks_vm1="$SCSI_DISK"
+ prepare_fio_cmd_tc1 "1"
+ $run_fio &
+ local last_pid_vm1=$!
+ prepare_fio_cmd_tc1 "0 1"
+ sleep 3
+ # 5. Run the command to hot remove NVMe disk.
+ traddr=""
+ get_traddr "Nvme0"
+ delete_nvme "HotInNvme2"
+ # 6. Check that the fio jobs running on the hot-removed devices stopped.
+ # Expected: Fio should return error message and return code != 0.
+ local retcode_vm0=0
+ wait_for_finish $last_pid_vm0 || retcode_vm0=$?
+ local retcode_vm1=0
+ wait_for_finish $last_pid_vm1 || retcode_vm1=$?
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 1." 1 $retcode_vm0
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 2." 1 $retcode_vm1
+
+ # 7. Check if removed devices are gone from lsblk.
+ vm_check_scsi_location "0"
+ local new_disks_vm0="$SCSI_DISK"
+ check_disks "$disks_vm0" "$new_disks_vm0"
+ vm_check_scsi_location "1"
+ local new_disks_vm1="$SCSI_DISK"
+ check_disks "$disks_vm1" "$new_disks_vm1"
+
+ # 8. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 9. Run FIO I/O traffic with verification enabled on the not-removed NVMe disk.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 10. Check that the fio job running on the hot-removed device stopped.
+ # Expected: Fio should return an error message and a return code != 0.
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 3." 1 $retcode
+ prepare_fio_cmd_tc1 "0 1"
+ # 11. Run FIO I/O traffic with verification enabled on the not-removed NVMe disk.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 12. Check the finished FIO status. Writes and reads on the not-removed
+ # NVMe disk should be successful.
+ # Expected: Fio should return a return code == 0.
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 4." 0 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme3" "$traddr"
+ sleep 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p3.1 0
+}
+
+function pre_scsi_hotremove_test_case() {
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p3.1
+}
+
+function post_scsi_hotremove_test_case() {
+ $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p3.1
+}
+
+pre_scsi_hotremove_test_case
+scsi_hotremove_tc1
+scsi_hotremove_tc2
+scsi_hotremove_tc3
+scsi_hotremove_tc4
+post_scsi_hotremove_test_case
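The four test cases above delegate their pass/fail decision to check_fio_retcode, together with wait_for_finish, get_traddr, delete_nvme and add_nvme, which come from the hotplug helpers added earlier in this patch. As a rough sketch of the contract the cases rely on (not the suite's actual implementation; names and messages here are illustrative only), the check amounts to comparing an expected and an actual fio exit code:

    # Sketch: fail when fio's exit status does not match the expectation
    function check_fio_retcode_sketch() {
        local test_name=$1 expected=$2 actual=$3
        echo "$test_name"
        if ((expected == 0 && actual != 0)); then
            echo "ERROR: fio exited with $actual but success was expected" >&2
            return 1
        elif ((expected != 0 && actual == 0)); then
            echo "ERROR: fio exited cleanly but a failure was expected" >&2
            return 1
        fi
    }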
diff --git a/src/spdk/test/vhost/initiator/autotest.config b/src/spdk/test/vhost/initiator/autotest.config
new file mode 100644
index 000000000..61a1a2424
--- /dev/null
+++ b/src/spdk/test/vhost/initiator/autotest.config
@@ -0,0 +1,5 @@
+vhost_0_reactor_mask=["0"]
+vhost_0_master_core=0
+
+VM_0_qemu_mask=1-10
+VM_0_qemu_numa_node=0
diff --git a/src/spdk/test/vhost/initiator/bdev.fio b/src/spdk/test/vhost/initiator/bdev.fio
new file mode 100644
index 000000000..405202282
--- /dev/null
+++ b/src/spdk/test/vhost/initiator/bdev.fio
@@ -0,0 +1,51 @@
+[global]
+thread=1
+group_reporting=1
+direct=1
+norandommap=1
+time_based=1
+do_verify=1
+verify=md5
+verify_backlog=1024
+iodepth=128
+bs=4K
+runtime=10
+size=13%
+
+[job_randwrite]
+rw=randwrite
+name=randwrite
+
+[job_randrw]
+offset=13%
+rw=randrw
+name=randrw
+
+[job_write]
+offset=26%
+rw=write
+name=write
+
+[job_rw]
+offset=39%
+rw=rw
+name=rw
+
+[job_unmap_trim_sequential]
+offset=52%
+rw=trim
+trim_verify_zero=1
+name=unmap_trim_sequential
+
+[job_unmap_trim_random]
+offset=65%
+rw=randtrim
+trim_verify_zero=1
+name=unmap_trim_random
+
+[job_unmap_write]
+stonewall
+offset=52%
+size=26%
+rw=randwrite
+name=unmap_write
diff --git a/src/spdk/test/vhost/initiator/bdev_pci.conf b/src/spdk/test/vhost/initiator/bdev_pci.conf
new file mode 100644
index 000000000..0e47e88a7
--- /dev/null
+++ b/src/spdk/test/vhost/initiator/bdev_pci.conf
@@ -0,0 +1,2 @@
+[VirtioPci]
+ Enable Yes
diff --git a/src/spdk/test/vhost/initiator/blockdev.sh b/src/spdk/test/vhost/initiator/blockdev.sh
new file mode 100755
index 000000000..9667f1f3d
--- /dev/null
+++ b/src/spdk/test/vhost/initiator/blockdev.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+
+function run_spdk_fio() {
+ fio_bdev --ioengine=spdk_bdev "$@" --spdk_mem=1024 --spdk_single_seg=1 \
+ --verify_state_save=0
+}
+
+function err_cleanup() {
+ rm -f $testdir/bdev.json
+ killprocess $vhost_pid
+ if [[ -n "$dummy_spdk_pid" ]] && kill -0 $dummy_spdk_pid &> /dev/null; then
+ killprocess $dummy_spdk_pid
+ fi
+}
+
+# start vhost and configure it
+trap 'err_cleanup; exit 1' SIGINT SIGTERM EXIT
+$SPDK_BIN_DIR/vhost &
+vhost_pid=$!
+waitforlisten $vhost_pid
+
+$rootdir/scripts/gen_nvme.sh --json | $rootdir/scripts/rpc.py load_subsystem_config
+if [ -z "$(rpc_cmd bdev_get_bdevs | jq '.[] | select(.name=="Nvme0n1")')" ]; then
+ echo "Nvme0n1 bdev not found!" && false
+fi
+
+rpc_cmd bdev_split_create Nvme0n1 6
+
+rpc_cmd vhost_create_scsi_controller naa.Nvme0n1_scsi0.0
+rpc_cmd vhost_scsi_controller_add_target naa.Nvme0n1_scsi0.0 0 Nvme0n1p0
+rpc_cmd vhost_scsi_controller_add_target naa.Nvme0n1_scsi0.0 1 Nvme0n1p1
+rpc_cmd vhost_scsi_controller_add_target naa.Nvme0n1_scsi0.0 2 Nvme0n1p2
+rpc_cmd vhost_scsi_controller_add_target naa.Nvme0n1_scsi0.0 3 Nvme0n1p3
+
+rpc_cmd vhost_create_blk_controller naa.Nvme0n1_blk0.0 Nvme0n1p4
+rpc_cmd vhost_create_blk_controller naa.Nvme0n1_blk1.0 Nvme0n1p5
+
+rpc_cmd bdev_malloc_create 128 512 --name Malloc0
+rpc_cmd vhost_create_scsi_controller naa.Malloc0.0
+rpc_cmd vhost_scsi_controller_add_target naa.Malloc0.0 0 Malloc0
+
+rpc_cmd bdev_malloc_create 128 4096 --name Malloc1
+rpc_cmd vhost_create_scsi_controller naa.Malloc1.0
+rpc_cmd vhost_scsi_controller_add_target naa.Malloc1.0 0 Malloc1
+
+# start a dummy app, create vhost bdevs in it, then dump the config for FIO
+$SPDK_BIN_DIR/spdk_tgt -r /tmp/spdk2.sock -g &
+dummy_spdk_pid=$!
+waitforlisten $dummy_spdk_pid /tmp/spdk2.sock
+rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Nvme0n1_scsi0.0' -d scsi --vq-count 8 'VirtioScsi0'
+rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Nvme0n1_blk0.0' -d blk --vq-count 8 'VirtioBlk3'
+rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Nvme0n1_blk1.0' -d blk --vq-count 8 'VirtioBlk4'
+
+rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Malloc0.0' -d scsi --vq-count 8 'VirtioScsi1'
+rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Malloc1.0' -d scsi --vq-count 8 'VirtioScsi2'
+
+cat <<- CONF > $testdir/bdev.json
+ {"subsystems":[
+ $(rpc_cmd -s /tmp/spdk2.sock save_subsystem_config -n bdev)
+ ]}
+CONF
+killprocess $dummy_spdk_pid
+
+# run FIO with previously acquired spdk config files
+timing_enter run_spdk_fio
+run_spdk_fio $testdir/bdev.fio --filename=* --section=job_randwrite --spdk_json_conf=$testdir/bdev.json
+timing_exit run_spdk_fio
+
+timing_enter run_spdk_fio_unmap
+run_spdk_fio $testdir/bdev.fio --filename="VirtioScsi1t0:VirtioScsi2t0" --spdk_json_conf=$testdir/bdev.json
+timing_exit run_spdk_fio_unmap
+
+rpc_cmd bdev_nvme_detach_controller Nvme0
+
+trap - SIGINT SIGTERM EXIT
+rm -f $testdir/bdev.json
+
+killprocess $vhost_pid
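run_spdk_fio above wraps fio's SPDK bdev ioengine around the JSON config dumped from the dummy target. For a quick manual sanity check outside the script, the same wrapper arguments can be pointed at a single virtio bdev; the job parameters below are placeholders and assume the bdev.json produced by the steps above:

    # Hypothetical one-off smoke run against one attached virtio bdev
    fio_bdev --ioengine=spdk_bdev --spdk_json_conf=./bdev.json --spdk_mem=1024 --spdk_single_seg=1 \
        --name=smoke --filename=VirtioScsi0t0 --rw=randread --bs=4k --size=16m \
        --runtime=5 --time_based=1 --verify_state_save=0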
diff --git a/src/spdk/test/vhost/integrity/integrity_start.sh b/src/spdk/test/vhost/integrity/integrity_start.sh
new file mode 100755
index 000000000..ff3e98bda
--- /dev/null
+++ b/src/spdk/test/vhost/integrity/integrity_start.sh
@@ -0,0 +1,106 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+ctrl_type="spdk_vhost_scsi"
+vm_fs="ext4"
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help Print help and exit"
+ echo " --ctrl-type=TYPE Controller type to use for test:"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " --fs=FS_LIST Filesystems to use for test in VM:"
+ echo " Example: --fs=\"ext4 ntfs ext2\""
+ echo " Default: ext4"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo "-x set -x for script debug"
+ exit 0
+}
+
+function clean_lvol_cfg() {
+ notice "Removing lvol bdev and lvol store"
+ $rpc_py bdev_lvol_delete lvol_store/lvol_bdev
+ $rpc_py bdev_lvol_delete_lvstore -l lvol_store
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
+ fs=*) vm_fs="${OPTARG#*=}" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+vhosttestinit
+
+. $(readlink -e "$(dirname $0)/../common.sh") || exit 1
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR
+
+# Try to kill if any VM remains from previous runs
+vm_kill_all
+
+notice "Starting SPDK vhost"
+vhost_run 0
+notice "..."
+
+# Set up lvols and vhost controllers
+trap 'clean_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR
+notice "Creating lvol store and lvol bdev on top of Nvme0n1"
+lvs_uuid=$($rpc_py bdev_lvol_create_lvstore Nvme0n1 lvol_store)
+$rpc_py bdev_lvol_create lvol_bdev 10000 -l lvol_store
+
+if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1.0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1.0 0 lvol_store/lvol_bdev
+elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1.0 lvol_store/lvol_bdev
+fi
+
+# Set up and run VM
+setup_cmd="vm_setup --disk-type=$ctrl_type --force=0"
+setup_cmd+=" --os=$VM_IMAGE"
+setup_cmd+=" --disks=Nvme0n1"
+$setup_cmd
+
+# Run VM
+vm_run 0
+vm_wait_for_boot 300 0
+
+# Run tests on VM
+vm_scp 0 $testdir/integrity_vm.sh root@127.0.0.1:/root/integrity_vm.sh
+vm_exec 0 "/root/integrity_vm.sh $ctrl_type \"$vm_fs\""
+
+notice "Shutting down virtual machine..."
+vm_shutdown_all
+
+clean_lvol_cfg
+
+$rpc_py bdev_nvme_detach_controller Nvme0
+
+notice "Shutting down SPDK vhost app..."
+vhost_kill 0
+
+vhosttestfini
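integrity_start.sh is driven entirely by its command-line options; a typical invocation (the image path and filesystem list below are examples, not requirements) would be:

    # Run the integrity test against a vhost-blk controller with two filesystems
    VM_IMAGE=$HOME/vhost_vm_image.qcow2 ./integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="ext4 xfs"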
diff --git a/src/spdk/test/vhost/integrity/integrity_vm.sh b/src/spdk/test/vhost/integrity/integrity_vm.sh
new file mode 100755
index 000000000..5e83fef95
--- /dev/null
+++ b/src/spdk/test/vhost/integrity/integrity_vm.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+set -xe
+
+MAKE="make -j$(($(nproc) * 2))"
+
+if [[ $1 == "spdk_vhost_scsi" ]]; then
+ devs=""
+ for entry in /sys/block/sd*; do
+ if grep -Eq '(INTEL|RAWSCSI|LIO-ORG)' $entry/device/vendor; then
+ devs+="$(basename $entry) "
+ fi
+ done
+elif [[ $1 == "spdk_vhost_blk" ]]; then
+ devs=$(
+ cd /sys/block
+ echo vd*
+ )
+fi
+
+fs=$2
+
+trap "exit 1" SIGINT SIGTERM EXIT
+
+for fs in $fs; do
+ for dev in $devs; do
+ i=0
+ parted_cmd="parted -s /dev/${dev}"
+
+ echo "INFO: Creating partition table on disk using: $parted_cmd mklabel gpt"
+ $parted_cmd mklabel gpt
+ while ! ($parted_cmd print | grep -q gpt); do
+ [[ $i -lt 100 ]] || break
+ i=$((i + 1))
+ sleep 0.1
+ done
+ $parted_cmd mkpart primary 2048s 100%
+
+ mkfs_cmd="mkfs.$fs"
+ if [[ $fs == "ntfs" ]]; then
+ mkfs_cmd+=" -f"
+ fi
+ mkfs_cmd+=" /dev/${dev}1"
+ echo "INFO: Creating filesystem using: $mkfs_cmd"
+ i=0
+ until wipefs -a /dev/${dev}1; do
+ [[ $i -lt 100 ]] || break
+ i=$((i + 1))
+ echo "Waiting for /dev/${dev}1"
+ sleep 0.1
+ done
+ $mkfs_cmd
+
+ mkdir -p /mnt/${dev}dir
+ mount -o sync /dev/${dev}1 /mnt/${dev}dir
+
+ fio --name="integrity" --bsrange=4k-512k --iodepth=128 --numjobs=1 --direct=1 \
+ --thread=1 --group_reporting=1 --rw=randrw --rwmixread=70 \
+ --filename=/mnt/${dev}dir/test_file --verify=md5 --do_verify=1 \
+ --verify_backlog=1024 --fsync_on_close=1 --runtime=20 --time_based=1 --size=512m
+
+ # Print out space consumed on target device
+ df -h /dev/$dev
+ done
+
+ for dev in $devs; do
+ umount /mnt/${dev}dir
+ rm -rf /mnt/${dev}dir
+ parted -s /dev/${dev} rm 1
+
+ stats=($(cat /sys/block/$dev/stat))
+ echo ""
+ echo "$dev stats"
+ printf "READ IO cnt: % 8u merges: % 8u sectors: % 8u ticks: % 8u\n" \
+ ${stats[0]} ${stats[1]} ${stats[2]} ${stats[3]}
+ printf "WRITE IO cnt: % 8u merges: % 8u sectors: % 8u ticks: % 8u\n" \
+ ${stats[4]} ${stats[5]} ${stats[6]} ${stats[7]}
+ printf "in flight: % 8u io ticks: % 8u time in queue: % 8u\n" \
+ ${stats[8]} ${stats[9]} ${stats[10]}
+ echo ""
+ done
+done
+
+trap - SIGINT SIGTERM EXIT
diff --git a/src/spdk/test/vhost/lvol/autotest.config b/src/spdk/test/vhost/lvol/autotest.config
new file mode 100644
index 000000000..9b653cd7f
--- /dev/null
+++ b/src/spdk/test/vhost/lvol/autotest.config
@@ -0,0 +1,74 @@
+vhost_0_reactor_mask="[0-31]"
+vhost_0_master_core=0
+
+VM_0_qemu_mask=1
+VM_0_qemu_numa_node=0
+
+VM_1_qemu_mask=2
+VM_1_qemu_numa_node=0
+
+VM_2_qemu_mask=3
+VM_2_qemu_numa_node=0
+
+VM_3_qemu_mask=4
+VM_3_qemu_numa_node=0
+
+VM_4_qemu_mask=5
+VM_4_qemu_numa_node=0
+
+VM_5_qemu_mask=6
+VM_5_qemu_numa_node=0
+
+VM_6_qemu_mask=7
+VM_6_qemu_numa_node=0
+
+VM_7_qemu_mask=8
+VM_7_qemu_numa_node=0
+
+VM_8_qemu_mask=9
+VM_8_qemu_numa_node=0
+
+VM_9_qemu_mask=10
+VM_9_qemu_numa_node=0
+
+VM_10_qemu_mask=11
+VM_10_qemu_numa_node=0
+
+VM_11_qemu_mask=12
+VM_11_qemu_numa_node=0
+
+VM_12_qemu_mask=13
+VM_12_qemu_numa_node=1
+
+VM_13_qemu_mask=14
+VM_13_qemu_numa_node=1
+
+VM_14_qemu_mask=15
+VM_14_qemu_numa_node=1
+
+VM_15_qemu_mask=16
+VM_15_qemu_numa_node=1
+
+VM_16_qemu_mask=17
+VM_16_qemu_numa_node=1
+
+VM_17_qemu_mask=18
+VM_17_qemu_numa_node=1
+
+VM_18_qemu_mask=19
+VM_18_qemu_numa_node=1
+
+VM_19_qemu_mask=20
+VM_19_qemu_numa_node=1
+
+VM_20_qemu_mask=21
+VM_20_qemu_numa_node=1
+
+VM_21_qemu_mask=22
+VM_21_qemu_numa_node=1
+
+VM_22_qemu_mask=23
+VM_22_qemu_numa_node=1
+
+VM_23_qemu_mask=24
+VM_23_qemu_numa_node=1
diff --git a/src/spdk/test/vhost/lvol/lvol_test.sh b/src/spdk/test/vhost/lvol/lvol_test.sh
new file mode 100755
index 000000000..fba38e059
--- /dev/null
+++ b/src/spdk/test/vhost/lvol/lvol_test.sh
@@ -0,0 +1,289 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/scripts/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+vm_count=1
+max_disks=""
+ctrl_type="spdk_vhost_scsi"
+use_fs=false
+nested_lvol=false
+distribute_cores=false
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help Print help and exit"
+ echo " --fio-bin=PATH Path to FIO binary.;"
+ echo " --vm-count=INT Virtual machines to use in test;"
+ echo " Each VM will get one lvol bdev on each NVMe."
+ echo " Default: 1"
+ echo " --max-disks=INT Maximum number of NVMe drives to use in test."
+ echo " Default: will use all available NVMes."
+ echo " --ctrl-type=TYPE Controller type to use for test:"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo " --nested-lvol If enabled will create additional lvol bdev"
+ echo " on each NVMe for use as base device for next"
+ echo " lvol store and lvol bdevs."
+ echo " (NVMe->lvol_store->lvol_bdev->lvol_store->lvol_bdev)"
+ echo " Default: False"
+ echo " --thin-provisioning Create lvol bdevs thin provisioned instead of"
+ echo " allocating space up front"
+ echo " --distribute-cores Use custom config file and run vhost controllers"
+ echo " on different CPU cores instead of single core."
+ echo " Default: False"
+ echo "-x set -x for script debug"
+ echo " --multi-os Run tests on different os types in VMs"
+ echo " Default: False"
+ exit 0
+}
+
+function clean_lvol_cfg() {
+ notice "Removing nested lvol bdevs"
+ for lvol_bdev in "${nest_lvol_bdevs[@]}"; do
+ $rpc_py bdev_lvol_delete $lvol_bdev
+ notice "nested lvol bdev $lvol_bdev removed"
+ done
+
+ notice "Removing nested lvol stores"
+ for lvol_store in "${nest_lvol_stores[@]}"; do
+ $rpc_py bdev_lvol_delete_lvstore -u $lvol_store
+ notice "nested lvol store $lvol_store removed"
+ done
+
+ notice "Removing lvol bdevs"
+ for lvol_bdev in "${lvol_bdevs[@]}"; do
+ $rpc_py bdev_lvol_delete $lvol_bdev
+ notice "lvol bdev $lvol_bdev removed"
+ done
+
+ notice "Removing lvol stores"
+ for lvol_store in "${lvol_stores[@]}"; do
+ $rpc_py bdev_lvol_delete_lvstore -u $lvol_store
+ notice "lvol store $lvol_store removed"
+ done
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
+ vm-count=*) vm_count="${OPTARG#*=}" ;;
+ max-disks=*) max_disks="${OPTARG#*=}" ;;
+ ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
+ nested-lvol) nested_lvol=true ;;
+ distribute-cores) distribute_cores=true ;;
+ thin-provisioning) thin=" -t " ;;
+ multi-os) multi_os=true ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+vhosttestinit
+
+notice "Get NVMe disks:"
+nvmes=($(get_nvme_bdfs))
+
+if [[ -z $max_disks ]]; then
+ max_disks=${#nvmes[@]}
+fi
+
+if ((${#nvmes[@]} < max_disks)); then
+ fail "Number of NVMe drives (${#nvmes[@]}) is lower than number of requested disks for test ($max_disks)"
+fi
+
+if $distribute_cores; then
+ # FIXME: this needs to be handled entirely in common.sh
+ source $testdir/autotest.config
+fi
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR
+
+vm_kill_all
+
+notice "running SPDK vhost"
+vhost_run 0
+notice "..."
+
+trap 'clean_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR
+
+lvol_stores=()
+lvol_bdevs=()
+nest_lvol_stores=()
+nest_lvol_bdevs=()
+used_vms=""
+
+# On each NVMe create one lvol store
+for ((i = 0; i < max_disks; i++)); do
+
+ # Create base lvol store on NVMe
+ notice "Creating lvol store on device Nvme${i}n1"
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore Nvme${i}n1 lvs_$i -c 4194304)
+ lvol_stores+=("$ls_guid")
+
+ if $nested_lvol; then
+ free_mb=$(get_lvs_free_mb "$ls_guid")
+ size=$((free_mb / (vm_count + 1)))
+
+ notice "Creating lvol bdev on lvol store: $ls_guid"
+ lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_nest $size $thin)
+
+ notice "Creating nested lvol store on lvol bdev: $lb_name"
+ nest_ls_guid=$($rpc_py bdev_lvol_create_lvstore $lb_name lvs_n_$i -c 4194304)
+ nest_lvol_stores+=("$nest_ls_guid")
+
+ for ((j = 0; j < vm_count; j++)); do
+ notice "Creating nested lvol bdev for VM $i on lvol store $nest_ls_guid"
+ free_mb=$(get_lvs_free_mb "$nest_ls_guid")
+ nest_size=$((free_mb / (vm_count - j)))
+ lb_name=$($rpc_py bdev_lvol_create -u $nest_ls_guid lbd_vm_$j $nest_size $thin)
+ nest_lvol_bdevs+=("$lb_name")
+ done
+ fi
+
+ # Create base lvol bdevs
+ for ((j = 0; j < vm_count; j++)); do
+ notice "Creating lvol bdev for VM $i on lvol store $ls_guid"
+ free_mb=$(get_lvs_free_mb "$ls_guid")
+ size=$((free_mb / (vm_count - j)))
+ lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_vm_$j $size $thin)
+ lvol_bdevs+=("$lb_name")
+ done
+done
+
+bdev_info=$($rpc_py bdev_get_bdevs)
+notice "Configuration after initial set-up:"
+$rpc_py bdev_lvol_get_lvstores
+echo "$bdev_info"
+
+# Set up VMs
+for ((i = 0; i < vm_count; i++)); do
+ vm="vm_$i"
+
+ # Get all lvol bdevs associated with this VM number
+ bdevs=$(jq -r "map(select(.aliases[] | contains(\"$vm\")) | \
+ .aliases[]) | join(\" \")" <<< "$bdev_info")
+ bdevs=($bdevs)
+
+ setup_cmd="vm_setup --disk-type=$ctrl_type --force=$i"
+ if [[ $i%2 -ne 0 ]] && [[ $multi_os ]]; then
+ setup_cmd+=" --os=/home/sys_sgsw/spdk_vhost_CentOS_vm_image.qcow2"
+ else
+ setup_cmd+=" --os=$VM_IMAGE"
+ fi
+
+ # Create single SCSI controller or multiple BLK controllers for this VM
+ if $distribute_cores; then
+ mask="VM_${i}_qemu_mask"
+ mask_arg="--cpumask ${!mask}"
+ fi
+
+ if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ $rpc_py vhost_create_scsi_controller naa.0.$i $mask_arg
+ for ((j = 0; j < ${#bdevs[@]}; j++)); do
+ $rpc_py vhost_scsi_controller_add_target naa.0.$i $j ${bdevs[$j]}
+ done
+ setup_cmd+=" --disks=0"
+ elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ disk=""
+ for ((j = 0; j < ${#bdevs[@]}; j++)); do
+ $rpc_py vhost_create_blk_controller naa.$j.$i ${bdevs[$j]} $mask_arg
+ disk+="${j}:"
+ done
+ disk="${disk::-1}"
+ setup_cmd+=" --disks=$disk"
+ fi
+
+ $setup_cmd
+ used_vms+=" $i"
+done
+
+$rpc_py vhost_get_controllers
+
+# Run VMs
+vm_run $used_vms
+vm_wait_for_boot 300 $used_vms
+
+# Get disk names from VMs and run FIO traffic
+
+fio_disks=""
+for vm_num in $used_vms; do
+ qemu_mask_param="VM_${vm_num}_qemu_mask"
+
+ host_name="VM-$vm_num-${!qemu_mask_param}"
+ vm_exec $vm_num "hostname $host_name"
+ vm_start_fio_server $fio_bin $vm_num
+
+ if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ vm_check_scsi_location $vm_num
+ elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ vm_check_blk_location $vm_num
+ fi
+
+ fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
+done
+
+if [[ $RUN_NIGHTLY -eq 1 ]]; then
+ job_file="default_integrity_nightly.job"
+else
+ job_file="default_integrity.job"
+fi
+# Run FIO traffic
+run_fio $fio_bin --job-file=$rootdir/test/vhost/common/fio_jobs/$job_file --out="$VHOST_DIR/fio_results" $fio_disks
+
+notice "Shutting down virtual machines..."
+vm_shutdown_all
+sleep 2
+
+notice "Cleaning up vhost - remove LUNs, controllers, lvol bdevs and lvol stores"
+if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ for ((i = 0; i < vm_count; i++)); do
+ notice "Removing devices from vhost SCSI controller naa.0.$i"
+ for ((j = 0; j < ${#bdevs[@]}; j++)); do
+ $rpc_py vhost_scsi_controller_remove_target naa.0.$i $j
+ notice "Removed device $j"
+ done
+ notice "Removing vhost SCSI controller naa.0.$i"
+ $rpc_py vhost_delete_controller naa.0.$i
+ done
+elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ for ((i = 0; i < vm_count; i++)); do
+ for ((j = 0; j < ${#bdevs[@]}; j++)); do
+ notice "Removing vhost BLK controller naa.$j.$i"
+ $rpc_py vhost_delete_controller naa.$j.$i
+ notice "Removed naa.$j.$i"
+ done
+ done
+fi
+
+clean_lvol_cfg
+
+$rpc_py bdev_lvol_get_lvstores
+$rpc_py bdev_get_bdevs
+$rpc_py vhost_get_controllers
+
+notice "Shutting down SPDK vhost app..."
+vhost_kill 0
+
+vhosttestfini
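lvol_test.sh is likewise parameter driven; for instance, a run limited to one NVMe drive with two VMs and thin-provisioned lvol bdevs (values chosen purely for illustration) could look like:

    ./lvol_test.sh --ctrl-type=spdk_vhost_scsi --max-disks=1 --vm-count=2 --thin-provisioning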
diff --git a/src/spdk/test/vhost/manual.sh b/src/spdk/test/vhost/manual.sh
new file mode 100755
index 000000000..187a0225e
--- /dev/null
+++ b/src/spdk/test/vhost/manual.sh
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+CENTOS_VM_IMAGE="/home/sys_sgsw/spdk_vhost_CentOS_vm_image.qcow2"
+DEFAULT_FIO_BIN="/home/sys_sgsw/fio_ubuntu"
+CENTOS_FIO_BIN="/home/sys_sgsw/fio_ubuntu_bak"
+
+case $1 in
+ -h | --help)
+ echo "usage: $(basename $0) TEST_TYPE"
+ echo "Test type can be:"
+ echo " -p |--performance for running a performance test with vhost scsi"
+ echo " -pb|--performance-blk for running a performance test with vhost blk"
+ echo " -hp|--hotplug for running hotplug tests"
+ echo " -shr|--scsi-hot-remove for running scsi hot remove tests"
+ echo " -bhr|--blk-hot-remove for running blk hot remove tests"
+ echo " -h |--help prints this message"
+ echo ""
+ echo "Environment:"
+ echo " VM_IMAGE path to QCOW2 VM image used during test (default: $HOME/vhost_vm_image.qcow2)"
+ echo ""
+ echo "Tests are performed only on Linux machine. For other OS no action is performed."
+ echo ""
+ exit 0
+ ;;
+esac
+
+echo "Running SPDK vhost fio autotest..."
+if [[ $(uname -s) != Linux ]]; then
+ echo ""
+ echo "INFO: Vhost tests are only for Linux machine."
+ echo ""
+ exit 0
+fi
+
+: ${FIO_BIN="$DEFAULT_FIO_BIN"}
+
+if [[ ! -r "${VM_IMAGE}" ]]; then
+ echo ""
+ echo "ERROR: VM image '${VM_IMAGE}' does not exist."
+ echo ""
+ exit 1
+fi
+
+DISKS_NUMBER=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"$1}' | wc -l)
+
+WORKDIR=$(readlink -f $(dirname $0))
+
+case $1 in
+ -hp | --hotplug)
+ echo 'Running hotplug tests suite...'
+ run_test "vhost_hotplug" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
+ --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
+ --vm=2,$VM_IMAGE,Nvme0n1p4:Nvme0n1p5 \
+ --vm=3,$VM_IMAGE,Nvme0n1p6:Nvme0n1p7 \
+ --test-type=spdk_vhost_scsi \
+ --fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job -x
+ ;;
+ -shr | --scsi-hot-remove)
+ echo 'Running scsi hotremove tests suite...'
+ run_test "vhost_scsi_hot_remove" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
+ --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
+ --test-type=spdk_vhost_scsi \
+ --scsi-hotremove-test \
+ --fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
+ ;;
+ -bhr | --blk-hot-remove)
+ echo 'Running blk hotremove tests suite...'
+ run_test "vhost_blk_hot_remove" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
+ --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
+ --test-type=spdk_vhost_blk \
+ --blk-hotremove-test \
+ --fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
+ ;;
+ *)
+ echo "unknown test type: $1"
+ exit 1
+ ;;
+esac
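manual.sh only dispatches to hotplug/scsi_hotplug.sh with a preset VM layout, so the hot-remove suites shown earlier in this patch are typically started as below (the image path matches the documented default and may differ per setup):

    # SCSI hot-remove suite
    VM_IMAGE=$HOME/vhost_vm_image.qcow2 ./manual.sh --scsi-hot-remove
    # BLK hot-remove suite
    VM_IMAGE=$HOME/vhost_vm_image.qcow2 ./manual.sh --blk-hot-remove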
diff --git a/src/spdk/test/vhost/migration/autotest.config b/src/spdk/test/vhost/migration/autotest.config
new file mode 100644
index 000000000..ccda306ea
--- /dev/null
+++ b/src/spdk/test/vhost/migration/autotest.config
@@ -0,0 +1,14 @@
+vhost_0_reactor_mask=["0"]
+vhost_0_master_core=0
+
+vhost_1_reactor_mask=["0"]
+vhost_1_master_core=0
+
+VM_0_qemu_mask=1
+VM_0_qemu_numa_node=0
+
+VM_1_qemu_mask=1
+VM_1_qemu_numa_node=0
+
+VM_2_qemu_mask=1
+VM_2_qemu_numa_node=0
diff --git a/src/spdk/test/vhost/migration/migration-tc1.job b/src/spdk/test/vhost/migration/migration-tc1.job
new file mode 100644
index 000000000..5383b243f
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc1.job
@@ -0,0 +1,25 @@
+[global]
+blocksize_range=4k-512k
+#bs=512k
+iodepth=128
+ioengine=libaio
+filename=
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_fatal=1
+verify_dump=1
+size=100%
+
+[write]
+rw=write
+stonewall
+
+[randread]
+rw=randread
+runtime=10
+time_based
+stonewall
diff --git a/src/spdk/test/vhost/migration/migration-tc1.sh b/src/spdk/test/vhost/migration/migration-tc1.sh
new file mode 100644
index 000000000..6d5a436ef
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc1.sh
@@ -0,0 +1,119 @@
+function migration_tc1_clean_vhost_config() {
+ # Restore trap
+ trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+
+ notice "Removing vhost devices & controllers via RPC ..."
+ # Delete bdev first to remove all LUNs and SCSI targets
+ $rpc bdev_malloc_delete Malloc0
+
+ # Delete controllers
+ $rpc vhost_delete_controller $incoming_vm_ctrlr
+ $rpc vhost_delete_controller $target_vm_ctrlr
+
+ unset -v incoming_vm target_vm incoming_vm_ctrlr target_vm_ctrlr rpc
+}
+
+function migration_tc1_configure_vhost() {
+ # These are intentionally global - they will be unset in the cleanup handler
+ incoming_vm=0
+ target_vm=1
+ incoming_vm_ctrlr=naa.Malloc0.$incoming_vm
+ target_vm_ctrlr=naa.Malloc0.$target_vm
+ rpc="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+ trap 'migration_tc1_error_handler; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+
+ # Construct shared Malloc Bdev
+ $rpc bdev_malloc_create -b Malloc0 128 4096
+
+ # And two controllers - one for each VM. Both are using the same Malloc Bdev as LUN 0
+ $rpc vhost_create_scsi_controller $incoming_vm_ctrlr
+ $rpc vhost_scsi_controller_add_target $incoming_vm_ctrlr 0 Malloc0
+
+ $rpc vhost_create_scsi_controller $target_vm_ctrlr
+ $rpc vhost_scsi_controller_add_target $target_vm_ctrlr 0 Malloc0
+}
+
+function migration_tc1_error_handler() {
+ trap - SIGINT ERR EXIT
+ warning "Migration TC1 ERROR HANDLER"
+ print_backtrace
+ set -x
+
+ vm_kill_all
+ migration_tc1_clean_vhost_config
+
+ warning "Migration TC1 FAILED"
+}
+
+function migration_tc1() {
+ # Use 2 VMs:
+ # incoming VM - the one we want to migrate
+ # target VM - the one which will accept the migration
+ local job_file="$testdir/migration-tc1.job"
+ local log_file
+ log_file="/root/$(basename ${job_file%%.*}).log"
+
+ # Run vhost
+ vhost_run 0
+ migration_tc1_configure_vhost
+
+ notice "Setting up VMs"
+ vm_setup --os="$os_image" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=Malloc0 --migrate-to=$target_vm
+ vm_setup --force=$target_vm --disk-type=spdk_vhost_scsi --disks=Malloc0 --incoming=$incoming_vm
+
+ # Run everything
+ vm_run $incoming_vm $target_vm
+
+ # Wait only for incoming VM, as target is waiting for migration
+ vm_wait_for_boot 300 $incoming_vm
+
+ # Run fio before migration
+ notice "Starting FIO"
+
+ vm_check_scsi_location $incoming_vm
+ run_fio $fio_bin --job-file="$job_file" --no-wait-for-fio --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"
+
+ # Wait a while to give FIO time to issue some I/O
+ sleep 5
+
+ # Check if fio is still running before migration
+ if ! is_fio_running $incoming_vm; then
+ vm_exec $incoming_vm "cat $log_file"
+ error "FIO is not running before migration: process crashed or finished too early"
+ fi
+
+ vm_migrate $incoming_vm
+ sleep 3
+
+ # Check if fio is still running after migration
+ if ! is_fio_running $target_vm; then
+ vm_exec $target_vm "cat $log_file"
+ error "FIO is not running after migration: process crashed or finished too early"
+ fi
+
+ notice "Waiting for fio to finish"
+ local timeout=40
+ while is_fio_running $target_vm; do
+ sleep 1
+ echo -n "."
+ if ((timeout-- == 0)); then
+ error "timeout while waiting for FIO!"
+ fi
+ done
+
+ notice "Fio result is:"
+ vm_exec $target_vm "cat $log_file"
+
+ notice "Migration DONE"
+
+ notice "Shutting down all VMs"
+ vm_shutdown_all
+
+ migration_tc1_clean_vhost_config
+
+ notice "killing vhost app"
+ vhost_kill 0
+
+ notice "Migration TC1 SUCCESS"
+}
diff --git a/src/spdk/test/vhost/migration/migration-tc2.job b/src/spdk/test/vhost/migration/migration-tc2.job
new file mode 100644
index 000000000..df78a3cd6
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc2.job
@@ -0,0 +1,20 @@
+[global]
+blocksize_range=4k-512k
+iodepth=128
+ioengine=libaio
+filename=
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_fatal=1
+verify_dump=1
+verify_backlog=8
+
+[randwrite]
+rw=randwrite
+runtime=15
+time_based
+stonewall
diff --git a/src/spdk/test/vhost/migration/migration-tc2.sh b/src/spdk/test/vhost/migration/migration-tc2.sh
new file mode 100644
index 000000000..aa234d842
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc2.sh
@@ -0,0 +1,203 @@
+source $rootdir/test/nvmf/common.sh
+
+function migration_tc2_cleanup_nvmf_tgt() {
+ local i
+
+ if [[ ! -r "$nvmf_dir/nvmf_tgt.pid" ]]; then
+ warning "Pid file '$nvmf_dir/nvmf_tgt.pid' does not exist. "
+ return
+ fi
+
+ if [[ -n "$1" ]]; then
+ trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ pkill --signal $1 -F $nvmf_dir/nvmf_tgt.pid || true
+ sleep 5
+ if ! pkill -F $nvmf_dir/nvmf_tgt.pid; then
+ fail "failed to kill nvmf_tgt app"
+ fi
+ else
+ pkill --signal SIGTERM -F $nvmf_dir/nvmf_tgt.pid || true
+ for ((i = 0; i < 20; i++)); do
+ if ! pkill --signal 0 -F $nvmf_dir/nvmf_tgt.pid; then
+ break
+ fi
+ sleep 0.5
+ done
+
+ if pkill --signal 0 -F $nvmf_dir/nvmf_tgt.pid; then
+ error "nvmf_tgt failed to shutdown"
+ fi
+ fi
+
+ rm $nvmf_dir/nvmf_tgt.pid
+ unset -v nvmf_dir rpc_nvmf
+}
+
+function migration_tc2_cleanup_vhost_config() {
+ timing_enter migration_tc2_cleanup_vhost_config
+
+ trap 'migration_tc2_cleanup_nvmf_tgt SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+
+ notice "Shutting down all VMs"
+ vm_shutdown_all
+
+ notice "Removing vhost devices & controllers via RPC ..."
+ # Delete bdev first to remove all LUNs and SCSI targets
+ $rpc_0 bdev_nvme_detach_controller Nvme0
+ $rpc_0 vhost_delete_controller $incoming_vm_ctrlr
+
+ $rpc_1 bdev_nvme_detach_controller Nvme0
+ $rpc_1 vhost_delete_controller $target_vm_ctrlr
+
+ notice "killing vhost app"
+ vhost_kill 0
+ vhost_kill 1
+
+ unset -v incoming_vm target_vm incoming_vm_ctrlr target_vm_ctrlr
+ unset -v rpc_0 rpc_1
+
+ trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ migration_tc2_cleanup_nvmf_tgt
+
+ timing_exit migration_tc2_cleanup_vhost_config
+}
+
+function migration_tc2_configure_vhost() {
+ timing_enter migration_tc2_configure_vhost
+
+ # These are intentionally global - they will be unset in the cleanup handler
+ nvmf_dir="$VHOST_DIR/nvmf_tgt"
+
+ incoming_vm=1
+ target_vm=2
+ incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
+ target_vm_ctrlr=naa.VhostScsi0.$target_vm
+
+ rpc_nvmf="$rootdir/scripts/rpc.py -s $nvmf_dir/rpc.sock"
+ rpc_0="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+ rpc_1="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"
+
+ # The default cleanup/error handlers will not shut down the nvmf_tgt app, so set it up
+ # here and tear it down in the cleanup function
+ trap 'migration_tc2_error_cleanup; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+
+ # Run nvmf_tgt and two vhost instances:
+ # nvmf_tgt uses core id 2 (-m 0x4)
+ # First uses core id 0
+ # Second uses core id 1
+ # This forces the use of VMs 1 and 2.
+ timing_enter start_nvmf_tgt
+ notice "Running nvmf_tgt..."
+ mkdir -p $nvmf_dir
+ rm -f $nvmf_dir/*
+ $SPDK_BIN_DIR/nvmf_tgt -s 512 -m 0x4 -r $nvmf_dir/rpc.sock --wait-for-rpc &
+ local nvmf_tgt_pid=$!
+ echo $nvmf_tgt_pid > $nvmf_dir/nvmf_tgt.pid
+ waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/rpc.sock"
+ $rpc_nvmf framework_start_init
+ $rpc_nvmf nvmf_create_transport -t RDMA -u 8192
+ $rootdir/scripts/gen_nvme.sh --json | $rpc_nvmf load_subsystem_config
+ timing_exit start_nvmf_tgt
+
+ vhost_run 0 "-m 0x1 -s 512 -u"
+ vhost_run 1 "-m 0x2 -s 512 -u"
+
+ local rdma_ip_list
+ local nvmf_target_ip
+ rdma_ip_list=$(get_available_rdma_ips)
+ nvmf_target_ip=$(echo "$rdma_ip_list" | head -n 1)
+
+ if [[ -z "$nvmf_target_ip" ]]; then
+ fail "no NIC for nvmf target"
+ fi
+
+ notice "Configuring nvmf_tgt, vhost devices & controllers via RPC ..."
+
+ # Construct shared bdevs and controllers
+ $rpc_nvmf nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+ $rpc_nvmf nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1
+ $rpc_nvmf nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $nvmf_target_ip -s 4420
+
+ $rpc_0 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
+ $rpc_0 vhost_create_scsi_controller $incoming_vm_ctrlr
+ $rpc_0 vhost_scsi_controller_add_target $incoming_vm_ctrlr 0 Nvme0n1
+
+ $rpc_1 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
+ $rpc_1 vhost_create_scsi_controller $target_vm_ctrlr
+ $rpc_1 vhost_scsi_controller_add_target $target_vm_ctrlr 0 Nvme0n1
+
+ notice "Setting up VMs"
+ vm_setup --os="$os_image" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
+ --migrate-to=$target_vm --memory=1024 --vhost-name=0
+ vm_setup --force=$target_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 --incoming=$incoming_vm --memory=1024 \
+ --vhost-name=1
+
+ # Run everything
+ vm_run $incoming_vm $target_vm
+
+ # Wait only for incoming VM, as target is waiting for migration
+ vm_wait_for_boot 300 $incoming_vm
+
+ notice "Configuration done"
+
+ timing_exit migration_tc2_configure_vhost
+}
+
+function migration_tc2_error_cleanup() {
+ trap - SIGINT ERR EXIT
+ set -x
+
+ vm_kill_all
+ migration_tc2_cleanup_vhost_config
+ notice "Migration TC2 FAILED"
+}
+
+function migration_tc2() {
+ # Use 2 VMs:
+ # incoming VM - the one we want to migrate
+ # target VM - the one which will accept the migration
+ local job_file="$testdir/migration-tc2.job"
+ local log_file
+ log_file="/root/$(basename ${job_file%%.*}).log"
+
+ migration_tc2_configure_vhost
+
+ # Run fio before migration
+ notice "Starting FIO"
+ vm_check_scsi_location $incoming_vm
+ run_fio $fio_bin --job-file="$job_file" --no-wait-for-fio --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"
+
+ # Wait a while to give FIO time to issue some I/O
+ sleep 5
+
+ # Check if fio is still running before migration
+ if ! is_fio_running $incoming_vm; then
+ vm_exec $incoming_vm "cat $log_file"
+ error "FIO is not running before migration: process crashed or finished too early"
+ fi
+
+ vm_migrate $incoming_vm
+ sleep 3
+
+ # Check if fio is still running after migration
+ if ! is_fio_running $target_vm; then
+ vm_exec $target_vm "cat $log_file"
+ error "FIO is not running after migration: process crashed or finished too early"
+ fi
+
+ notice "Waiting for fio to finish"
+ local timeout=40
+ while is_fio_running $target_vm; do
+ sleep 1
+ echo -n "."
+ if ((timeout-- == 0)); then
+ error "timeout while waiting for FIO!"
+ fi
+ done
+
+ notice "Fio result is:"
+ vm_exec $target_vm "cat $log_file"
+
+ migration_tc2_cleanup_vhost_config
+ notice "Migration TC2 SUCCESS"
+}
diff --git a/src/spdk/test/vhost/migration/migration-tc3.job b/src/spdk/test/vhost/migration/migration-tc3.job
new file mode 100644
index 000000000..fe1929662
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc3.job
@@ -0,0 +1,20 @@
+[global]
+blocksize_range=4k-512k
+iodepth=128
+ioengine=libaio
+filename=
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_fatal=1
+verify_dump=1
+verify_backlog=8
+
+[randwrite]
+rw=randwrite
+runtime=15
+time_based
+stonewall
diff --git a/src/spdk/test/vhost/migration/migration-tc3a.sh b/src/spdk/test/vhost/migration/migration-tc3a.sh
new file mode 100644
index 000000000..b8f06a8d0
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc3a.sh
@@ -0,0 +1,218 @@
+source $rootdir/test/nvmf/common.sh
+source $testdir/autotest.config
+
+incoming_vm=1
+target_vm=2
+incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
+target_vm_ctrlr=naa.VhostScsi0.$target_vm
+share_dir=$TEST_DIR/share
+spdk_repo_share_dir=$TEST_DIR/share_spdk
+job_file=$testdir/migration-tc3.job
+log_file="/root/$(basename ${job_file%%.*}).log"
+
+if [ -z "$MGMT_TARGET_IP" ]; then
+ error "No IP address of target is given"
+fi
+
+if [ -z "$MGMT_INITIATOR_IP" ]; then
+ error "No IP address of initiator is given"
+fi
+
+if [ -z "$RDMA_TARGET_IP" ]; then
+ error "No IP address of targets RDMA capable NIC is given"
+fi
+
+if [ -z "$RDMA_INITIATOR_IP" ]; then
+ error "No IP address of initiators RDMA capable NIC is given"
+fi
+
+function ssh_remote() {
+ local ssh_cmd="sshpass -p root ssh \
+ -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ -o ControlMaster=auto \
+ -o User=root \
+ $1"
+
+ shift
+ $ssh_cmd "$@"
+}
+
+function wait_for_remote() {
+ local timeout=40
+ set +x
+ while [[ ! -f $share_dir/DONE ]]; do
+ echo -n "."
+ if ((timeout-- == 0)); then
+ error "timeout while waiting for FIO!"
+ fi
+ sleep 1
+ done
+ set -x
+ rm -f $share_dir/DONE
+}
+
+function check_rdma_connection() {
+ local nic_name
+ nic_name=$(ip -4 -o addr show to $RDMA_TARGET_IP up | cut -d' ' -f2)
+ if [[ -z $nic_name ]]; then
+ error "There is no NIC with IP address $RDMA_TARGET_IP configured"
+ fi
+
+ if ! ls /sys/class/infiniband/*/device/net/$nic_name &> /dev/null; then
+ error "$nic_name with IP $RDMA_TARGET_IP is not a RDMA capable NIC"
+ fi
+
+}
+
+function host1_cleanup_nvmf() {
+ notice "Shutting down nvmf_tgt on local server"
+ if [[ -n "$1" ]]; then
+ pkill --signal $1 -F $nvmf_dir/nvmf_tgt.pid
+ else
+ pkill -F $nvmf_dir/nvmf_tgt.pid
+ fi
+ rm -f $nvmf_dir/nvmf_tgt.pid
+}
+
+function host1_cleanup_vhost() {
+ trap 'host1_cleanup_nvmf SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ notice "Shutting down VM $incoming_vm"
+ vm_kill $incoming_vm
+
+ notice "Removing bdev & controller from vhost on local server"
+ $rpc_0 bdev_nvme_detach_controller Nvme0
+ $rpc_0 vhost_delete_controller $incoming_vm_ctrlr
+
+ notice "Shutting down vhost app"
+ vhost_kill 0
+
+ host1_cleanup_nvmf
+}
+
+function host1_start_nvmf() {
+ nvmf_dir="$TEST_DIR/nvmf_tgt"
+ rpc_nvmf="$rootdir/scripts/rpc.py -s $nvmf_dir/nvmf_rpc.sock"
+
+ notice "Starting nvmf_tgt instance on local server"
+ mkdir -p $nvmf_dir
+ rm -rf "${nvmf_dir:?}/"*
+
+ trap 'host1_cleanup_nvmf SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ $SPDK_BIN_DIR/nvmf_tgt -s 512 -m 0xF -r $nvmf_dir/nvmf_rpc.sock --wait-for-rpc &
+ nvmf_tgt_pid=$!
+ echo $nvmf_tgt_pid > $nvmf_dir/nvmf_tgt.pid
+ waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/nvmf_rpc.sock"
+ $rpc_nvmf framework_start_init
+ $rpc_nvmf nvmf_create_transport -t RDMA -u 8192
+ $rootdir/scripts/gen_nvme.sh --json | $rpc_nvmf load_subsystem_config
+
+ $rpc_nvmf nvmf_create_subsystem nqn.2018-02.io.spdk:cnode1 -a -s SPDK01
+ $rpc_nvmf nvmf_subsystem_add_ns nqn.2018-02.io.spdk:cnode1 Nvme0n1
+ $rpc_nvmf nvmf_subsystem_add_listener nqn.2018-02.io.spdk:cnode1 -t rdma -a $RDMA_TARGET_IP -s 4420
+}
+
+function host1_start_vhost() {
+ rpc_0="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+ notice "Starting vhost0 instance on local server"
+ trap 'host1_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ vhost_run 0 "-u"
+ $rpc_0 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
+ $rpc_0 vhost_create_scsi_controller $incoming_vm_ctrlr
+ $rpc_0 vhost_scsi_controller_add_target $incoming_vm_ctrlr 0 Nvme0n1
+
+ vm_setup --os="$share_dir/migration.qcow2" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
+ --migrate-to=$target_vm --memory=512 --queue_num=1
+
+ # TODO: Fix loop calculating cpu_num in common.sh
+ # We need -smp 1 and -queue_num 1 for this test to work, and this loop
+ # in some cases calculates wrong cpu_num.
+ sed -i "s#smp 2#smp 1#g" $VM_BASE_DIR/$incoming_vm/run.sh
+ vm_run $incoming_vm
+ vm_wait_for_boot 300 $incoming_vm
+}
+
+function cleanup_share() {
+ set +e
+ notice "Cleaning up share directory on remote and local server"
+ ssh_remote $MGMT_INITIATOR_IP "umount $VM_BASE_DIR"
+ ssh_remote $MGMT_INITIATOR_IP "umount $share_dir; rm -f $share_dir/* rm -rf $spdk_repo_share_dir"
+ rm -f $share_dir/migration.qcow2
+ rm -f $share_dir/spdk.tar.gz
+ set -e
+}
+
+function host_1_create_share() {
+ notice "Creating share directory on local server to re-use on remote"
+ mkdir -p $share_dir
+ mkdir -p $VM_BASE_DIR # This dir would've been created later but we need it now
+ rm -rf $share_dir/spdk.tar.gz $share_dir/spdk || true
+ cp $os_image $share_dir/migration.qcow2
+ tar --exclude="*.o" --exclude="*.d" --exclude="*.git" -C $rootdir -zcf $share_dir/spdk.tar.gz .
+}
+
+function host_2_create_share() {
+ # Copy & compile the sources for later use on remote server.
+ ssh_remote $MGMT_INITIATOR_IP "uname -a"
+ ssh_remote $MGMT_INITIATOR_IP "mkdir -p $share_dir"
+ ssh_remote $MGMT_INITIATOR_IP "mkdir -p $spdk_repo_share_dir"
+ ssh_remote $MGMT_INITIATOR_IP "mkdir -p $VM_BASE_DIR"
+ ssh_remote $MGMT_INITIATOR_IP "sshfs -o\
+ ssh_command=\"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto\
+ -i $SPDK_VHOST_SSH_KEY_FILE\" root@$MGMT_TARGET_IP:$VM_BASE_DIR $VM_BASE_DIR"
+ ssh_remote $MGMT_INITIATOR_IP "sshfs -o\
+ ssh_command=\"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto\
+ -i $SPDK_VHOST_SSH_KEY_FILE\" root@$MGMT_TARGET_IP:$share_dir $share_dir"
+ ssh_remote $MGMT_INITIATOR_IP "mkdir -p $spdk_repo_share_dir/spdk"
+ ssh_remote $MGMT_INITIATOR_IP "tar -zxf $share_dir/spdk.tar.gz -C $spdk_repo_share_dir/spdk --strip-components=1"
+ ssh_remote $MGMT_INITIATOR_IP "cd $spdk_repo_share_dir/spdk; make clean; ./configure --with-rdma --enable-debug; make -j40"
+}
+
+function host_2_start_vhost() {
+ ssh_remote $MGMT_INITIATOR_IP "nohup $spdk_repo_share_dir/spdk/test/vhost/migration/migration.sh\
+ --test-cases=3b --os=$share_dir/migration.qcow2\
+ --rdma-tgt-ip=$RDMA_TARGET_IP &>$share_dir/output.log &"
+ notice "Waiting for remote to be done with vhost & VM setup..."
+ wait_for_remote
+}
+
+function setup_share() {
+ trap 'cleanup_share; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ host_1_create_share
+ host_2_create_share
+}
+
+function migration_tc3() {
+ check_rdma_connection
+ setup_share
+ host1_start_nvmf
+ host1_start_vhost
+ host_2_start_vhost
+
+ # Do migration
+ notice "Starting fio on local VM"
+ vm_check_scsi_location $incoming_vm
+
+ run_fio $fio_bin --job-file="$job_file" --no-wait-for-fio --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"
+ sleep 5
+
+ if ! is_fio_running $incoming_vm; then
+ vm_exec $incoming_vm "cat $log_file"
+ error "Fio not running on local VM before starting migration!"
+ fi
+
+ vm_migrate $incoming_vm $RDMA_INITIATOR_IP
+ sleep 1
+
+ # Verify migration on remote host and clean up vhost
+ ssh_remote $MGMT_INITIATOR_IP "pkill -CONT -F $TEST_DIR/tc3b.pid"
+ notice "Waiting for remote to finish FIO on VM and clean up..."
+ wait_for_remote
+
+ # Clean up local stuff
+ host1_cleanup_vhost
+ cleanup_share
+}
+
+migration_tc3
diff --git a/src/spdk/test/vhost/migration/migration-tc3b.sh b/src/spdk/test/vhost/migration/migration-tc3b.sh
new file mode 100644
index 000000000..22d54df73
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc3b.sh
@@ -0,0 +1,77 @@
+# The -m option is needed to be able to use the "suspend" command,
+# as we are using a non-interactive session to connect to the remote host.
+# Without -m it would not be possible to suspend the process.
+set -m
+source $testdir/autotest.config
+
+incoming_vm=1
+target_vm=2
+target_vm_ctrl=naa.VhostScsi0.$target_vm
+rpc="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"
+share_dir=$VHOST_DIR/share
+
+function host_2_cleanup_vhost() {
+ notice "Shutting down VM $target_vm"
+ vm_kill $target_vm
+
+ notice "Removing bdev & controller from vhost 1 on remote server"
+ $rpc bdev_nvme_detach_controller Nvme0
+ $rpc vhost_delete_controller $target_vm_ctrl
+
+ notice "Shutting down vhost app"
+ vhost_kill 1
+ sleep 1
+}
+
+function host_2_start_vhost() {
+ echo "BASE DIR $VHOST_DIR"
+ vhost_work_dir=$VHOST_DIR/vhost1
+ mkdir -p $vhost_work_dir
+ rm -f $vhost_work_dir/*
+
+ notice "Starting vhost 1 instance on remote server"
+ trap 'host_2_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ vhost_run 1 "-u"
+
+ $rpc bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
+ $rpc vhost_create_scsi_controller $target_vm_ctrl
+ $rpc vhost_scsi_controller_add_target $target_vm_ctrl 0 Nvme0n1
+
+ vm_setup --os="$os_image" --force=$target_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
+ --memory=512 --vhost-name=1 --incoming=$incoming_vm
+ vm_run $target_vm
+ sleep 1
+
+ # Use this file as a flag to notify main script
+ # that setup on remote server is done
+ echo "DONE" > $share_dir/DONE
+}
+
+echo $$ > $VHOST_DIR/tc3b.pid
+host_2_start_vhost
+suspend -f
+
+if ! vm_os_booted $target_vm; then
+ fail "VM$target_vm is not running!"
+fi
+
+if ! is_fio_running $target_vm; then
+ vm_exec $target_vm "cat /root/migration-tc3.log"
+ error "FIO is not running on remote server after migration!"
+fi
+
+notice "Waiting for FIO to finish on remote server VM"
+timeout=40
+while is_fio_running $target_vm; do
+ sleep 1
+ echo -n "."
+ if ((timeout-- == 0)); then
+ error "timeout while waiting for FIO!"
+ fi
+done
+
+notice "FIO result after migration:"
+vm_exec $target_vm "cat /root/migration-tc3.log"
+
+host_2_cleanup_vhost
+echo "DONE" > $share_dir/DONE
diff --git a/src/spdk/test/vhost/migration/migration.sh b/src/spdk/test/vhost/migration/migration.sh
new file mode 100755
index 000000000..8f461e6ca
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration.sh
@@ -0,0 +1,143 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $testdir/migration-tc1.sh
+source $testdir/migration-tc2.sh
+
+vms=()
+declare -A vms_os
+declare -A vms_raw_disks
+declare -A vms_ctrlrs
+declare -A vms_ctrlrs_disks
+
+# By default use Guest fio
+fio_bin=""
+MGMT_TARGET_IP=""
+MGMT_INITIATOR_IP=""
+RDMA_TARGET_IP=""
+RDMA_INITIATOR_IP=""
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated test of live migration."
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo " --os ARGS VM configuration. This parameter might be used more than once:"
+ echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
+ echo " --mgmt-tgt-ip=IP IP address of target."
+ echo " --mgmt-init-ip=IP IP address of initiator."
+ echo " --rdma-tgt-ip=IP IP address of targets rdma capable NIC."
+ echo " --rdma-init-ip=IP IP address of initiators rdma capable NIC."
+ echo "-x set -x for script debug"
+}
+
+for param in "$@"; do
+ case "$param" in
+ --help | -h)
+ usage $0
+ exit 0
+ ;;
+ --os=*) os_image="${param#*=}" ;;
+ --fio-bin=*) fio_bin="${param}" ;;
+ --mgmt-tgt-ip=*) MGMT_TARGET_IP="${param#*=}" ;;
+ --mgmt-init-ip=*) MGMT_INITIATOR_IP="${param#*=}" ;;
+ --rdma-tgt-ip=*) RDMA_TARGET_IP="${param#*=}" ;;
+ --rdma-init-ip=*) RDMA_INITIATOR_IP="${param#*=}" ;;
+ -x) set -x ;;
+ -v) SPDK_VHOST_VERBOSE=true ;;
+ *)
+ usage $0 "Invalid argument '$param'"
+ exit 1
+ ;;
+ esac
+done
+
+vhosttestinit
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+
+function vm_monitor_send() {
+ local vm_num=$1
+ local cmd_result_file="$2"
+ local vm_dir="$VM_DIR/$1"
+ local vm_monitor_port
+ vm_monitor_port=$(cat $vm_dir/monitor_port)
+
+ [[ -n "$vm_monitor_port" ]] || fail "No monitor port!"
+
+ shift 2
+ nc 127.0.0.1 $vm_monitor_port "$@" > $cmd_result_file
+}
+
+# Migrate VM $1
+function vm_migrate() {
+ local from_vm_dir="$VM_DIR/$1"
+ local target_vm_dir
+ local target_vm
+ local target_vm_migration_port
+ target_vm_dir="$(readlink -e $from_vm_dir/vm_migrate_to)"
+ target_vm="$(basename $target_vm_dir)"
+ target_vm_migration_port="$(cat $target_vm_dir/migration_port)"
+ if [[ -n "$2" ]]; then
+ local target_ip=$2
+ else
+ local target_ip="127.0.0.1"
+ fi
+
+ # Sanity check if target VM (QEMU) is configured to accept source VM (QEMU) migration
+ if [[ "$(readlink -e ${target_vm_dir}/vm_incoming)" != "$(readlink -e ${from_vm_dir})" ]]; then
+ fail "source VM $1 or destination VM is not properly configured for live migration"
+ fi
+
+ timing_enter vm_migrate
+ notice "Migrating VM $1 to VM "$(basename $target_vm_dir)
+ echo -e \
+ "migrate_set_speed 1g\n" \
+ "migrate tcp:$target_ip:$target_vm_migration_port\n" \
+ "info migrate\n" \
+ "quit" | vm_monitor_send $1 "$from_vm_dir/migration_result"
+
+ # Post migration checks:
+ if ! grep "Migration status: completed" $from_vm_dir/migration_result -q; then
+ cat $from_vm_dir/migration_result
+ fail "Migration failed:\n"
+ fi
+
+ # Don't perform the following check if target VM is on remote server
+ # as we won't have access to it.
+ # If you need this check then perform it on your own.
+ if [[ "$target_ip" == "127.0.0.1" ]]; then
+ if ! vm_os_booted $target_vm; then
+ cat $target_vm_dir/cont_result
+ fail "VM$target_vm is not running"
+ fi
+ fi
+
+ notice "Migration complete"
+ timing_exit vm_migrate
+}
+
+function is_fio_running() {
+ xtrace_disable
+
+ if vm_exec $1 'kill -0 $(cat /root/fio.pid)'; then
+ local ret=0
+ else
+ local ret=1
+ fi
+
+ xtrace_restore
+ return $ret
+}
+
+run_test "vhost_migration_tc1" migration_tc1
+run_test "vhost_migration_tc2" migration_tc2
+
+trap - SIGINT ERR EXIT
+
+vhosttestfini
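migration.sh itself only registers test cases 1 and 2; the management and RDMA address options are consumed by the TC3 helpers sourced from the same directory. A plain local run needs no more than the VM image (the paths below are examples):

    ./migration.sh --os=$HOME/vhost_vm_image.qcow2 --fio-bin=/usr/src/fio/fio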
diff --git a/src/spdk/test/vhost/other/invalid.config b/src/spdk/test/vhost/other/invalid.config
new file mode 100644
index 000000000..58b703068
--- /dev/null
+++ b/src/spdk/test/vhost/other/invalid.config
@@ -0,0 +1,18 @@
+# SPDK vhost configuration file
+#
+# Please write all parameters using ASCII.
+# The parameter must be quoted if it includes whitespace.
+
+# Configuration syntax:
+# Leading whitespace is ignored.
+# Lines starting with '#' are comments.
+# Lines ending with '\' are concatenated with the next line.
+# Bracketed ([]) names define sections
+
+[Global]
+ # Instance ID for multi-process support
+ # Default: 0
+ #InstanceID 0
+
+[Null]
+ Dev null0 512 513
diff --git a/src/spdk/test/vhost/other/negative.sh b/src/spdk/test/vhost/other/negative.sh
new file mode 100755
index 000000000..81461c26f
--- /dev/null
+++ b/src/spdk/test/vhost/other/negative.sh
@@ -0,0 +1,209 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for running vhost app."
+ echo "Usage: $(basename $1) [-x] [-h|--help] [--clean-build]"
+ echo "-h, --help print help and exit"
+ echo "-x Set -x for script debug"
+
+ exit 0
+}
+
+run_in_background=false
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ conf-dir=*) CONF_DIR="${OPTARG#*=}" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x) set -x ;;
+ *) usage $0 "Invalid argument '$optchar'" ;;
+ esac
+done
+
+vhosttestinit
+
+trap error_exit ERR
+
+notice "Testing vhost command line arguments"
+# Printing help will force vhost to exit without error
+"${VHOST_APP[@]}" -c /path/to/non_existing_file/conf -S $testdir -e 0x0 -s 1024 -d -h --silence-noticelog
+
+# Testing vhost create pid file option. Vhost will exit with error as invalid config path is given
+if "${VHOST_APP[@]}" -c /path/to/non_existing_file/conf -f "$VHOST_DIR/vhost/vhost.pid"; then
+ fail "vhost started when specifying invalid config file"
+fi
+rm -f $VHOST_DIR/vhost/vhost.pid
+
+# Testing vhost start with invalid config. Vhost will exit with error as bdev module init failed
+if "${VHOST_APP[@]}" -c $testdir/invalid.config; then
+ fail "vhost started when specifying invalid config file"
+fi
+
+# Expecting vhost to fail if an incorrect argument is given
+if "${VHOST_APP[@]}" -x -h; then
+ fail "vhost started with invalid -x command line option"
+fi
+
+# Passing trace flags when spdk is built without the CONFIG_DEBUG=y option makes vhost exit with an error
+if ! "${VHOST_APP[@]}" -t vhost_scsi -h; then
+ warning "vhost did not started with trace flags enabled but ignoring this as it might not be a debug build"
+fi
+
+# Run with valid config and try some negative rpc calls
+notice "==============="
+notice ""
+notice "running SPDK"
+notice ""
+vhost_run 0
+notice ""
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+$rpc_py bdev_malloc_create -b Malloc0 128 4096
+$rpc_py bdev_malloc_create -b Malloc1 128 4096
+$rpc_py bdev_malloc_create -b Malloc2 128 4096
+$rpc_py bdev_split_create Malloc2 8
+
+# Try to get nonexistent vhost controller
+if $rpc_py vhost_get_controllers -n nonexistent; then
+ error "vhost returned controller that does not exist"
+fi
+
+notice "Set coalescing for nonexistent controller"
+if $rpc_py vhost_controller_set_coalescing nonexistent 1 100; then
+ error "Set coalescing for nonexistent controller should fail"
+fi
+
+# General commands
+notice "Trying to remove nonexistent controller"
+if $rpc_py vhost_delete_controller unk0 > /dev/null; then
+ error "Removing nonexistent controller succeeded, but it shouldn't"
+fi
+
+# SCSI
+notice "Trying to create scsi controller with incorrect cpumask"
+if $rpc_py vhost_create_scsi_controller vhost.invalid.cpumask --cpumask 0x2; then
+ error "Creating scsi controller with incorrect cpumask succeeded, but it shouldn't"
+fi
+
+notice "Trying to remove device from nonexistent scsi controller"
+if $rpc_py vhost_scsi_controller_remove_target vhost.nonexistent.name 0; then
+ error "Removing device from nonexistent scsi controller succeeded, but it shouldn't"
+fi
+
+notice "Trying to add device to nonexistent scsi controller"
+if $rpc_py vhost_scsi_controller_add_target vhost.nonexistent.name 0 Malloc0; then
+ error "Adding device to nonexistent scsi controller succeeded, but it shouldn't"
+fi
+
+notice "Trying to create scsi controller with incorrect name"
+if $rpc_py vhost_create_scsi_controller .; then
+ error "Creating scsi controller with incorrect name succeeded, but it shouldn't"
+fi
+
+notice "Creating controller naa.0"
+$rpc_py vhost_create_scsi_controller naa.0
+
+notice "Pass invalid parameter for vhost_controller_set_coalescing"
+if $rpc_py vhost_controller_set_coalescing naa.0 -1 100; then
+ error "Set coalescing with invalid parameter should fail"
+fi
+
+notice "Trying to add nonexistent device to scsi controller"
+if $rpc_py vhost_scsi_controller_add_target naa.0 0 nonexistent_bdev; then
+ error "Adding nonexistent device to scsi controller succeeded, but it shouldn't"
+fi
+
+notice "Adding device to naa.0 with slot number exceeding max"
+if $rpc_py vhost_scsi_controller_add_target naa.0 8 Malloc0; then
+ error "Adding device to naa.0 should fail but succeeded"
+fi
+
+for i in $(seq 0 7); do
+ $rpc_py vhost_scsi_controller_add_target naa.0 -1 Malloc2p$i
+done
+notice "All slots are occupied. Try to add one more device to naa.0"
+if $rpc_py vhost_scsi_controller_add_target naa.0 -1 Malloc0; then
+ error "Adding device to naa.0 should fail but succeeded"
+fi
+for i in $(seq 0 7); do
+ $rpc_py vhost_scsi_controller_remove_target naa.0 $i
+done
+
+notice "Adding initial device (0) to naa.0"
+$rpc_py vhost_scsi_controller_add_target naa.0 0 Malloc0
+
+notice "Adding device to naa.0 with slot number 0"
+if $rpc_py vhost_scsi_controller_add_target naa.0 0 Malloc1; then
+ error "Adding device to naa.0 occupied slot should fail but succeeded"
+fi
+
+notice "Trying to remove nonexistent device on existing controller"
+if $rpc_py vhost_scsi_controller_remove_target naa.0 1 > /dev/null; then
+ error "Removing nonexistent device (1) from controller naa.0 succeeded, but it shouldn't"
+fi
+
+notice "Trying to remove existing device from a controller"
+$rpc_py vhost_scsi_controller_remove_target naa.0 0
+
+notice "Trying to remove a just-deleted device from a controller again"
+if $rpc_py vhost_scsi_controller_remove_target naa.0 0 > /dev/null; then
+ error "Removing device 0 from controller naa.0 succeeded, but it shouldn't"
+fi
+
+notice "Trying to remove scsi target with invalid slot number"
+if $rpc_py vhost_scsi_controller_remove_target naa.0 8 > /dev/null; then
+ error "Removing device 8 from controller naa.0 succeeded, but it shouldn't"
+fi
+
+notice "Re-adding device 0 to naa.0"
+$rpc_py vhost_scsi_controller_add_target naa.0 0 Malloc0
+
+# BLK
+notice "Trying to create block controller with incorrect cpumask"
+if $rpc_py vhost_create_blk_controller vhost.invalid.cpumask Malloc0 --cpumask 0x2; then
+ error "Creating block controller with incorrect cpumask succeeded, but it shouldn't"
+fi
+
+notice "Trying to remove nonexistent block controller"
+if $rpc_py vhost_delete_controller vhost.nonexistent.name; then
+ error "Removing nonexistent block controller succeeded, but it shouldn't"
+fi
+
+notice "Trying to create block controller with incorrect name"
+if $rpc_py vhost_create_blk_controller . Malloc0; then
+ error "Creating block controller with incorrect name succeeded, but it shouldn't"
+fi
+
+notice "Trying to create block controller with nonexistent bdev"
+if $rpc_py vhost_create_blk_controller blk_ctrl Malloc3; then
+ error "Creating block controller with nonexistent bdev succeeded, but shouldn't"
+fi
+
+notice "Trying to create block controller with claimed bdev"
+$rpc_py bdev_lvol_create_lvstore Malloc0 lvs
+if $rpc_py vhost_create_blk_controller blk_ctrl Malloc0; then
+ error "Creating block controller with claimed bdev succeeded, but shouldn't"
+fi
+$rpc_py bdev_lvol_delete_lvstore -l lvs
+
+notice "Testing done -> shutting down"
+notice "killing vhost app"
+vhost_kill 0
+
+notice "EXIT DONE"
+notice "==============="
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/perf_bench/vhost_perf.sh b/src/spdk/test/vhost/perf_bench/vhost_perf.sh
new file mode 100755
index 000000000..98c6a8e3c
--- /dev/null
+++ b/src/spdk/test/vhost/perf_bench/vhost_perf.sh
@@ -0,0 +1,473 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+vhost_num="0"
+vm_memory=2048
+vm_sar_enable=false
+host_sar_enable=false
+sar_delay="0"
+sar_interval="1"
+sar_count="10"
+vm_throttle=""
+ctrl_type="spdk_vhost_scsi"
+use_split=false
+kernel_cpus=""
+run_precondition=false
+lvol_stores=()
+lvol_bdevs=()
+split_bdevs=()
+used_vms=""
+wwpn_prefix="naa.5001405bc6498"
+packed_ring=false
+
+fio_iterations=1
+fio_gtod=""
+precond_fio_bin=$CONFIG_FIO_SOURCE_DIR/fio
+disk_map=""
+
+disk_cfg_bdfs=()
+disk_cfg_spdk_names=()
+disk_cfg_splits=()
+disk_cfg_vms=()
+disk_cfg_kernel_names=()
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help Print help and exit"
+ echo " --fio-bin=PATH Path to FIO binary on host.;"
+ echo " Binary will be copied to VM, static compilation"
+ echo " of binary is recommended."
+ echo " --fio-jobs=PATH Comma separated list of fio config files to use for test."
+ echo " --fio-iterations=INT Number of times to run specified workload."
+ echo " --fio-gtod-reduce Enable fio gtod_reduce option in test."
+ echo " --vm-memory=INT Amount of RAM memory (in MB) to pass to a single VM."
+ echo " Default: 2048 MB"
+ echo " --vm-image=PATH OS image to use for running the VMs."
+ echo " Default: \$HOME/vhost_vm_image.qcow2"
+ echo " --vm-sar-enable Measure CPU utilization in guest VMs using sar."
+ echo " --host-sar-enable Measure CPU utilization on host using sar."
+ echo " --sar-delay=INT Wait for X seconds before starting SAR measurement. Default: 0."
+ echo " --sar-interval=INT Interval (seconds) argument for SAR. Default: 1s."
+ echo " --sar-count=INT Count argument for SAR. Default: 10."
+ echo " --vm-throttle-iops=INT I/Os throttle rate in IOPS for each device on the VMs."
+ echo " --ctrl-type=TYPE Controller type to use for test:"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo " kernel_vhost - use kernel vhost scsi"
+ echo " Default: spdk_vhost_scsi"
+ echo " --packed-ring Use packed ring support. Requires Qemu 4.2.0 or greater. Default: disabled."
+ echo " --use-split Use split vbdevs instead of Logical Volumes"
+ echo " --limit-kernel-vhost=INT Limit kernel vhost to run only on a number of CPU cores."
+ echo " --run-precondition Precondition lvols after creating. Default: true."
+ echo " --precond-fio-bin FIO binary used for SPDK fio plugin precondition. Default: $CONFIG_FIO_SOURCE_DIR/fio."
+ echo " --custom-cpu-cfg=PATH Custom CPU config for test."
+ echo " Default: spdk/test/vhost/common/autotest.config"
+ echo " --disk-map Disk map for given test. Specify which disks to use, their SPDK name,"
+ echo " how many times to split them and which VMs should be attached to created bdevs."
+ echo " Example:"
+ echo " NVME PCI BDF,Spdk Bdev Name,Split Count,VM List"
+ echo " 0000:1a:00.0,Nvme0,2,0 1"
+ echo " 0000:1b:00.0,Nvme1,2,2 3"
+ echo "-x set -x for script debug"
+ exit 0
+}
+
+function cleanup_lvol_cfg() {
+ notice "Removing lvol bdevs"
+ for lvol_bdev in "${lvol_bdevs[@]}"; do
+ $rpc_py bdev_lvol_delete $lvol_bdev
+ notice "lvol bdev $lvol_bdev removed"
+ done
+
+ notice "Removing lvol stores"
+ for lvol_store in "${lvol_stores[@]}"; do
+ $rpc_py bdev_lvol_delete_lvstore -u $lvol_store
+ notice "lvol store $lvol_store removed"
+ done
+}
+
+function cleanup_split_cfg() {
+ notice "Removing split vbdevs"
+ for disk in "${disk_cfg_spdk_names[@]}"; do
+ $rpc_py bdev_split_delete ${disk}n1
+ done
+}
+
+function cleanup_parted_config() {
+ notice "Removing parted disk configuration"
+ for disk in "${disk_cfg_kernel_names[@]}"; do
+ parted -s /dev/${disk}n1 rm 1
+ done
+}
+
+function cleanup_kernel_vhost() {
+ notice "Cleaning kernel vhost configration"
+ targetcli clearconfig confirm=True
+ cleanup_parted_config
+}
+
+function create_vm() {
+ vm_num=$1
+ setup_cmd="vm_setup --disk-type=$ctrl_type --force=$vm_num --memory=$vm_memory --os=$VM_IMAGE"
+ if [[ "$ctrl_type" == "kernel_vhost" ]]; then
+ x=$(printf %03d $vm_num)
+ setup_cmd+=" --disks=${wwpn_prefix}${x}"
+ else
+ setup_cmd+=" --disks=0"
+ fi
+
+ if $packed_ring; then
+ setup_cmd+=" --packed"
+ fi
+
+ $setup_cmd
+ used_vms+=" $vm_num"
+ echo "Added to used vms"
+ echo $used_vms
+}
+
+function create_spdk_controller() {
+ vm_num=$1
+ bdev=$2
+
+ if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ $rpc_py vhost_create_scsi_controller naa.0.$vm_num
+ notice "Created vhost scsi controller naa.0.$vm_num"
+ $rpc_py vhost_scsi_controller_add_target naa.0.$vm_num 0 $bdev
+ notice "Added LUN 0/$bdev to controller naa.0.$vm_num"
+ elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ if $packed_ring; then
+ p_opt="-p"
+ fi
+
+ $rpc_py vhost_create_blk_controller naa.0.$vm_num $bdev $p_opt
+ notice "Created vhost blk controller naa.0.$vm_num $bdev"
+ fi
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
+ fio-jobs=*) fio_jobs="${OPTARG#*=}" ;;
+ fio-iterations=*) fio_iterations="${OPTARG#*=}" ;;
+ fio-gtod-reduce) fio_gtod="--gtod-reduce" ;;
+ vm-memory=*) vm_memory="${OPTARG#*=}" ;;
+ vm-image=*) VM_IMAGE="${OPTARG#*=}" ;;
+ vm-sar-enable) vm_sar_enable=true ;;
+ host-sar-enable) host_sar_enable=true ;;
+ sar-delay=*) sar_delay="${OPTARG#*=}" ;;
+ sar-interval=*) sar_interval="${OPTARG#*=}" ;;
+ sar-count=*) sar_count="${OPTARG#*=}" ;;
+ vm-throttle-iops=*) vm_throttle="${OPTARG#*=}" ;;
+ ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
+ packed-ring) packed_ring=true ;;
+ use-split) use_split=true ;;
+ run-precondition) run_precondition=true ;;
+ precond-fio-bin=*) precond_fio_bin="${OPTARG#*=}" ;;
+ limit-kernel-vhost=*) kernel_cpus="${OPTARG#*=}" ;;
+ custom-cpu-cfg=*) custom_cpu_cfg="${OPTARG#*=}" ;;
+ disk-map=*) disk_map="${OPTARG#*=}" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+if [[ -n $custom_cpu_cfg ]]; then
+ source $custom_cpu_cfg
+ vhost_reactor_mask="vhost_${vhost_num}_reactor_mask"
+ vhost_reactor_mask="${!vhost_reactor_mask}"
+ vhost_master_core="vhost_${vhost_num}_master_core"
+ vhost_master_core="${!vhost_master_core}"
+fi
+
+if [[ -z $fio_jobs ]]; then
+ error "No FIO job specified!"
+fi
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
+
+if [[ -z $disk_map ]]; then
+ fail "No disk map provided for test. Exiting."
+fi
+
+# ===== Precondition NVMes if specified =====
+if [[ $run_precondition == true ]]; then
+ # Using the same preconditioning routine is possible for lvols thanks
+ # to the --clear-method option. Lvols should not UNMAP on creation.
+ json_cfg=$rootdir/nvme.json
+ cat <<- JSON > "$json_cfg"
+ {"subsystems":[
+ $("$rootdir/scripts/gen_nvme.sh" --json)
+ ]}
+ JSON
+ mapfile -t nvmes < <(grep -oP "Nvme\d+" "$json_cfg")
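+ # The fio spdk_bdev ioengine expects a ':'-separated list of bdev names as the
+ # filename, e.g. "Nvme0n1:Nvme1n1".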
+ fio_filename=$(printf ":%sn1" "${nvmes[@]}")
+ fio_filename=${fio_filename:1}
+ $precond_fio_bin --name="precondition" \
+ --ioengine="${rootdir}/build/fio/spdk_bdev" \
+ --rw="write" --spdk_json_conf="$json_cfg" --thread="1" \
+ --group_reporting --direct="1" --size="100%" --loops="2" --bs="256k" \
+ --iodepth=32 --filename="${fio_filename}" || true
+fi
+
+set +x
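+# Each disk_map line is expected as "PCI BDF,SPDK bdev name,split count,space-separated VM list",
+# e.g. "0000:1a:00.0,Nvme0,2,0 1" (see the --disk-map usage description above).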
+readarray disk_cfg < $disk_map
+for line in "${disk_cfg[@]}"; do
+ echo $line
+ IFS=","
+ s=($line)
+ disk_cfg_bdfs+=(${s[0]})
+ disk_cfg_spdk_names+=(${s[1]})
+ disk_cfg_splits+=(${s[2]})
+ disk_cfg_vms+=("${s[3]}")
+
+ # Find kernel nvme names
+ if [[ "$ctrl_type" == "kernel_vhost" ]]; then
+ tmp=$(find /sys/devices/pci* -name ${s[0]} -print0 | xargs sh -c 'ls $0/nvme')
+ disk_cfg_kernel_names+=($tmp)
+ IFS=" "
+ fi
+done
+unset IFS
+set -x
+
+if [[ "$ctrl_type" == "kernel_vhost" ]]; then
+ notice "Configuring kernel vhost..."
+ trap 'vm_kill_all; sleep 1; cleanup_kernel_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
+
+ # Split disks using parted for kernel vhost
+ newline=$'\n'
+ backstores=()
+ for ((i = 0; i < ${#disk_cfg_kernel_names[@]}; i++)); do
+ nvme=${disk_cfg_kernel_names[$i]}
+ splits=${disk_cfg_splits[$i]}
+ notice " Creating extended partition on disk /dev/${nvme}n1"
+ parted -s /dev/${nvme}n1 mklabel msdos
+ parted -s /dev/${nvme}n1 mkpart extended 2048s 100%
+
+ part_size=$((100 / ${disk_cfg_splits[$i]})) # Split 100% of disk into roughly even parts
+ echo " Creating ${splits} partitions of relative disk size ${part_size}"
+ for p in $(seq 0 $((splits - 1))); do
+ p_start=$((p * part_size))
+ p_end=$((p_start + part_size))
+ parted -s /dev/${nvme}n1 mkpart logical ${p_start}% ${p_end}%
+ sleep 3
+ done
+
+ # Prepare kernel vhost configuration
+ # Below grep: match only NVMe partitions which are not "Extended" type.
+ # For example: will match nvme0n1p15 but not nvme0n1p1
+ partitions=$(find /dev -name "${nvme}n1*" | sort --version-sort | grep -P 'p(?!1$)\d+')
+ # Create block backstores for vhost kernel process
+ for p in $partitions; do
+ backstore_name=$(basename $p)
+ backstores+=("$backstore_name")
+ targetcli backstores/block create $backstore_name $p
+ done
+ partitions=($partitions)
+
+ # Create kernel vhost controllers and add LUNs
+ # Setup VM configurations
+ vms_to_run=(${disk_cfg_vms[i]})
+ for ((j = 0; j < ${#vms_to_run[@]}; j++)); do
+ # The WWPN prefix is missing 3 characters; complete it
+ # using the block backstore number.
+ x=$(printf %03d ${vms_to_run[$j]})
+ wwpn="${wwpn_prefix}${x}"
+ targetcli vhost/ create $wwpn
+ targetcli vhost/$wwpn/tpg1/luns create /backstores/block/$(basename ${partitions[$j]})
+ create_vm ${vms_to_run[j]}
+ sleep 1
+ done
+ done
+ targetcli ls
+else
+ notice "Configuring SPDK vhost..."
+ vhost_run "${vhost_num}" "--no-gen-nvme" "-p ${vhost_master_core}" "-m ${vhost_reactor_mask}"
+ notice "..."
+
+ if [[ $use_split == true ]]; then
+ notice "Configuring split bdevs configuration..."
+ trap 'cleanup_split_cfg; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
+ for ((i = 0; i < ${#disk_cfg_bdfs[@]}; i++)); do
+ nvme_bdev=$($rpc_py bdev_nvme_attach_controller -b ${disk_cfg_spdk_names[$i]} -t pcie -a ${disk_cfg_bdfs[$i]})
+ notice "Created NVMe Bdev: $nvme_bdev with BDF ${disk_cfg_bdfs[$i]}"
+
+ splits=$($rpc_py bdev_split_create $nvme_bdev ${disk_cfg_splits[$i]})
+ splits=($splits)
+ notice "Created splits: ${splits[*]} on Bdev ${nvme_bdev}"
+ for s in "${splits[@]}"; do
+ split_bdevs+=($s)
+ done
+
+ vms_to_run=(${disk_cfg_vms[i]})
+ for ((j = 0; j < ${#vms_to_run[@]}; j++)); do
+ notice "Setting up VM ${vms_to_run[j]}"
+ create_spdk_controller "${vms_to_run[j]}" ${splits[j]}
+ create_vm ${vms_to_run[j]}
+ done
+ echo " "
+ done
+ bdevs=("${split_bdevs[@]}")
+ else
+ notice "Configuring LVOLs..."
+ trap 'cleanup_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
+ for ((i = 0; i < ${#disk_cfg_bdfs[@]}; i++)); do
+ nvme_bdev=$($rpc_py bdev_nvme_attach_controller -b ${disk_cfg_spdk_names[$i]} -t pcie -a ${disk_cfg_bdfs[$i]})
+ notice "Created NVMe Bdev: $nvme_bdev with BDF ${disk_cfg_bdfs[$i]}"
+
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore $nvme_bdev lvs_$i --clear-method none)
+ lvol_stores+=("$ls_guid")
+ notice "Created Lvol Store: $ls_guid on Bdev $nvme_bdev"
+
+ vms_to_run=(${disk_cfg_vms[i]})
+ for ((j = 0; j < ${disk_cfg_splits[$i]}; j++)); do
+ free_mb=$(get_lvs_free_mb "$ls_guid")
+ size=$((free_mb / ((${disk_cfg_splits[$i]} - j))))
+ lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_$j $size --clear-method none)
+ lvol_bdevs+=("$lb_name")
+ notice "Created LVOL Bdev $lb_name on Lvol Store $ls_guid on Bdev $nvme_bdev"
+
+ notice "Setting up VM ${vms_to_run[j]}"
+ create_spdk_controller "${vms_to_run[j]}" ${lb_name}
+ create_vm ${vms_to_run[j]}
+ done
+ echo " "
+ done
+ $rpc_py bdev_lvol_get_lvstores
+ fi
+ $rpc_py bdev_get_bdevs
+ $rpc_py vhost_get_controllers
+fi
+
+# Start VMs and wait for them to boot
+vm_run $used_vms
+vm_wait_for_boot 300 $used_vms
+
+if [[ -n "$kernel_cpus" ]]; then
+ mkdir -p /sys/fs/cgroup/cpuset/spdk
+ kernel_mask=$vhost_0_reactor_mask
+ kernel_mask=${kernel_mask#"["}
+ kernel_mask=${kernel_mask%"]"}
+
+ echo "$kernel_mask" >> /sys/fs/cgroup/cpuset/spdk/cpuset.cpus
+ echo "0-1" >> /sys/fs/cgroup/cpuset/spdk/cpuset.mems
+
+ kernel_vhost_pids=$(pgrep "vhost" -U root)
+ for kpid in $kernel_vhost_pids; do
+ echo "Limiting kernel vhost pid ${kpid}"
+ echo "${kpid}" >> /sys/fs/cgroup/cpuset/spdk/tasks
+ done
+fi
+
+# Run FIO
+fio_disks=""
+for vm_num in $used_vms; do
+ host_name="VM-$vm_num"
+ vm_exec $vm_num "hostname $host_name"
+ vm_start_fio_server $fio_bin $vm_num
+
+ if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ vm_check_scsi_location $vm_num
+ elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ vm_check_blk_location $vm_num
+ elif [[ "$ctrl_type" == "kernel_vhost" ]]; then
+ vm_check_scsi_location $vm_num
+ fi
+
+ if [[ -n "$vm_throttle" ]]; then
+ block=$(printf '%s' $SCSI_DISK)
+ major_minor=$(vm_exec "$vm_num" "cat /sys/block/$block/dev")
+ vm_exec "$vm_num" "echo \"$major_minor $vm_throttle\" > /sys/fs/cgroup/blkio/blkio.throttle.read_iops_device"
+ vm_exec "$vm_num" "echo \"$major_minor $vm_throttle\" > /sys/fs/cgroup/blkio/blkio.throttle.write_iops_device"
+ fi
+
+ fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
+done
+
+# Run FIO traffic
+for fio_job in ${fio_jobs//,/ }; do
+ fio_job_fname=$(basename $fio_job)
+ fio_log_fname="${fio_job_fname%%.*}.log"
+ for i in $(seq 1 $fio_iterations); do
+ echo "Running FIO iteration $i for $fio_job_fname"
+ run_fio $fio_bin --hide-results --job-file="$fio_job" --out="$VHOST_DIR/fio_results" --json $fio_disks $fio_gtod &
+ fio_pid=$!
+
+ if $host_sar_enable || $vm_sar_enable; then
+ pids=""
+ mkdir -p $VHOST_DIR/fio_results/sar_stats
+ sleep $sar_delay
+ fi
+
+ if $host_sar_enable; then
+ sar -P ALL $sar_interval $sar_count > "$VHOST_DIR/fio_results/sar_stats/sar_stats_host.txt" &
+ pids+=" $!"
+ fi
+
+ if $vm_sar_enable; then
+ for vm_num in $used_vms; do
+ vm_exec "$vm_num" "mkdir -p /root/sar; sar -P ALL $sar_interval $sar_count >> /root/sar/sar_stats_VM${vm_num}_run${i}.txt" &
+ pids+=" $!"
+ done
+ fi
+
+ for j in $pids; do
+ wait $j
+ done
+
+ if $vm_sar_enable; then
+ for vm_num in $used_vms; do
+ vm_scp "$vm_num" "root@127.0.0.1:/root/sar/sar_stats_VM${vm_num}_run${i}.txt" "$VHOST_DIR/fio_results/sar_stats"
+ done
+ fi
+
+ wait $fio_pid
+ mv $VHOST_DIR/fio_results/$fio_log_fname $VHOST_DIR/fio_results/$fio_log_fname.$i
+ sleep 1
+ done
+
+ parse_fio_results "$VHOST_DIR/fio_results" "$fio_log_fname"
+done
+
+notice "Shutting down virtual machines..."
+vm_shutdown_all
+
+if [[ "$ctrl_type" == "kernel_vhost" ]]; then
+ cleanup_kernel_vhost || true
+else
+ notice "Shutting down SPDK vhost app..."
+ if [[ $use_split == true ]]; then
+ cleanup_split_cfg
+ else
+ cleanup_lvol_cfg
+ fi
+ vhost_kill "${vhost_num}"
+fi
+
+if [[ -n "$kernel_cpus" ]]; then
+ rmdir /sys/fs/cgroup/cpuset/spdk
+fi
diff --git a/src/spdk/test/vhost/readonly/delete_partition_vm.sh b/src/spdk/test/vhost/readonly/delete_partition_vm.sh
new file mode 100755
index 000000000..efba257f0
--- /dev/null
+++ b/src/spdk/test/vhost/readonly/delete_partition_vm.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -xe
+
+testdir=$(readlink -f $(dirname $0))
+
+disk_name="vda"
+test_folder_name="readonly_test"
+test_file_name="some_test_file"
+
+function error() {
+ echo "==========="
+ echo -e "ERROR: $*"
+ echo "==========="
+ trap - ERR
+ set +e
+ umount "$test_folder_name"
+ rm -rf "${testdir:?}/${test_folder_name:?}"
+ exit 1
+}
+
+trap 'error "In delete_partition_vm.sh, line:" "${LINENO}"' ERR
+
+if [[ ! -d "/sys/block/$disk_name" ]]; then
+ error "No vhost-blk disk found!"
+fi
+
+if (($(lsblk -r -n -o RO -d "/dev/$disk_name") == 1)); then
+ error "Vhost-blk disk is set as readonly!"
+fi
+
+mkdir -p $test_folder_name
+
+echo "INFO: Mounting disk"
+mount /dev/$disk_name"1" $test_folder_name
+
+echo "INFO: Removing folder and unmounting $test_folder_name"
+umount "$test_folder_name"
+rm -rf "${testdir:?}/${test_folder_name:?}"
+
+echo "INFO: Deleting partition"
+echo -e "d\n1\nw" | fdisk /dev/$disk_name
diff --git a/src/spdk/test/vhost/readonly/disabled_readonly_vm.sh b/src/spdk/test/vhost/readonly/disabled_readonly_vm.sh
new file mode 100755
index 000000000..2aec5b80a
--- /dev/null
+++ b/src/spdk/test/vhost/readonly/disabled_readonly_vm.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+set -xe
+
+testdir=$(readlink -f $(dirname $0))
+
+disk_name="vda"
+test_folder_name="readonly_test"
+test_file_name="some_test_file"
+
+function error() {
+ echo "==========="
+ echo -e "ERROR: $*"
+ echo "==========="
+ trap - ERR
+ set +e
+ umount "$test_folder_name"
+ rm -rf "${testdir:?}/${test_folder_name:?}"
+ exit 1
+}
+
+trap 'error "In disabled_readonly_vm.sh, line:" "${LINENO}"' ERR
+
+if [[ ! -d "/sys/block/$disk_name" ]]; then
+ error "No vhost-blk disk found!"
+fi
+
+if (($(lsblk -r -n -o RO -d "/dev/$disk_name") == 1)); then
+ error "Vhost-blk disk is set as readonly!"
+fi
+
+parted -s /dev/$disk_name mklabel gpt
+parted -s /dev/$disk_name mkpart primary 2048s 100%
+partprobe
+sleep 0.1
+
+echo "INFO: Creating file system"
+mkfs.ext4 -F /dev/$disk_name"1"
+
+echo "INFO: Mounting disk"
+mkdir -p $test_folder_name
+mount /dev/$disk_name"1" $test_folder_name
+
+echo "INFO: Creating a test file $test_file_name"
+truncate -s "200M" $test_folder_name/$test_file_name
+umount "$test_folder_name"
+rm -rf "${testdir:?}/${test_folder_name:?}"
diff --git a/src/spdk/test/vhost/readonly/enabled_readonly_vm.sh b/src/spdk/test/vhost/readonly/enabled_readonly_vm.sh
new file mode 100755
index 000000000..939af6f08
--- /dev/null
+++ b/src/spdk/test/vhost/readonly/enabled_readonly_vm.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+
+set -x
+
+testdir=$(readlink -f $(dirname $0))
+
+disk_name="vda"
+test_folder_name="readonly_test"
+test_file_name="some_test_file"
+
+function error() {
+ echo "==========="
+ echo -e "ERROR: $*"
+ echo "==========="
+ umount "$test_folder_name"
+ rm -rf "${testdir:?}/${test_folder_name:?}"
+ exit 1
+}
+
+if [[ ! -d "/sys/block/$disk_name" ]]; then
+ error "No vhost-blk disk found!"
+fi
+
+if (($(lsblk -r -n -o RO -d "/dev/$disk_name") == 0)); then
+ error "Vhost-blk disk is not set as readonly!"
+fi
+
+echo "INFO: Found vhost-blk disk with readonly flag"
+if [[ ! -b "/dev/${disk_name}1" ]]; then
+ error "Partition not found!"
+fi
+
+if ! mkdir $testdir/$test_folder_name; then
+ error "Failed to create test folder $test_folder_name"
+fi
+
+echo "INFO: Mounting partition"
+if ! mount /dev/$disk_name"1" $testdir/$test_folder_name; then
+ error "Failed to mount partition $disk_name""1"
+fi
+
+echo "INFO: Trying to create file on readonly disk"
+if truncate -s "200M" $test_folder_name/$test_file_name"_on_readonly"; then
+ error "Created a file on a readonly disk!"
+fi
+
+if [[ -f $test_folder_name/$test_file_name ]]; then
+ echo "INFO: Trying to delete previously created file"
+ if rm $test_folder_name/$test_file_name; then
+ error "Deleted a file from a readonly disk!"
+ fi
+else
+ error "Previously created file not found!"
+fi
+
+echo "INFO: Copying file from readonly disk"
+cp $test_folder_name/$test_file_name $testdir
+if ! rm $testdir/$test_file_name; then
+ error "Copied file from a readonly disk was not found!"
+fi
+
+umount "$test_folder_name"
+rm -rf "${testdir:?}/${test_folder_name:?}"
+echo "INFO: Trying to create file system on a readonly disk"
+if mkfs.ext4 -F /dev/$disk_name"1"; then
+ error "Created file system on a readonly disk!"
+fi
+
+echo "INFO: Trying to delete partition from readonly disk"
+if echo -e "d\n1\nw" | fdisk /dev/$disk_name; then
+ error "Deleted partition from readonly disk!"
+fi
diff --git a/src/spdk/test/vhost/readonly/readonly.sh b/src/spdk/test/vhost/readonly/readonly.sh
new file mode 100755
index 000000000..ad66f72e0
--- /dev/null
+++ b/src/spdk/test/vhost/readonly/readonly.sh
@@ -0,0 +1,136 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+rpc_py="$testdir/../../../scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+vm_img=""
+disk="Nvme0n1"
+x=""
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for automated readonly test for vhost-block"
+ echo "For test details check test_plan.md"
+ echo
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help Print help and exit"
+ echo " --vm_image= Path to VM image"
+ echo " --disk= Disk name."
+ echo " If disk=malloc, then creates malloc disk. For malloc disks, size is always 512M,"
+ echo " e.g. --disk=malloc. (Default: Nvme0n1)"
+ echo "-x set -x for script debug"
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 && exit 0 ;;
+ vm_image=*) vm_img="${OPTARG#*=}" ;;
+ disk=*) disk="${OPTARG#*=}" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" && exit 1 ;;
+ esac
+ ;;
+ h) usage $0 && exit 0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" && exit 1 ;;
+ esac
+done
+
+vhosttestinit
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
+
+if [[ $EUID -ne 0 ]]; then
+ fail "Go away user come back as root"
+fi
+
+function print_tc_name() {
+ notice ""
+ notice "==============================================================="
+ notice "Now running: $1"
+ notice "==============================================================="
+}
+
+function blk_ro_tc1() {
+ print_tc_name ${FUNCNAME[0]}
+ local vm_no="0"
+ local disk_name=$disk
+ local vhost_blk_name=""
+ local vm_dir="$VHOST_DIR/vms/$vm_no"
+
+ if [[ $disk =~ .*malloc.* ]]; then
+ if ! disk_name=$($rpc_py bdev_malloc_create 512 4096); then
+ fail "Failed to create malloc bdev"
+ fi
+
+ disk=$disk_name
+ else
+ disk_name=${disk%%_*}
+ if ! $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qi $disk_name$; then
+ fail "$disk_name bdev not found!"
+ fi
+ fi
+
+ #Create controller and create file on disk for later test
+ notice "Creating vhost_blk controller"
+ vhost_blk_name="naa.$disk_name.$vm_no"
+ $rpc_py vhost_create_blk_controller $vhost_blk_name $disk_name
+ vm_setup --disk-type=spdk_vhost_blk --force=$vm_no --os=$vm_img --disks=$disk --read-only=true
+
+ vm_run $vm_no
+ vm_wait_for_boot 300 $vm_no
+ notice "Preparing partition and file on guest VM"
+ vm_exec $vm_no "bash -s" < $testdir/disabled_readonly_vm.sh
+ sleep 1
+
+ vm_shutdown_all
+ #Create readonly controller and test readonly feature
+ notice "Removing controller and creating new one with readonly flag"
+ $rpc_py vhost_delete_controller $vhost_blk_name
+ $rpc_py vhost_create_blk_controller -r $vhost_blk_name $disk_name
+
+ vm_run $vm_no
+ vm_wait_for_boot 300 $vm_no
+ notice "Testing readonly feature on guest VM"
+ vm_exec $vm_no "bash -s" < $testdir/enabled_readonly_vm.sh
+ sleep 3
+
+ vm_shutdown_all
+ #Delete file from disk and delete partition
+ echo "INFO: Removing controller and creating new one"
+ $rpc_py vhost_delete_controller $vhost_blk_name
+ $rpc_py vhost_create_blk_controller $vhost_blk_name $disk_name
+
+ vm_run $vm_no
+ vm_wait_for_boot 300 $vm_no
+ notice "Removing partition and file from test disk on guest VM"
+ vm_exec $vm_no "bash -s" < $testdir/delete_partition_vm.sh
+ sleep 1
+
+ vm_shutdown_all
+}
+
+vhost_run 0
+if [[ -z $x ]]; then
+ set +x
+fi
+
+blk_ro_tc1
+
+$rpc_py bdev_nvme_detach_controller Nvme0
+
+vhost_kill 0
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/shared/bdev.json b/src/spdk/test/vhost/shared/bdev.json
new file mode 100644
index 000000000..ad28314a5
--- /dev/null
+++ b/src/spdk/test/vhost/shared/bdev.json
@@ -0,0 +1,20 @@
+{
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "method": "bdev_virtio_attach_controller",
+ "params": {
+ "vq_count": 2,
+ "traddr": "Malloc.0",
+ "dev_type": "blk",
+ "vq_size": 512,
+ "name": "VirtioBlk0",
+ "trtype": "user"
+ }
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/spdk/test/vhost/shared/shared.sh b/src/spdk/test/vhost/shared/shared.sh
new file mode 100755
index 000000000..bbf0fd858
--- /dev/null
+++ b/src/spdk/test/vhost/shared/shared.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+function run_spdk_fio() {
+ fio_bdev --ioengine=spdk_bdev \
+ "$rootdir/test/vhost/common/fio_jobs/default_initiator.job" --runtime=10 --rw=randrw \
+ --spdk_mem=1024 --spdk_single_seg=1 --spdk_json_conf=$testdir/bdev.json "$@"
+}
+
+vhosttestinit
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR SIGTERM SIGABRT
+
+vhost_run 0
+
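+# Create a 124 MB malloc bdev, expose it as vhost-user blk device Malloc.0, then run
+# two fio jobs concurrently against the same VirtioBlk0 initiator bdev at different
+# offsets to exercise shared access.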
+$rpc_py bdev_malloc_create -b Malloc 124 4096
+$rpc_py vhost_create_blk_controller Malloc.0 Malloc
+
+run_spdk_fio --size=50% --offset=0 --filename=VirtioBlk0 &
+run_fio_pid=$!
+sleep 1
+run_spdk_fio --size=50% --offset=50% --filename=VirtioBlk0
+wait $run_fio_pid
+vhost_kill 0
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/vhost.sh b/src/spdk/test/vhost/vhost.sh
new file mode 100755
index 000000000..5b050fe40
--- /dev/null
+++ b/src/spdk/test/vhost/vhost.sh
@@ -0,0 +1,107 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+echo "Running SPDK vhost fio autotest..."
+if [[ $(uname -s) != Linux ]]; then
+ echo ""
+ echo "INFO: Vhost tests are only for Linux machine."
+ echo ""
+ exit 0
+fi
+
+CENTOS_VM_IMAGE="/home/sys_sgsw/spdk_vhost_CentOS_vm_image.qcow2"
+DEFAULT_FIO_BIN="/home/sys_sgsw/fio_ubuntu"
+CENTOS_FIO_BIN="/home/sys_sgsw/fio_ubuntu_bak"
+
+: ${FIO_BIN="$DEFAULT_FIO_BIN"}
+
+if [[ ! -r "${VM_IMAGE}" ]]; then
+ echo ""
+ echo "ERROR: VM image '${VM_IMAGE}' does not exist."
+ echo ""
+ exit 1
+fi
+
+DISKS_NUMBER=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"$1}' | wc -l)
+
+WORKDIR=$(readlink -f $(dirname $0))
+
+run_test "vhost_negative" $WORKDIR/other/negative.sh
+
+run_test "vhost_boot" $WORKDIR/vhost_boot/vhost_boot.sh --vm_image=$VM_IMAGE
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ echo 'Running blk integrity suite...'
+ run_test "vhost_blk_integrity" $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
+ --vm=0,$VM_IMAGE,Nvme0n1p0:RaidBdev0:RaidBdev1:RaidBdev2 \
+ --test-type=spdk_vhost_blk \
+ --fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
+
+ echo 'Running SCSI integrity suite...'
+ run_test "vhost_scsi_integrity" $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
+ --vm=0,$VM_IMAGE,Nvme0n1p0:RaidBdev0:RaidBdev1:RaidBdev2 \
+ --test-type=spdk_vhost_scsi \
+ --fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
+
+ echo 'Running filesystem integrity suite with SCSI...'
+ run_test "vhost_scsi_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_scsi --fs="xfs ntfs btrfs ext4"
+
+ echo 'Running filesystem integrity suite with BLK...'
+ run_test "vhost_blk_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4"
+
+ if [[ $DISKS_NUMBER -ge 2 ]]; then
+ echo 'Running lvol integrity nightly suite with two cores and two controllers'
+ run_test "vhost_scsi_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_scsi --max-disks=2 --distribute-cores --vm-count=2
+
+ echo 'Running lvol integrity nightly suite with one core and two controllers'
+ run_test "vhost_scsi_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_scsi --max-disks=2 --vm-count=2
+ fi
+ if [[ -e $CENTOS_VM_IMAGE ]]; then
+ echo 'Running lvol integrity nightly suite with different os types'
+ run_test "vhost_scsi_nightly" $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
+ --ctrl-type=spdk_vhost_scsi --vm-count=2 --multi-os
+ fi
+ echo 'Running lvol integrity nightly suite with one core and one controller'
+ run_test "vhost_scsi_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_scsi --max-disks=1
+
+ if [[ $DISKS_NUMBER -ge 2 ]]; then
+ echo 'Running lvol integrity nightly suite with two cores and two controllers'
+ run_test "vhost_blk_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_blk --max-disks=2 --distribute-cores --vm-count=2
+
+ echo 'Running lvol integrity nightly suite with one core and two controllers'
+ run_test "vhost_blk_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_blk --max-disks=2 --vm-count=2
+ fi
+ if [[ -e $CENTOS_VM_IMAGE ]]; then
+ echo 'Running lvol integrity nightly suite with different os types'
+ run_test "vhost_blk_nightly" $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
+ --ctrl-type=spdk_vhost_blk --vm-count=2 --multi-os
+ fi
+ echo 'Running lvol integrity nightly suite with one core and one controller'
+ run_test "vhost_lvol_integrity_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_blk --max-disks=1
+
+ echo 'Running readonly tests suite...'
+ run_test "vhost_readonly" $WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x
+
+ echo 'Running migration suite...'
+ run_test "vhost_migration" $WORKDIR/migration/migration.sh -x \
+ --fio-bin=$FIO_BIN --os=$VM_IMAGE
+fi
+
+echo 'Running lvol integrity suite...'
+run_test "vhost_scsi_lvol_integrity" $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_scsi --thin-provisioning
+
+echo 'Running lvol integrity suite...'
+run_test "vhost_blk_lvol_integrity" $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_blk
+
+run_test "spdkcli_vhost" ./test/spdkcli/vhost.sh
diff --git a/src/spdk/test/vhost/vhost_boot/vhost_boot.sh b/src/spdk/test/vhost/vhost_boot/vhost_boot.sh
new file mode 100755
index 000000000..9df2bd970
--- /dev/null
+++ b/src/spdk/test/vhost/vhost_boot/vhost_boot.sh
@@ -0,0 +1,126 @@
+#!/usr/bin/env bash
+set -xe
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/bdev/nbd_common.sh
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+vm_no="0"
+
+function err_clean() {
+ trap - ERR
+ print_backtrace
+ set +e
+ error "Error on $1 $2"
+ vm_kill_all
+ $rpc_py vhost_scsi_controller_remove_target naa.vhost_vm.$vm_no 0
+ $rpc_py vhost_delete_controller naa.vhost_vm.$vm_no
+ $rpc_py bdev_lvol_delete $lvb_u
+ $rpc_py bdev_lvol_delete_lvstore -u $lvs_u
+ vhost_kill 0
+ exit 1
+}
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Usage: $(basename $1) vm_image=PATH [-h|--help]"
+ echo "-h, --help Print help and exit"
+ echo " --vm_image=PATH Path to VM image used in these tests"
+}
+
+while getopts 'h-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ vm_image=*) os_image="${OPTARG#*=}" ;;
+ *) usage $0 echo "Invalid argument '$OPTARG'" && exit 1 ;;
+ esac
+ ;;
+ h) usage $0 && exit 0 ;;
+ *) usage $0 "Invalid argument '$optchar'" && exit 1 ;;
+ esac
+done
+
+if [[ $EUID -ne 0 ]]; then
+ echo "INFO: Go away user come back as root"
+ exit 1
+fi
+
+if [[ -z $os_image ]]; then
+ echo "No path to os image is given"
+ exit 1
+fi
+
+vhosttestinit
+
+trap 'err_clean "${FUNCNAME}" "${LINENO}"' ERR
+timing_enter start_vhost
+vhost_run 0
+timing_exit start_vhost
+
+timing_enter create_lvol
+
+nvme_bdev=$($rpc_py bdev_get_bdevs -b Nvme0n1)
+nvme_bdev_bs=$(jq ".[] .block_size" <<< "$nvme_bdev")
+nvme_bdev_name=$(jq ".[] .name" <<< "$nvme_bdev")
+if [[ $nvme_bdev_bs != 512 ]]; then
+ echo "ERROR: Your device $nvme_bdev_name block size is $nvme_bdev_bs, but should be 512 bytes."
+ false
+fi
+
+lvs_u=$($rpc_py bdev_lvol_create_lvstore Nvme0n1 lvs0)
+lvb_u=$($rpc_py bdev_lvol_create -u $lvs_u lvb0 20000)
+timing_exit create_lvol
+
+timing_enter convert_vm_image
+modprobe nbd
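+# Expose the lvol bdev on the host as /dev/nbd0 and write the VM image onto it in raw format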
+trap 'nbd_stop_disks $(get_vhost_dir 0)/rpc.sock /dev/nbd0; err_clean "${FUNCNAME}" "${LINENO}"' ERR
+nbd_start_disks "$(get_vhost_dir 0)/rpc.sock" $lvb_u /dev/nbd0
+qemu-img convert $os_image -O raw /dev/nbd0
+sync
+nbd_stop_disks $(get_vhost_dir 0)/rpc.sock /dev/nbd0
+sleep 1
+timing_exit convert_vm_image
+
+trap 'err_clean "${FUNCNAME}" "${LINENO}"' ERR
+timing_enter create_vhost_controller
+$rpc_py vhost_create_scsi_controller naa.vhost_vm.$vm_no
+$rpc_py vhost_scsi_controller_add_target naa.vhost_vm.$vm_no 0 $lvb_u
+timing_exit create_vhost_controller
+
+timing_enter setup_vm
+vm_setup --disk-type=spdk_vhost_scsi --force=$vm_no --disks="vhost_vm" --spdk-boot="vhost_vm"
+vm_run $vm_no
+vm_wait_for_boot 300 $vm_no
+timing_exit setup_vm
+
+timing_enter run_vm_cmd
+vm_exec $vm_no "parted -s /dev/sda mkpart primary 10GB 100%; partprobe; sleep 0.1;"
+vm_exec $vm_no "mkfs.ext4 -F /dev/sda2; mkdir -p /mnt/sda2test; mount /dev/sda2 /mnt/sda2test;"
+vm_exec $vm_no "fio --name=integrity --bsrange=4k-512k --iodepth=128 --numjobs=1 --direct=1 \
+ --thread=1 --group_reporting=1 --rw=randrw --rwmixread=70 --filename=/mnt/sda2test/test_file \
+ --verify=md5 --do_verify=1 --verify_backlog=1024 --fsync_on_close=1 --runtime=20 \
+ --time_based=1 --size=1024m"
+vm_exec $vm_no "umount /mnt/sda2test; rm -rf /mnt/sda2test"
+alignment_offset=$(vm_exec $vm_no "cat /sys/block/sda/sda1/alignment_offset")
+echo "alignment_offset: $alignment_offset"
+timing_exit run_vm_cmd
+
+vm_shutdown_all
+
+timing_enter clean_vhost
+$rpc_py vhost_scsi_controller_remove_target naa.vhost_vm.$vm_no 0
+$rpc_py vhost_delete_controller naa.vhost_vm.$vm_no
+$rpc_py bdev_lvol_delete $lvb_u
+$rpc_py bdev_lvol_delete_lvstore -u $lvs_u
+timing_exit clean_vhost
+
+vhost_kill 0
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/windows/windows.sh b/src/spdk/test/vhost/windows/windows.sh
new file mode 100755
index 000000000..6bf8573f7
--- /dev/null
+++ b/src/spdk/test/vhost/windows/windows.sh
@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+ctrl_type="spdk_vhost_scsi"
+ssh_pass=""
+vm_num="0"
+vm_image="/home/sys_sgsw/windows_server.qcow2"
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Windows Server automated test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo "--vm-ssh-pass=PASSWORD Text password for the VM"
+ echo "--vm-image=PATH Path to qcow2 image of Windows VM"
+ echo "--ctrl-type=TYPE Controller type to use for test:"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo "-x set -x for script debug"
+ echo "-h, --help Print help and exit"
+
+ exit 0
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ vm-ssh-pass=*) ssh_pass="${OPTARG#*=}" ;;
+ vm-image=*) vm_image="${OPTARG#*=}" ;;
+ ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+# For some reason there is a problem with using SSH key authentication
+# together with Windows UAC. Some of the PowerShell commands fail due to lack of
+# permissions, despite the script running in elevated mode.
+# There are some clues suggesting this setup might not work properly:
+# https://superuser.com/questions/181581/how-can-i-run-something-as-administrator-via-cygwins-ssh
+# https://cygwin.com/ml/cygwin/2004-09/msg00087.html
+# But they apply to rather old Windows distributions.
+# Potentially using Windows Server 2016 and newer may solve the issue
+# due to OpenSSH being available directly from Windows Store.
+function vm_sshpass() {
+ vm_num_is_valid $1 || return 1
+
+ local ssh_cmd
+ ssh_cmd="sshpass -p $2 ssh \
+ -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ -o User=root \
+ -p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"
+
+ shift 2
+ $ssh_cmd "$@"
+}
+
+if [[ -z "$ssh_pass" ]]; then
+ error "Please specify --vm-ssh-pass parameter"
+fi
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"; rm -f $aio_file' SIGTERM SIGABRT ERR
+
+vm_kill_all
+
+# Run vhost without debug!
+# Windows Virtio drivers use indirect descriptors without negotiating
+# their feature flag, which is explicitly forbidden by the Virtio 1.0 spec.
+# "(2.4.5.3.1 Driver Requirements: Indirect Descriptors)
+# The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT flag unless the
+# VIRTIO_F_INDIRECT_DESC feature was negotiated.".
+# Violating this rule doesn't cause any issues for SPDK vhost,
+# but triggers an assert, so we can only run Windows VMs with non-debug SPDK builds.
+notice "running SPDK vhost"
+vhost_run 0
+notice "..."
+
+# Prepare bdevs for later vhost controllers use
+# Nvme bdev is automatically constructed during vhost_run
+# by using scripts/gen_nvme.sh. No need to add it manually.
+# Using various sizes makes it easier to identify bdevs when no
+# name is available in BLK.
+# TODO: use a param for blocksize for AIO and Malloc bdevs
+aio_file="$SPDK_TEST_STORAGE/aio_disk"
+dd if=/dev/zero of=$aio_file bs=1M count=512
+$rpc_py bdev_aio_create $aio_file Aio0 512
+$rpc_py bdev_malloc_create -b Malloc0 256 512
+$rpc_py bdev_get_bdevs
+
+# Create vhost controllers
+# Prepare VM setup command
+setup_cmd="vm_setup --force=0 --memory=8192"
+setup_cmd+=" --os=$vm_image"
+
+if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ $rpc_py vhost_create_scsi_controller naa.0.0
+ $rpc_py vhost_scsi_controller_add_target naa.0.0 0 Nvme0n1
+ $rpc_py vhost_scsi_controller_add_target naa.0.0 1 Malloc0
+ $rpc_py vhost_scsi_controller_add_target naa.0.0 2 Aio0
+ setup_cmd+=" --disk-type=spdk_vhost_scsi --disks=0"
+elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ $rpc_py vhost_create_blk_controller naa.0.0 Nvme0n1
+ $rpc_py vhost_create_blk_controller naa.1.0 Malloc0
+ $rpc_py vhost_create_blk_controller naa.2.0 Aio0
+ setup_cmd+=" --disk-type=spdk_vhost_blk --disks=0:1:2"
+fi
+$rpc_py vhost_get_controllers
+$setup_cmd
+
+# Spin up VM
+vm_run "$vm_num"
+vm_wait_for_boot "300" "$vm_num"
+
+vm_sshpass "$vm_num" "$ssh_pass" "mkdir /cygdrive/c/fs_test"
+vm_scp "$vm_num" "$testdir/windows_fs_test.ps1" "127.0.0.1:/cygdrive/c/fs_test"
+vm_sshpass "$vm_num" "$ssh_pass" "cd /cygdrive/c/fs_test; powershell.exe -file windows_fs_test.ps1"
+
+notice "Shutting down Windows VM..."
+# Killing, actually. TODO: implement a vm_windows_shutdown() function
+vm_kill $vm_num
+
+notice "Shutting down SPDK vhost app..."
+vhost_kill 0
+
+rm -f $aio_file
diff --git a/src/spdk/test/vhost/windows/windows_fs_test.ps1 b/src/spdk/test/vhost/windows/windows_fs_test.ps1
new file mode 100644
index 000000000..cda1b53f2
--- /dev/null
+++ b/src/spdk/test/vhost/windows/windows_fs_test.ps1
@@ -0,0 +1,78 @@
+# Get the ID and security principal of the current user account
+$myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
+$myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+# Get the security principal for the Administrator role
+$adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
+
+# Check to see if we are currently running "as Administrator"
+if ($myWindowsPrincipal.IsInRole($adminRole)) {
+ # We are running "as Administrator" - so change the title and background color to indicate this
+ $Host.UI.RawUI.WindowTitle = $myInvocation.MyCommand.Definition + "(Elevated)"
+ $Host.UI.RawUI.BackgroundColor = "DarkBlue"
+ clear-host
+} else {
+ # We are not running "as Administrator" - so relaunch as administrator
+
+ # Create a new process object that starts PowerShell
+ $newProcess = new-object System.Diagnostics.ProcessStartInfo "PowerShell";
+
+ # Specify the current script path and name as a parameter
+ $newProcess.Arguments = $myInvocation.MyCommand.Definition;
+
+ # Indicate that the process should be elevated
+ $newProcess.Verb = "runas";
+
+ # Start the new process
+ [System.Diagnostics.Process]::Start($newProcess);
+
+ # Exit from the current, unelevated, process
+ exit
+}
+
+# Set bash -e equivalent
+$ErrorActionPreference = "Stop"
+
+$filesystems=@("NTFS", "FAT32", "FAT")
+$disks = get-disk | Where-Object FriendlyName -NotMatch "QEMU"
+Start-Sleep 2
+foreach($disk in $disks)
+{
+ $size = $disk.Size
+ $number = $disk.Number
+ $serial = $disk.SerialNumber
+ $model = $disk.model.Trim()
+ $size = $size -replace " ", "_"
+ $model = $model -replace " ", "_"
+
+ $label = "${number}_${model}_${serial}_${size}"
+ echo "Running tests for disk $label"
+ start-sleep 2
+
+ Try {
+ Initialize-Disk -Number $disk.Number -PartitionStyle MBR
+ } Catch {
+ Clear-Disk -Number $disk.Number -RemoveData -Confirm:$false
+ Initialize-Disk -Number $disk.Number -PartitionStyle MBR
+ }
+ echo "`tDisk initialized"
+ start-sleep 2
+
+ $part = New-Partition -DiskNumber $disk.Number -UseMaximumSize -AssignDriveLetter
+ echo "`tCreated partition $($part.DriveLetter)"
+ start-sleep 2
+
+ foreach($fs in $filesystems) {
+ echo "`tTrying to format $($part.DriveLetter) with $fs"
+ Try {
+ $vol = Format-Volume -DriveLetter $part.DriveLetter -FileSystem $fs -Confirm:$false
+ } Catch [Exception] {
+ echo $_.Exception.GetType().FullName, $_.Exception.Message
+ echo $_.Exception | format-list -force
+ exit 1
+ }
+ echo "`tPartition $($part.DriveLetter) formatted with $fs filesystem"
+ start-sleep 2
+ }
+}
diff --git a/src/spdk/test/vhost/windows/windows_scsi_compliance.ps1 b/src/spdk/test/vhost/windows/windows_scsi_compliance.ps1
new file mode 100644
index 000000000..80d86e805
--- /dev/null
+++ b/src/spdk/test/vhost/windows/windows_scsi_compliance.ps1
@@ -0,0 +1,73 @@
+# Get the ID and security principal of the current user account
+$myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
+$myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+# Get the security principal for the Administrator role
+$adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
+
+# Check to see if we are currently running "as Administrator"
+if ($myWindowsPrincipal.IsInRole($adminRole))
+ {
+ # We are running "as Administrator" - so change the title and background color to indicate this
+ $Host.UI.RawUI.WindowTitle = $myInvocation.MyCommand.Definition + "(Elevated)"
+ $Host.UI.RawUI.BackgroundColor = "DarkBlue"
+ clear-host
+ }
+else
+ {
+ # We are not running "as Administrator" - so relaunch as administrator
+
+ # Create a new process object that starts PowerShell
+ $newProcess = new-object System.Diagnostics.ProcessStartInfo "PowerShell";
+
+ # Specify the current script path and name as a parameter
+ $newProcess.Arguments = $myInvocation.MyCommand.Definition;
+
+ # Indicate that the process should be elevated
+ $newProcess.Verb = "runas";
+
+ # Start the new process
+ [System.Diagnostics.Process]::Start($newProcess);
+
+ # Exit from the current, unelevated, process
+ exit
+ }
+# Run your code that needs to be elevated here
+get-disk | Where-Object FriendlyName -NotMatch "QEMU" | Initialize-Disk -PartitionStyle MBR
+Start-Sleep 2
+get-disk | Where-Object FriendlyName -NotMatch "QEMU" | Clear-Disk -RemoveData -Confirm:$false
+Start-Sleep 2
+get-disk | Where-Object FriendlyName -NotMatch "QEMU" | Initialize-Disk -PartitionStyle MBR
+Start-Sleep 2
+
+$disks = get-disk | Where-Object FriendlyName -NotMatch "QEMU"
+Start-Sleep 2
+foreach($disk in $disks)
+{
+
+ $phy_bs = $disk.PhysicalSectorSize
+ $model = $disk.model
+ $serial = $disk.SerialNumber
+
+ $label = ""
+ $label += $model.Trim() + "_" + $serial + "_" + $phy_bs
+ $label = $label -replace " ", "_"
+ echo $label
+ start-sleep 2
+
+ $part = New-Partition -DiskNumber $disk.Number -UseMaximumSize -AssignDriveLetter
+ echo $part.DriveLetter
+ start-sleep 2
+
+ $vol = Format-Volume -DriveLetter $part.DriveLetter -FileSystem NTFS -Confirm:$false
+ echo $vol
+ start-sleep 2
+
+ cd C:\SCSI
+ .\scsicompliancetest.exe \\.\$($vol.DriveLetter): -full | tee "C:\SCSI\WIN_SCSI_1_$label.log"
+ start-sleep 2
+ mv .\scsicompliance.log.wtl ".\WIN_SCSI_1_$label.wtl"
+ .\scsicompliance.exe /Device \\.\$($vol.DriveLetter): /Operation Test /Scenario Common | tee "C:\SCSI\WIN_SCSI_2_$label.log"
+ start-sleep 2
+ mv .\scsicompliance.wtl ".\WIN_SCSI_2_$label.wtl"
+}
diff --git a/src/spdk/test/vhost/windows/windows_scsi_compliance.py b/src/spdk/test/vhost/windows/windows_scsi_compliance.py
new file mode 100755
index 000000000..a0f4ea63c
--- /dev/null
+++ b/src/spdk/test/vhost/windows/windows_scsi_compliance.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+import os
+import sys
+import re
+import pprint
+import collections
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "results"))
+
+scsi_logs = filter(lambda x: x.endswith(".log"), os.listdir("./"))
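+# WIN_SCSI_1 logs report results as "ASSERTION <num> <name> ... Result: <status>", while
+# WIN_SCSI_2 logs use "Start: [ASSERTION:] <name>, ... End: <status>, ...". The two
+# patterns below capture the assertion name, body and status for each format.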
+scsi_1_pattern = re.compile(r"(ASSERTION\s[1-9][\d+]?\.\d+\s)(.+\s)([\w\W]+?)(Result:\s)(\w+)", re.I | re.M)
+scsi_2_pattern = re.compile(r"(?:Start:\s)(ASSERTION:\s)?(.+)(?:,.+=\s)([\w\W]+?)(End:\s)(\w+)(,.*)", re.I | re.M)
+fails = []
+warns = []
+
+expected_warns = [
+ "MODE_SELECT_6_MODE_SENSE_6_Checking_Parameters_Savable_PS_bit",
+ "MODE_SELECT_10_MODE_SENSE_10_Checking_Parameters_Savable_PS_bit",
+ "MODE_SELECT_10_Changing_WCE",
+ "MODE_SELECT_10_MODE_SENSE_10_Checking_that_WCE_has_been_cleared",
+ "MODE_SELECT_10_MODE_SENSE_10_Checking_that_Saved_Values_have_changed",
+ "MODE_SELECT_10_setting_WCE",
+ "MODE_SELECT_10_MODE_SENSE_10_Checking_that_WCE_has_been_set",
+ "MODE_SELECT_10_Attempting_to_restore_original_values",
+ "MODE_SELECT_10_MODE_SENSE_10_Verifying_values_were_restored",
+ "ASSERTION_VERIFY_16_Support_Test",
+]
+
+expected_fails = [
+ "ASSERTION_READ_6_Read-With-Disk-Cache-Cleared_Test",
+ "ASSERTION_READ_10_Read-With-Disk-Cache-Cleared_Test",
+ "ASSERTION_READ_16_Read-With-Disk-Cache-Cleared_Test",
+ "ASSERTION_INQUIRY_Checking_Identification_Descriptors_in_VPD_page_0x83",
+ "ASSERTION_VERIFY_10_Support_Test",
+]
+
+results = {"1": collections.OrderedDict(),
+ "2": collections.OrderedDict()}
+
+for log in scsi_logs:
+ # Choose regex pattern depending on tests version
+ pattern = scsi_1_pattern if "WIN_SCSI_1" in log else scsi_2_pattern
+
+ # Read log file contents
+ try:
+ with open(log, 'r') as fh:
+ log_text = fh.read()
+ # Dir name for saving split result files of currently processed log file
+ d_name = log.split(".")[0]
+ try:
+ os.mkdir(d_name)
+ except OSError:
+ pass
+ except IOError as e:
+ print("ERROR: While opening log file: {log_file}".format(log_file=log))
+ exit(1)
+
+ # Parse log file contents
+ matches_found = re.findall(pattern, log_text)
+ if len(matches_found) < 1:
+ print("ERROR: No results found in file {log_file}!".format(log_file=log))
+ exit(1)
+
+ # Go through output for each test from log file; parse and save to dict
+ for m in matches_found:
+ test_name = re.sub(r"\s+", "_", (m[0] + m[1]).strip())
+ test_name = re.sub(r"[():]", "", test_name)
+ test_name = test_name[0:-1] if "." in test_name[-1] else test_name
+ tc_result = m[4].upper()
+
+ if "FAIL" in tc_result.upper():
+ fails.append([log, test_name, tc_result])
+ elif "WARN" in tc_result.upper():
+ warns.append([log, test_name, tc_result])
+
+ # Save output to separate file
+ with open(os.path.join("./", d_name, test_name), 'w') as fh:
+ for line in m:
+ fh.write(line)
+
+ # Also save in dictionary for later use in generating HTML results summary
+ ver = "1" if "WIN_SCSI_1" in log else "2"
+ try:
+ results[ver][test_name][d_name] = tc_result
+ except KeyError:
+ results[ver][test_name] = collections.OrderedDict()
+ results[ver][test_name][d_name] = tc_result
+
+
+# Generate HTML file with results table
+with open(os.path.join("./", "results.html"), 'a') as fh:
+ html = "<html>"
+ for suite_ver in results.keys():
+ html += """"<h2> WIN_SCSI_{ver} </h2>
+ <table bgcolor=\"#ffffff\" border=\"1px solid black;>\"""".format(ver=suite_ver)
+
+ # Print header
+ html += "<tr><th>Test name</th>"
+ disks_header = set()
+
+ for _ in results[suite_ver].keys():
+ for disk in results[suite_ver][_].keys():
+ disks_header.add(disk)
+
+ for disk in disks_header:
+ html += "<th>{disk}</th>".format(disk=disk)
+ html += "</tr>"
+
+ # Print results
+ for test in results[suite_ver].keys():
+ html += "<tr><td>{f_name}</td>".format(f_name=test)
+ for disk in disks_header:
+ try:
+ result = results[suite_ver][test][disk]
+
+ html += "<td"
+ if "PASS" in result:
+ html += " bgcolor=\"#99ff33\">"
+ else:
+ html += " bgcolor=\"#ff5050\">"
+
+ html += "<a href={file}>{result}</a>".format(result=result, file=os.path.join("./", disk, test))
+ html += "</td>"
+
+ except KeyError:
+ html += "<td bgcolor=\"#ffff99\"></br></td>"
+ html += "</tr>"
+ html += "</table></br>"
+ html += "</html>"
+ fh.write(html)
+
+
+# Initialize here so the final check below does not fail when there are no warnings or fails
+not_expected_warns = []
+not_expected_fails = []
+
+if warns:
+ print("INFO: Windows SCSI compliance warnings:")
+ pprint.pprint(warns, width=150)
+
+if fails:
+ not_expected_fails = [f for f in fails if f[1] not in expected_fails and "WIN_SCSI_2" in f[0]]
+ print("INFO: Windows SCSI compliance fails:")
+ pprint.pprint(fails, width=150)
+
+if not_expected_warns or not_expected_fails:
+ print("Not expected fails / warnings:")
+ pprint.pprint(not_expected_warns, width=150)
+ pprint.pprint(not_expected_fails, width=150)
+ exit(1)
diff --git a/src/spdk/test/vhost/windows/windows_scsi_compliance.sh b/src/spdk/test/vhost/windows/windows_scsi_compliance.sh
new file mode 100755
index 000000000..d7c854592
--- /dev/null
+++ b/src/spdk/test/vhost/windows/windows_scsi_compliance.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+# Tested with a Windows VM running OS Name: Microsoft Windows Server 2012 R2 Datacenter
+# and OS Version: 6.3.9600 N/A Build 9600.
+# In order to run this test, the Windows virtio-scsi driver
+# must be installed in the VM.
+WINDOWS_IMG="/home/sys_sgsw/windows_scsi_compliance/windows_vm_image.qcow2"
+aio_file="$SPDK_TEST_STORAGE/aio_disk"
+ssh_pass=""
+vm_num=1
+keep_results_dir=false
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Windows Server scsi compliance test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo " --vm-ssh-pass=PASSWORD Text password for the VM"
+ echo " --vm-image-path Path of windows image"
+ echo " --keep_results Do not delete dir with results"
+
+ exit 0
+}
+
+while getopts 'h-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ vm-ssh-pass=*) ssh_pass="${OPTARG#*=}" ;;
+ vm-image-path=*) WINDOWS_IMG="${OPTARG#*=}" ;;
+ keep_results*) keep_results_dir=true ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+trap 'rm -f $aio_file; rm -rf $testdir/results; error_exit' SIGINT SIGTERM ERR
+
+VM_PASSWORD="$ssh_pass"
+mkdir -p $testdir/results
+dd if=/dev/zero of=$aio_file bs=1M count=512
+
+timing_enter vhost_run
+vhost_run 0
+$rpc_py bdev_nvme_set_hotplug -e
+$rpc_py bdev_malloc_create 256 4096 -b Malloc0
+$rpc_py bdev_aio_create $aio_file Aio0 512
+$rpc_py bdev_get_bdevs
+$rpc_py vhost_create_scsi_controller naa.vhost.1
+$rpc_py vhost_scsi_controller_add_target naa.vhost.1 0 Nvme0n1
+$rpc_py vhost_scsi_controller_add_target naa.vhost.1 1 Malloc0
+# TODO: Currently there is bug for aio device. Disable this test
+# $rpc_py vhost_scsi_controller_add_target naa.vhost.1 2 Aio0
+timing_exit vhost_run
+
+timing_enter start_vm
+vm_setup --force=1 --disk-type=spdk_vhost_scsi --os=$WINDOWS_IMG --disks=vhost --memory=4096
+vm_run "1"
+# Wait until VM goes up
+vm_wait_for_boot "300" "$vm_num"
+timing_exit start_vm
+
+vm_scp "$vm_num" $testdir/windows_scsi_compliance.ps1 127.0.0.1:/cygdrive/c/SCSI/
+vm_sshpass "$vm_num" "$ssh_pass" "cd /cygdrive/c/SCSI; powershell.exe -file windows_scsi_compliance.ps1"
+vm_scp "$vm_num" 127.0.0.1:/cygdrive/c/SCSI/WIN_SCSI_* $testdir/results/
+dos2unix $testdir/results/WIN_SCSI_*.log
+
+notice "Kill vm 1"
+vm_kill "$vm_num"
+notice "Kill spdk"
+vhost_kill 0
+notice "Remove $aio_file"
+rm -f $aio_file
+
+python3 $testdir/windows_scsi_compliance.py
+if ! $keep_results_dir; then
+ rm -rf $testdir/results
+fi
diff --git a/src/spdk/test/vmd/config/config.fio b/src/spdk/test/vmd/config/config.fio
new file mode 100644
index 000000000..ba025d520
--- /dev/null
+++ b/src/spdk/test/vmd/config/config.fio
@@ -0,0 +1,18 @@
+[global]
+ioengine=spdk
+thread=1
+group_reporting=1
+direct=1
+verify=md5
+do_verify=1
+time_based=1
+ramp_time=0
+runtime=2
+iodepth=128
+rw=randrw
+bs=4k
+enable_vmd=1
+verify_state_save=0
+
+[test]
+numjobs=1
diff --git a/src/spdk/test/vmd/vmd.sh b/src/spdk/test/vmd/vmd.sh
new file mode 100755
index 000000000..ba5156b77
--- /dev/null
+++ b/src/spdk/test/vmd/vmd.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+VMD_WHITELIST=()
+
+function vmd_identify() {
+ for bdf in $pci_devs; do
+ $SPDK_EXAMPLE_DIR/identify -i 0 -V -r "trtype:PCIe traddr:$bdf"
+ done
+}
+
+function vmd_perf() {
+ for bdf in $pci_devs; do
+ $SPDK_EXAMPLE_DIR/perf -q 128 -w read -o 12288 -t 1 -LL -i 0 -V -r "trtype:PCIe traddr:$bdf"
+ done
+}
+
+function vmd_fio() {
+ for bdf in $pci_devs; do
+ fio_nvme $testdir/config/config.fio --filename="trtype=PCIe traddr=${bdf//:/.} ns=1"
+ done
+}
+
+function vmd_bdev_svc() {
+ $rootdir/test/app/bdev_svc/bdev_svc --wait-for-rpc &
+ svcpid=$!
+ trap 'killprocess $svcpid; exit 1' SIGINT SIGTERM EXIT
+
+ # Wait until bdev_svc starts
+ waitforlisten $svcpid
+
+ $rpc_py enable_vmd
+ $rpc_py framework_start_init
+
+ for bdf in $pci_devs; do
+ $rpc_py bdev_nvme_attach_controller -b NVMe_$bdf -t PCIe -a $bdf
+ done
+
+ trap - SIGINT SIGTERM EXIT
+ killprocess $svcpid
+}
+
+# Re-run setup.sh script and only attach VMD devices to uio/vfio.
+$rootdir/scripts/setup.sh reset
+
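+# Extract the Intel VMD PCI device ID (the hex digits after "0x") from SPDK's pci_ids.h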
+vmd_id=$(grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h | awk -F"x" '{print $2}')
+
+for bdf in $(iter_pci_dev_id 8086 $vmd_id); do
+ if pci_can_use $bdf; then
+ VMD_WHITELIST+=("$bdf")
+ fi
+done
+PCI_WHITELIST="${VMD_WHITELIST[*]}" $rootdir/scripts/setup.sh
+
+pci_devs=$($SPDK_BIN_DIR/spdk_lspci | grep "NVMe disk behind VMD" | awk '{print $1}')
+
+if [[ -z "$pci_devs" ]]; then
+ echo "Couldn't find any NVMe device behind a VMD."
+ exit 1
+fi
+
+run_test "vmd_identify" vmd_identify
+run_test "vmd_hello_world" $SPDK_EXAMPLE_DIR/hello_world -V
+run_test "vmd_perf" vmd_perf
+if [[ $CONFIG_FIO_PLUGIN == y ]]; then
+ run_test "vmd_fio" vmd_fio
+fi
+
+run_test "vmd_bdev_svc" vmd_bdev_svc
+
+# Re-run setup.sh again so that other tests may continue
+$rootdir/scripts/setup.sh reset
+$rootdir/scripts/setup.sh