Diffstat (limited to 'src/spdk/test/vhost/hotplug')
-rw-r--r--  src/spdk/test/vhost/hotplug/blk_hotremove.sh  235
-rw-r--r--  src/spdk/test/vhost/hotplug/common.sh  230
-rw-r--r--  src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job  16
-rwxr-xr-x  src/spdk/test/vhost/hotplug/scsi_hotattach.sh  103
-rwxr-xr-x  src/spdk/test/vhost/hotplug/scsi_hotdetach.sh  212
-rwxr-xr-x  src/spdk/test/vhost/hotplug/scsi_hotplug.sh  92
-rw-r--r--  src/spdk/test/vhost/hotplug/scsi_hotremove.sh  233
7 files changed, 1121 insertions, 0 deletions
diff --git a/src/spdk/test/vhost/hotplug/blk_hotremove.sh b/src/spdk/test/vhost/hotplug/blk_hotremove.sh
new file mode 100644
index 000000000..d0edab83a
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/blk_hotremove.sh
@@ -0,0 +1,235 @@
+# Vhost blk hot remove tests
+#
+# Objective
+# The purpose of these tests is to verify that SPDK vhost remains stable during
+# hot-remove operations performed on SCSI and BLK controller devices.
+# Hot-remove is a scenario where an NVMe device is removed while already in use.
+#
+# Test cases description
+# 1. FIO I/O traffic is run during hot-remove operations.
+# By default FIO uses default_integrity*.job config files located in
+# test/vhost/hotplug/fio_jobs directory.
+# 2. FIO mode of operation is random write (randwrite) with verification enabled,
+# which results in read operations also being performed.
+# 3. In each test case, fio status is checked after every run to see whether any errors occurred.
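+#
+# Each test case appends one [nvme-hostX] job section per attached disk to the base
+# job file (see prepare_fio_cmd_tc1 below). For a vhost-blk disk this looks roughly
+# like the following (illustrative only; the actual device name depends on the VM):
+#   [nvme-hostvda]
+#   filename=/dev/vda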
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_blk_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_2discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_2discs.job "
+ rm $tmp_detach_job
+ done
+}
+
+function vhost_delete_controllers() {
+ $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p3.1
+}
+
+# Vhost blk hot remove test cases
+#
+# Test Case 1
+function blk_hotremove_tc1() {
+ echo "Blk hotremove test case 1"
+ traddr=""
+ # 1. Run the command to hot remove NVMe disk.
+ get_traddr "Nvme0"
+ delete_nvme "Nvme0"
+ # 2. If vhost had crashed, the tests would stop running
+ sleep 1
+ add_nvme "HotInNvme0" "$traddr"
+ sleep 1
+}
+
+# Test Case 2
+function blk_hotremove_tc2() {
+ echo "Blk hotremove test case 2"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme0n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
+ # 2. Run two VMs and attach every VM to two blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic with verification enabled on NVMe disk.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove NVMe disk.
+ delete_nvme "HotInNvme0"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 5. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 2: Iteration 1." 1 $retcode
+
+ # 6. Reboot VM
+ reboot_all_and_prepare "0"
+ # 7. Run FIO I/O traffic with verification enabled on NVMe disk.
+ $run_fio &
+ local retcode=0
+ wait_for_finish $! || retcode=$?
+ # 8. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 2: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme1" "$traddr"
+ sleep 1
+}
+
+# Test Case 3
+function blk_hotremove_tc3() {
+ echo "Blk hotremove test case 3"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme1n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme1n1p1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
+ # 2. Run two VMs and attach every VM to two blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic with verification enabled on first NVMe disk.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme1"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 5. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 3: Iteration 1." 1 $retcode
+
+ # 6. Reboot VM
+ reboot_all_and_prepare "0"
+ local retcode=0
+ # 7. Run FIO I/O traffic with verification enabled on removed NVMe disk.
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 8. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 3: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme2" "$traddr"
+ sleep 1
+}
+
+# Test Case 4
+function blk_hotremove_tc4() {
+ echo "Blk hotremove test case 4"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme2n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme2n1p1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
+ # 2. Run two VMs, attached to blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic on first VM with verification enabled on both attached disks.
+ $run_fio &
+ local last_pid_vm0=$!
+
+ prepare_fio_cmd_tc1 "1"
+ # 4. Run FIO I/O traffic on second VM with verification enabled on both attached disks.
+ $run_fio &
+ local last_pid_vm1=$!
+
+ sleep 3
+ prepare_fio_cmd_tc1 "0 1"
+ # 5. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme2"
+ local retcode_vm0=0
+ local retcode_vm1=0
+ wait_for_finish $last_pid_vm0 || retcode_vm0=$?
+ wait_for_finish $last_pid_vm1 || retcode_vm1=$?
+ # 6. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 4: Iteration 1." 1 $retcode_vm0
+ check_fio_retcode "Blk hotremove test case 4: Iteration 2." 1 $retcode_vm1
+
+ # 7. Reboot all VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on removed NVMe disk.
+ $run_fio &
+ local retcode=0
+ wait_for_finish $! || retcode=$?
+ # 9. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 4: Iteration 3." 1 $retcode
+
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme3" "$traddr"
+ sleep 1
+}
+
+# Test Case 5
+function blk_hotremove_tc5() {
+ echo "Blk hotremove test case 5"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme3n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
+ # 2. Run two VMs, attached to blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic on first VM with verification enabled on both attached disks.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the NVMe disk.
+ delete_nvme "HotInNvme3"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 5. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 5: Iteration 1." 1 $retcode
+
+ # 6. Reboot VM.
+ reboot_all_and_prepare "0"
+ local retcode=0
+ # 7. Run FIO I/O traffic with verification enabled on removed NVMe disk.
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 8. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 5: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme4" "$traddr"
+ sleep 1
+}
+
+vms_setup
+blk_hotremove_tc1
+blk_hotremove_tc2
+blk_hotremove_tc3
+blk_hotremove_tc4
+blk_hotremove_tc5
diff --git a/src/spdk/test/vhost/hotplug/common.sh b/src/spdk/test/vhost/hotplug/common.sh
new file mode 100644
index 000000000..b7b05ee74
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/common.sh
@@ -0,0 +1,230 @@
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+dry_run=false
+no_shutdown=false
+fio_bin="fio"
+fio_jobs="$testdir/fio_jobs/"
+test_type=spdk_vhost_scsi
+reuse_vms=false
+vms=()
+used_vms=""
+disk_split=""
+x=""
+scsi_hot_remove_test=0
+blk_hot_remove_test=0
+readonly=""
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated hotattach/hotdetach test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help print help and exit"
+ echo " --test-type=TYPE Perform specified test:"
+ echo " virtio - test host virtio-scsi-pci using file as disk image"
+ echo " kernel_vhost - use kernel driver vhost-scsi"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo "-x set -x for script debug"
+ echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
+ echo " --fio-jobs= Fio configs to use for tests. Can point to a directory or"
+ echo " --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
+ echo " NUM - VM number (mandatory)"
+ echo " OS - VM os disk path (optional)"
+ echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
+ echo " --scsi-hotremove-test Run scsi hotremove tests"
+ echo " --readonly Use readonly for fio"
+ exit 0
+}
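+
+# Example invocation (illustrative only; the OS image path is a placeholder, not a
+# file shipped with these tests):
+#   ./scsi_hotplug.sh --vm=0,/path/to/vm_image.qcow2,Nvme0n1p0 \
+#       --test-type=spdk_vhost_scsi --fio-bin=/usr/bin/fio --scsi-hotremove-test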
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ fio-bin=*) fio_bin="${OPTARG#*=}" ;;
+ fio-jobs=*) fio_jobs="${OPTARG#*=}" ;;
+ test-type=*) test_type="${OPTARG#*=}" ;;
+ vm=*) vms+=("${OPTARG#*=}") ;;
+ scsi-hotremove-test) scsi_hot_remove_test=1 ;;
+ blk-hotremove-test) blk_hot_remove_test=1 ;;
+ readonly) readonly="--readonly" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+shift $((OPTIND - 1))
+
+fio_job=$testdir/fio_jobs/default_integrity.job
+tmp_attach_job=$testdir/fio_jobs/fio_attach.job.tmp
+tmp_detach_job=$testdir/fio_jobs/fio_detach.job.tmp
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+function print_test_fio_header() {
+ notice "==============="
+ notice ""
+ notice "Testing..."
+
+ notice "Running fio jobs ..."
+ if [ $# -gt 0 ]; then
+ echo $1
+ fi
+}
+
+function vms_setup() {
+ for vm_conf in "${vms[@]}"; do
+ IFS=',' read -ra conf <<< "$vm_conf"
+ if [[ x"${conf[0]}" == x"" ]] || ! assert_number ${conf[0]}; then
+ fail "invalid VM configuration syntax $vm_conf"
+ fi
+
+ # Sanity check if VM is not defined twice
+ for vm_num in $used_vms; do
+ if [[ $vm_num -eq ${conf[0]} ]]; then
+ fail "VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
+ fi
+ done
+
+ used_vms+=" ${conf[0]}"
+
+ setup_cmd="vm_setup --disk-type=$test_type --force=${conf[0]}"
+ [[ x"${conf[1]}" != x"" ]] && setup_cmd+=" --os=${conf[1]}"
+ [[ x"${conf[2]}" != x"" ]] && setup_cmd+=" --disks=${conf[2]}"
+ $setup_cmd
+ done
+}
+
+function vm_run_with_arg() {
+ local vms_to_run="$*"
+ vm_run $vms_to_run
+ vm_wait_for_boot 300 $vms_to_run
+}
+
+function vms_setup_and_run() {
+ local vms_to_run="$*"
+ vms_setup
+ vm_run_with_arg $vms_to_run
+}
+
+function vms_prepare() {
+ for vm_num in $1; do
+ qemu_mask_param="VM_${vm_num}_qemu_mask"
+
+ host_name="VM-${vm_num}-${!qemu_mask_param}"
+ notice "Setting up hostname: $host_name"
+ vm_exec $vm_num "hostname $host_name"
+ vm_start_fio_server --fio-bin=$fio_bin $readonly $vm_num
+ done
+}
+
+function vms_reboot_all() {
+ notice "Rebooting all vms "
+ for vm_num in $1; do
+ vm_exec $vm_num "reboot" || true
+ while vm_os_booted $vm_num; do
+ sleep 0.5
+ done
+ done
+
+ vm_wait_for_boot 300 $1
+}
+
+function check_fio_retcode() {
+ local fio_retcode=$3
+ echo $1
+ local retcode_expected=$2
+ if [ $retcode_expected == 0 ]; then
+ if [ $fio_retcode != 0 ]; then
+ error " Fio test ended with error."
+ else
+ notice " Fio test ended with success."
+ fi
+ else
+ if [ $fio_retcode != 0 ]; then
+ notice " Fio test ended with expected error."
+ else
+ error " Fio test ended with unexpected success."
+ fi
+ fi
+}
+
+function wait_for_finish() {
+ local wait_for_pid=$1
+ local sequence=${2:-30}
+ for i in $(seq 1 $sequence); do
+ if kill -0 $wait_for_pid; then
+ sleep 0.5
+ continue
+ else
+ break
+ fi
+ done
+ if kill -0 $wait_for_pid; then
+ error "Timeout for fio command"
+ fi
+
+ wait $wait_for_pid
+}
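+
+# Typical use of wait_for_finish in a test case (sketch based on the callers in
+# scsi_hotremove.sh and blk_hotremove.sh):
+#   $run_fio &
+#   local retcode=0
+#   wait_for_finish $! || retcode=$?
+#   check_fio_retcode "Test case X" 1 $retcode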
+
+function reboot_all_and_prepare() {
+ vms_reboot_all "$1"
+ vms_prepare "$1"
+}
+
+function post_test_case() {
+ vm_shutdown_all
+ vhost_kill 0
+}
+
+function on_error_exit() {
+ set +e
+ echo "Error on $1 - $2"
+ post_test_case
+ print_backtrace
+ exit 1
+}
+
+function check_disks() {
+ if [ "$1" == "$2" ]; then
+ echo "Disk has not been deleted"
+ exit 1
+ fi
+}
+
+function get_traddr() {
+ local nvme_name=$1
+ local nvme
+ nvme="$($rootdir/scripts/gen_nvme.sh)"
+ while read -r line; do
+ if [[ $line == *"TransportID"* ]] && [[ $line == *$nvme_name* ]]; then
+ local word_array=($line)
+ for word in "${word_array[@]}"; do
+ if [[ $word == *"traddr"* ]]; then
+ traddr=$(echo $word | sed 's/traddr://' | sed 's/"//')
+ fi
+ done
+ fi
+ done <<< "$nvme"
+}
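+
+# Example of the kind of line matched above (format inferred from the parsing logic;
+# the PCI address is a placeholder):
+#   TransportID "trtype:PCIe traddr:0000:5e:00.0" Nvme0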
+
+function delete_nvme() {
+ $rpc_py bdev_nvme_detach_controller $1
+}
+
+function add_nvme() {
+ $rpc_py bdev_nvme_attach_controller -b $1 -t PCIe -a $2
+}
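+
+# Typical hot-remove/re-add cycle used by the hotremove tests, where traddr is filled
+# in by get_traddr and the HotInNvmeX name follows the convention of those tests:
+#   get_traddr "Nvme0"
+#   delete_nvme "Nvme0"
+#   add_nvme "HotInNvme0" "$traddr"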
diff --git a/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job b/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job
new file mode 100644
index 000000000..136fe9029
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job
@@ -0,0 +1,16 @@
+[global]
+blocksize=4k
+iodepth=512
+iodepth_batch=128
+iodepth_low=256
+ioengine=libaio
+group_reporting
+thread
+numjobs=1
+direct=1
+rw=randwrite
+do_verify=1
+verify=md5
+verify_backlog=1024
+time_based=1
+runtime=10
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotattach.sh b/src/spdk/test/vhost/hotplug/scsi_hotattach.sh
new file mode 100755
index 000000000..4b9e26ab8
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotattach.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/vhost/hotplug/common.sh
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_attach_job
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_attach_job
+ echo "filename=/dev/$disk" >> $tmp_attach_job
+ done
+ vm_scp $vm_num $tmp_attach_job 127.0.0.1:/root/default_integrity_discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket ${vm_num}) --remote-config /root/default_integrity_discs.job "
+ rm $tmp_attach_job
+ done
+}
+
+# Check if fio test passes on device attached to first controller.
+function hotattach_tc1() {
+ notice "Hotattach test case 1"
+
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 Nvme0n1p0
+
+ sleep 3
+ prepare_fio_cmd_tc1 "0"
+ $run_fio
+ check_fio_retcode "Hotattach test case 1: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached device.
+# During the test, attach another device to the first controller and check fio status.
+function hotattach_tc2() {
+ notice "Hotattach test case 2"
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 1 Nvme0n1p1
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 2: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached devices.
+# During the test, attach another device to the second controller and check fio status.
+function hotattach_tc3() {
+ notice "Hotattach test case 3"
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Nvme0n1p2
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 3: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached devices.
+# During the test, attach another device to the third controller (VM2) and check fio status.
+# At the end, after rebooting the VMs, run fio test for all devices and check fio status.
+function hotattach_tc4() {
+ notice "Hotattach test case 4"
+
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 Nvme0n1p3
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 4: Iteration 1." 0 $?
+
+ prepare_fio_cmd_tc1 "0 1"
+ $run_fio
+ check_fio_retcode "Hotattach test case 4: Iteration 2." 0 $?
+
+ reboot_all_and_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0 1"
+ $run_fio
+ check_fio_retcode "Hotattach test case 4: Iteration 3." 0 $?
+}
+
+function cleanup_after_tests() {
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p0.0 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p0.0 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p2.1 0
+}
+
+hotattach_tc1
+hotattach_tc2
+hotattach_tc3
+hotattach_tc4
+cleanup_after_tests
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh b/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh
new file mode 100755
index 000000000..8a7cb264f
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh
@@ -0,0 +1,212 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/vhost/hotplug/common.sh
+
+function get_first_disk() {
+ vm_check_scsi_location $1
+ disk_array=($SCSI_DISK)
+ eval "$2=${disk_array[0]}"
+}
+
+function check_disks() {
+ if [ "$1" == "$2" ]; then
+ fail "Disk has not been deleted"
+ fi
+}
+
+function prepare_fio_cmd_tc1_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_4discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_4discs.job "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc2_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_scsi_location $vm_num
+ disk_array=($SCSI_DISK)
+ disk=${disk_array[0]}
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity.job "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc2_iter2() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
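+ # VM 2 uses the three-disk job file because one of its targets is hot-detached by
+ # the calling test case, leaving it with one disk fewer than the other VMs.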
+ if [ $vm_num == 2 ]; then
+ vm_job_name=default_integrity_3discs.job
+ else
+ vm_job_name=default_integrity_4discs.job
+ fi
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/${vm_job_name} "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc3_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ if [ $vm_num == 2 ]; then
+ vm_job_name=default_integrity_3discs.job
+ else
+ vm_job_name=default_integrity_4discs.job
+ fi
+ vm_check_scsi_location $vm_num
+ j=1
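+ # On VM 2, skip the first disk so this fio job does not touch the device that the
+ # calling test case is about to hot-detach.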
+ for disk in $SCSI_DISK; do
+ if [ $vm_num == 2 ]; then
+ if [ $j == 1 ]; then
+ ((j++))
+ continue
+ fi
+ fi
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ ((j++))
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$vm_job_name "
+ rm $tmp_detach_job
+ done
+}
+
+# During a fio test on all devices, remove the first device from the fifth controller and check that fio fails.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc1() {
+ notice "Hotdetach test case 1"
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc1_iter1 "2 3"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 1: Iteration 1." 1 $?
+ set -xe
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# During a fio test on a device from the third VM, remove the first device from the fifth controller and check that fio fails.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc2() {
+ notice "Hotdetach test case 2"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc2_iter1 "2"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 2: Iteration 1." 1 $?
+ set -xe
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# Run fio test for all devices except one, then remove this device and check that fio passes.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc3() {
+ notice "Hotdetach test case 3"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc3_iter1 "2 3"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 3: Iteration 1." 0 $?
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# Run fio test for all devices except one and run a separate fio test for this device.
+# Check that the first fio test passes and the second fio test fails.
+# Also check that the disk has been removed from the VM.
+# After reboot, run fio test for the remaining devices and check that fio passes.
+function hotdetach_tc4() {
+ notice "Hotdetach test case 4"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc2_iter1 "2"
+ $run_fio &
+ first_fio_pid=$!
+ prepare_fio_cmd_tc3_iter1 "2 3"
+ $run_fio &
+ second_fio_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $first_fio_pid
+ check_fio_retcode "Hotdetach test case 4: Iteration 1." 1 $?
+ set -xe
+ wait $second_fio_pid
+ check_fio_retcode "Hotdetach test case 4: Iteration 2." 0 $?
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+
+ reboot_all_and_prepare "2 3"
+ sleep 2
+ prepare_fio_cmd_tc2_iter2 "2 3"
+ $run_fio
+ check_fio_retcode "Hotdetach test case 4: Iteration 3." 0 $?
+ clear_after_tests
+}
+
+function clear_after_tests() {
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p4.2 0 Nvme0n1p8
+}
+
+hotdetach_tc1
+hotdetach_tc2
+hotdetach_tc3
+hotdetach_tc4
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotplug.sh b/src/spdk/test/vhost/hotplug/scsi_hotplug.sh
new file mode 100755
index 000000000..40132ab8a
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotplug.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/vhost/hotplug/common.sh
+
+if [[ $scsi_hot_remove_test == 1 ]] && [[ $blk_hot_remove_test == 1 ]]; then
+ notice "Vhost-scsi and vhost-blk hotremove tests cannot be run together"
+fi
+
+# Prepare vhost with rpc calls (create SCSI controllers and attach targets),
+# then set up and run 4 VMs.
+function pre_hot_attach_detach_test_case() {
+ used_vms=""
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p3.1
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p4.2
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p5.2
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p6.3
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p7.3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p4.2 0 Nvme0n1p8
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p4.2 1 Nvme0n1p9
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p5.2 0 Nvme0n1p10
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p5.2 1 Nvme0n1p11
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p6.3 0 Nvme0n1p12
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p6.3 1 Nvme0n1p13
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p7.3 0 Nvme0n1p14
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p7.3 1 Nvme0n1p15
+ vms_setup_and_run "0 1 2 3"
+ vms_prepare "0 1 2 3"
+}
+
+function clear_vhost_config() {
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p5.2 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p5.2 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p6.3 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p6.3 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p7.3 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p7.3 1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p3.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p4.2
+ $rpc_py vhost_delete_controller naa.Nvme0n1p5.2
+ $rpc_py vhost_delete_controller naa.Nvme0n1p6.3
+ $rpc_py vhost_delete_controller naa.Nvme0n1p7.3
+}
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
+# Hotremove/hotattach/hotdetach test cases prerequisites
+# Run vhost with 2 NVMe disks.
+
+notice "==============="
+notice ""
+notice "running SPDK"
+notice ""
+vhost_run 0
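+# Enable automatic NVMe hotplug monitoring so that removed and re-attached devices
+# are detected by the bdev layer.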
+$rpc_py bdev_nvme_set_hotplug -e
+$rpc_py bdev_split_create Nvme0n1 16
+$rpc_py bdev_malloc_create 128 512 -b Malloc
+$rpc_py bdev_split_create Malloc 4
+$rpc_py bdev_split_create HotInNvme0n1 2
+$rpc_py bdev_split_create HotInNvme1n1 2
+$rpc_py bdev_split_create HotInNvme2n1 2
+$rpc_py bdev_split_create HotInNvme3n1 2
+$rpc_py bdev_get_bdevs
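+# The split bdevs created above are exposed as <base>p<N> (e.g. Nvme0n1p0..Nvme0n1p15
+# and Mallocp0..Mallocp3); these are the names used by the test cases below.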
+
+if [[ $scsi_hot_remove_test == 0 ]] && [[ $blk_hot_remove_test == 0 ]]; then
+ pre_hot_attach_detach_test_case
+ $testdir/scsi_hotattach.sh --fio-bin=$fio_bin &
+ first_script=$!
+ $testdir/scsi_hotdetach.sh --fio-bin=$fio_bin &
+ second_script=$!
+ wait $first_script
+ wait $second_script
+ vm_shutdown_all
+ clear_vhost_config
+fi
+if [[ $scsi_hot_remove_test == 1 ]]; then
+ source $testdir/scsi_hotremove.sh
+fi
+if [[ $blk_hot_remove_test == 1 ]]; then
+ source $testdir/blk_hotremove.sh
+fi
+post_test_case
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotremove.sh b/src/spdk/test/vhost/hotplug/scsi_hotremove.sh
new file mode 100644
index 000000000..1dee4ac7f
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotremove.sh
@@ -0,0 +1,233 @@
+set -xe
+
+# Vhost SCSI hotremove tests
+#
+# Objective
+# The purpose of these tests is to verify that SPDK vhost remains stable during
+# hot-remove operations performed on SCSI controller devices.
+# Hot-remove is a scenario where an NVMe device is removed while already in use.
+# Tests consist of 4 test cases.
+#
+# Test cases description
+# 1. FIO I/O traffic is run during hot-remove operations.
+# By default FIO uses default_integrity*.job config files located in
+# test/vhost/hotplug/fio_jobs directory.
+# 2. FIO mode of operation is random write (randwrite) with verification enabled,
+# which results in read operations also being performed.
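+#
+# Each test case appends one [nvme-hostX] job section per attached SCSI disk to the
+# base job file (see prepare_fio_cmd_tc1 below), roughly like the following
+# (illustrative only; the actual device name depends on the VM):
+#   [nvme-hostsdb]
+#   filename=/dev/sdb
+#   size=100%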
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ cat <<- EOL >> $tmp_detach_job
+ [nvme-host$disk]
+ filename=/dev/$disk
+ size=100%
+ EOL
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_2discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_2discs.job "
+ rm $tmp_detach_job
+ done
+}
+
+# Vhost SCSI hot-remove test cases.
+
+# Test Case 1
+function scsi_hotremove_tc1() {
+ echo "Scsi hotremove test case 1"
+ traddr=""
+ get_traddr "Nvme0"
+ # 1. Run the command to hot remove NVMe disk.
+ delete_nvme "Nvme0"
+ # 2. If vhost had crashed, the tests would stop running
+ sleep 1
+ add_nvme "HotInNvme0" "$traddr"
+}
+
+# Test Case 2
+function scsi_hotremove_tc2() {
+ echo "Scsi hotremove test case 2"
+ # 1. Attach split NVMe bdevs to scsi controller.
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme0n1p0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Mallocp0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme0n1p1
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p3.1 0 Mallocp1
+
+ # 2. Run two VMs, attached to scsi controllers.
+ vms_setup
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+
+ vm_check_scsi_location "0"
+ local disks="$SCSI_DISK"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0 1"
+ # 3. Run FIO I/O traffic with verification enabled on both disks in each VM.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove NVMe disk.
+ delete_nvme "HotInNvme0"
+
+ # 5. Check that fio job run on hot-removed device stopped on the VM.
+ # Expected: Fio should return error message and return code != 0.
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ check_fio_retcode "Scsi hotremove test case 2: Iteration 1." 1 $retcode
+
+ # 6. Check if removed devices are gone from VM.
+ vm_check_scsi_location "0"
+ local new_disks="$SCSI_DISK"
+ check_disks "$disks" "$new_disks"
+ # 7. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on both VMs.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 9. Check that fio job run on hot-removed device stopped on both VMs.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Scsi hotremove test case 2: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme1" "$traddr"
+ sleep 1
+}
+
+# Test Case 3
+function scsi_hotremove_tc3() {
+ echo "Scsi hotremove test case 3"
+ # 1. Attach added NVMe bdev to scsi controller.
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme1n1p0
+ # 2. Run two VMs, attached to scsi controllers.
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+ vm_check_scsi_location "0"
+ local disks="$SCSI_DISK"
+ traddr=""
+ get_traddr "Nvme0"
+ # 3. Run FIO I/O traffic with verification enabled on both disks in the VM.
+ prepare_fio_cmd_tc1 "0"
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove NVMe disk.
+ delete_nvme "HotInNvme1"
+ # 5. Check that fio job run on hot-removed device stopped on the first VM.
+ # Expected: Fio should return error message and return code != 0.
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ check_fio_retcode "Scsi hotremove test case 3: Iteration 1." 1 $retcode
+ # 6. Check if removed devices are gone from lsblk.
+ vm_check_scsi_location "0"
+ local new_disks="$SCSI_DISK"
+ check_disks "$disks" "$new_disks"
+ # 7. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on both VMs.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 9. Check that fio job run on hot-removed device stopped on both VMs.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Scsi hotremove test case 3: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme2" "$traddr"
+ sleep 1
+}
+
+# Test Case 4
+function scsi_hotremove_tc4() {
+ echo "Scsi hotremove test case 4"
+ # 1. Attach NVMe bdevs to scsi controllers.
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme2n1p0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme2n1p1
+ # 2. Run two VMs, attached to scsi controllers.
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+
+ # 3. Run FIO I/O traffic with verification enabled on first VM.
+ vm_check_scsi_location "0"
+ local disks_vm0="$SCSI_DISK"
+ # 4. Run FIO I/O traffic with verification enabled on second VM.
+ prepare_fio_cmd_tc1 "0"
+ $run_fio &
+ last_pid_vm0=$!
+
+ vm_check_scsi_location "1"
+ local disks_vm1="$SCSI_DISK"
+ prepare_fio_cmd_tc1 "1"
+ $run_fio &
+ local last_pid_vm1=$!
+ prepare_fio_cmd_tc1 "0 1"
+ sleep 3
+ # 5. Run the command to hot remove NVMe disk.
+ traddr=""
+ get_traddr "Nvme0"
+ delete_nvme "HotInNvme2"
+ # 6. Check that fio job run on hot-removed devices stopped.
+ # Expected: Fio should return error message and return code != 0.
+ local retcode_vm0=0
+ wait_for_finish $last_pid_vm0 || retcode_vm0=$?
+ local retcode_vm1=0
+ wait_for_finish $last_pid_vm1 || retcode_vm1=$?
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 1." 1 $retcode_vm0
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 2." 1 $retcode_vm1
+
+ # 7. Check if removed devices are gone from lsblk.
+ vm_check_scsi_location "0"
+ local new_disks_vm0="$SCSI_DISK"
+ check_disks "$disks_vm0" "$new_disks_vm0"
+ vm_check_scsi_location "1"
+ local new_disks_vm1="$SCSI_DISK"
+ check_disks "$disks_vm1" "$new_disks_vm1"
+
+ # 8. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 9. Run FIO I/O traffic with verification enabled on the not-removed NVMe disk.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 10. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 3." 1 $retcode
+ prepare_fio_cmd_tc1 "0 1"
+ # 11. Run FIO I/O traffic with verification enabled on the not-removed NVMe disks.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 12. Check the finished status of FIO. Writes and reads on the not-removed
+ # NVMe disk should be successful.
+ # Expected: Fio should return a return code == 0.
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 4." 0 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme3" "$traddr"
+ sleep 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p3.1 0
+}
+
+function pre_scsi_hotremove_test_case() {
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p3.1
+}
+
+function post_scsi_hotremove_test_case() {
+ $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p3.1
+}
+
+pre_scsi_hotremove_test_case
+scsi_hotremove_tc1
+scsi_hotremove_tc2
+scsi_hotremove_tc3
+scsi_hotremove_tc4
+post_scsi_hotremove_test_case