authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
commit 483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/test/vhost/hotplug
parent Initial commit. (diff)
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test/vhost/hotplug')
-rw-r--r--  src/spdk/test/vhost/hotplug/blk_hotremove.sh                236
-rw-r--r--  src/spdk/test/vhost/hotplug/common.sh                       230
-rw-r--r--  src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job   16
-rwxr-xr-x  src/spdk/test/vhost/hotplug/scsi_hotattach.sh               104
-rwxr-xr-x  src/spdk/test/vhost/hotplug/scsi_hotdetach.sh               241
-rwxr-xr-x  src/spdk/test/vhost/hotplug/scsi_hotplug.sh                  90
-rw-r--r--  src/spdk/test/vhost/hotplug/scsi_hotremove.sh               232
-rw-r--r--  src/spdk/test/vhost/hotplug/test_plan.md                     86
-rw-r--r--  src/spdk/test/vhost/hotplug/vhost.conf.base                   4
9 files changed, 1239 insertions, 0 deletions
diff --git a/src/spdk/test/vhost/hotplug/blk_hotremove.sh b/src/spdk/test/vhost/hotplug/blk_hotremove.sh
new file mode 100644
index 00000000..a350d90d
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/blk_hotremove.sh
@@ -0,0 +1,236 @@
+# Vhost blk hot remove tests
+#
+# Objective
+# The purpose of these tests is to verify that SPDK vhost remains stable during
+# hot-remove operations performed on SCSI and BLK controller devices.
+# Hot-remove is a scenario where an NVMe device is removed while already in use.
+#
+# Test cases description
+# 1. FIO I/O traffic is run during hot-remove operations.
+# By default FIO uses default_integrity*.job config files located in
+# test/vhost/hotplug/fio_jobs directory.
+# 2. FIO mode of operation is random write (randwrite) with verification enabled,
+# which also results in read operations being performed.
+# 3. In each test case, fio status is checked after every run for any errors.
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_dir=$VM_BASE_DIR/$vm_num
+ vm_check_blk_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_2discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_2discs.job "
+ rm $tmp_detach_job
+ done
+}
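+
+# A sketch of the command built by prepare_fio_cmd_tc1 (values illustrative,
+# not from a real run): after "prepare_fio_cmd_tc1 0", $run_fio could expand to
+#   fio --eta=never --client=127.0.0.1,<vm0_fio_port> --remote-config /root/default_integrity_2discs.job
+# where <vm0_fio_port> is the host port returned by vm_fio_socket for VM 0.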
+
+function remove_vhost_controllers() {
+ $rpc_py remove_vhost_controller naa.Nvme0n1p0.0
+ $rpc_py remove_vhost_controller naa.Nvme0n1p1.0
+ $rpc_py remove_vhost_controller naa.Nvme0n1p2.1
+ $rpc_py remove_vhost_controller naa.Nvme0n1p3.1
+}
+
+# Vhost blk hot remove test cases
+#
+# Test Case 1
+function blk_hotremove_tc1() {
+ echo "Blk hotremove test case 1"
+ traddr=""
+ # 1. Run the command to hot remove the NVMe disk.
+ get_traddr "Nvme0"
+ delete_nvme "Nvme0"
+ # 2. If vhost had crashed, the tests would stop running.
+ sleep 1
+ add_nvme "HotInNvme0" "$traddr"
+ sleep 1
+}
+
+# Test Case 2
+function blk_hotremove_tc2() {
+ echo "Blk hotremove test case 2"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme0n1p0
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 Nvme1n1p1
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p2
+ # 2. Run two VMs and attach every VM to two blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic with verification enabled on NVMe disk.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the NVMe disk.
+ delete_nvme "HotInNvme0"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 5. Check that the fio job run on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 2: Iteration 1." 1 $retcode
+
+ # 6. Reboot VM
+ reboot_all_and_prepare "0"
+ # 7. Run FIO I/O traffic with verification enabled on NVMe disk.
+ $run_fio &
+ local retcode=0
+ wait_for_finish $! || retcode=$?
+ # 8. Check that the fio job run on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 2: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ remove_vhost_controllers
+ add_nvme "HotInNvme1" "$traddr"
+ sleep 1
+}
+
+# Test Case 3
+function blk_hotremove_tc3() {
+ echo "Blk hotremove test case 3"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme1n1p0
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 HotInNvme1n1p1
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p1
+ # 2. Run two VMs and attach every VM to two blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic with verification enabled on first NVMe disk.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme1"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 5. Check that the fio job run on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 3: Iteration 1." 1 $retcode
+
+ # 6. Reboot VM
+ reboot_all_and_prepare "0"
+ local retcode=0
+ # 7. Run FIO I/O traffic with verification enabled on the removed NVMe disk.
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 8. Check that the fio job run on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 3: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ remove_vhost_controllers
+ add_nvme "HotInNvme2" "$traddr"
+ sleep 1
+}
+
+# Test Case 4
+function blk_hotremove_tc4() {
+ echo "Blk hotremove test case 4"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme2n1p0
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 HotInNvme2n1p1
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p1
+ # 2. Run two VMs attached to blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic on first VM with verification enabled on both NVMe disks.
+ $run_fio &
+ local last_pid_vm0=$!
+
+ prepare_fio_cmd_tc1 "1"
+ # 4. Run FIO I/O traffic on second VM with verification enabled on both NVMe disks.
+ $run_fio &
+ local last_pid_vm1=$!
+
+ sleep 3
+ prepare_fio_cmd_tc1 "0 1"
+ # 5. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme2"
+ local retcode_vm0=0
+ local retcode_vm1=0
+ wait_for_finish $last_pid_vm0 || retcode_vm0=$?
+ wait_for_finish $last_pid_vm1 || retcode_vm1=$?
+ # 6. Check that the fio jobs run on the hot-removed devices stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 4: Iteration 1." 1 $retcode_vm0
+ check_fio_retcode "Blk hotremove test case 4: Iteration 2." 1 $retcode_vm1
+
+ # 7. Reboot all VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on the removed NVMe disk.
+ $run_fio &
+ local retcode=0
+ wait_for_finish $! || retcode=$?
+ # 9. Check that the fio job run on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 4: Iteration 3." 1 $retcode
+
+ vm_shutdown_all
+ remove_vhost_controllers
+ add_nvme "HotInNvme3" "$traddr"
+ sleep 1
+}
+
+# Test Case 5
+function blk_hotremove_tc5() {
+ echo "Blk hotremove test case 5"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme3n1p0
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 Nvme1n1p1
+ $rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p2
+ # 2. Run two VMs attached to blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic on first VM with verification enabled on both NVMe disks.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme3"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 5. Check that the fio job run on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 5: Iteration 1." 1 $retcode
+
+ # 6. Reboot VM.
+ reboot_all_and_prepare "0"
+ local retcode=0
+ # 7. Run FIO I/O traffic with verification enabled on the removed NVMe disk.
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 8. Check that the fio job run on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 5: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ remove_vhost_controllers
+ add_nvme "HotInNvme4" "$traddr"
+ sleep 1
+}
+
+vms_setup
+blk_hotremove_tc1
+blk_hotremove_tc2
+blk_hotremove_tc3
+blk_hotremove_tc4
+blk_hotremove_tc5
diff --git a/src/spdk/test/vhost/hotplug/common.sh b/src/spdk/test/vhost/hotplug/common.sh
new file mode 100644
index 00000000..a94b06cf
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/common.sh
@@ -0,0 +1,230 @@
+dry_run=false
+no_shutdown=false
+fio_bin="fio"
+fio_jobs="$BASE_DIR/fio_jobs/"
+test_type=spdk_vhost_scsi
+reuse_vms=false
+vms=()
+used_vms=""
+disk_split=""
+x=""
+scsi_hot_remove_test=0
+blk_hot_remove_test=0
+
+
+function usage() {
+ [[ -n $2 ]] && ( echo "$2"; echo ""; )
+ echo "Shortcut script for doing automated hotattach/hotdetach tests"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help print help and exit"
+ echo " --test-type=TYPE Perform specified test:"
+ echo " virtio - test host virtio-scsi-pci using file as disk image"
+ echo " kernel_vhost - use kernel driver vhost-scsi"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo "-x set -x for script debug"
+ echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
+ echo " --fio-jobs= Fio configs to use for tests. Can point to a directory or"
+ echo " --work-dir=WORK_DIR Where to find build file. Must exist. [default: $TEST_DIR]"
+ echo " --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
+ echo " NUM - VM number (mandatory)"
+ echo " OS - VM os disk path (optional)"
+ echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
+ echo " --scsi-hotremove-test Run scsi hotremove tests"
+ exit 0
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ work-dir=*) TEST_DIR="${OPTARG#*=}" ;;
+ fio-bin=*) fio_bin="${OPTARG#*=}" ;;
+ fio-jobs=*) fio_jobs="${OPTARG#*=}" ;;
+ test-type=*) test_type="${OPTARG#*=}" ;;
+ vm=*) vms+=("${OPTARG#*=}") ;;
+ scsi-hotremove-test) scsi_hot_remove_test=1 ;;
+ blk-hotremove-test) blk_hot_remove_test=1 ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x) set -x
+ x="-x" ;;
+ *) usage $0 "Invalid argument '$OPTARG'"
+ esac
+done
+shift $(( OPTIND - 1 ))
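+
+# Example invocation of a script sourcing this file (the paths and disk names
+# below are illustrative, not taken from a real setup):
+#   ./scsi_hotplug.sh --vm=0,/var/vm/os.qcow2,Nvme0n1p0:Nvme0n1p1 \
+#       --fio-bin=/usr/src/fio/fio --scsi-hotremove-test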
+
+fio_job=$BASE_DIR/fio_jobs/default_integrity.job
+tmp_attach_job=$BASE_DIR/fio_jobs/fio_attach.job.tmp
+tmp_detach_job=$BASE_DIR/fio_jobs/fio_detach.job.tmp
+. $BASE_DIR/../common/common.sh
+
+rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock"
+
+function print_test_fio_header() {
+ notice "==============="
+ notice ""
+ notice "Testing..."
+
+ notice "Running fio jobs ..."
+ if [ $# -gt 0 ]; then
+ echo $1
+ fi
+}
+
+function run_vhost() {
+ notice "==============="
+ notice ""
+ notice "running SPDK"
+ notice ""
+ spdk_vhost_run --conf-path=$BASE_DIR
+ notice ""
+}
+
+function vms_setup() {
+ for vm_conf in ${vms[@]}; do
+ IFS=',' read -ra conf <<< "$vm_conf"
+ if [[ x"${conf[0]}" == x"" ]] || ! assert_number ${conf[0]}; then
+ fail "invalid VM configuration syntax $vm_conf"
+ fi
+
+ # Sanity check if VM is not defined twice
+ for vm_num in $used_vms; do
+ if [[ $vm_num -eq ${conf[0]} ]]; then
+ fail "VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
+ fi
+ done
+
+ used_vms+=" ${conf[0]}"
+
+ setup_cmd="vm_setup --disk-type=$test_type --force=${conf[0]}"
+ [[ x"${conf[1]}" != x"" ]] && setup_cmd+=" --os=${conf[1]}"
+ [[ x"${conf[2]}" != x"" ]] && setup_cmd+=" --disks=${conf[2]}"
+ $setup_cmd
+ done
+}
+
+function vm_run_with_arg() {
+ vm_run $@
+ vm_wait_for_boot 600 $@
+}
+
+function vms_setup_and_run() {
+ vms_setup
+ vm_run_with_arg $@
+}
+
+function vms_prepare() {
+ for vm_num in $1; do
+ vm_dir=$VM_BASE_DIR/$vm_num
+
+ qemu_mask_param="VM_${vm_num}_qemu_mask"
+
+ host_name="VM-${vm_num}-${!qemu_mask_param}"
+ notice "Setting up hostname: $host_name"
+ vm_ssh $vm_num "hostname $host_name"
+ vm_start_fio_server --fio-bin=$fio_bin $readonly $vm_num
+ done
+}
+
+function vms_reboot_all() {
+ notice "Rebooting all vms "
+ for vm_num in $1; do
+ vm_ssh $vm_num "reboot" || true
+ while vm_os_booted $vm_num; do
+ sleep 0.5
+ done
+ done
+
+ vm_wait_for_boot 300 $1
+}
+
+function check_fio_retcode() {
+ local fio_retcode=$3
+ echo $1
+ local retcode_expected=$2
+ if [ $retcode_expected == 0 ]; then
+ if [ $fio_retcode != 0 ]; then
+ error " Fio test ended with error."
+ else
+ notice " Fio test ended with success."
+ fi
+ else
+ if [ $fio_retcode != 0 ]; then
+ notice " Fio test ended with expected error."
+ else
+ error " Fio test ended with unexpected success."
+ fi
+ fi
+}
+
+function wait_for_finish() {
+ local wait_for_pid=$1
+ local sequence=${2:-30}
+ for i in $(seq 1 $sequence); do
+ if kill -0 $wait_for_pid 2> /dev/null; then
+ sleep 0.5
+ continue
+ else
+ break
+ fi
+ done
+ if kill -0 $wait_for_pid 2> /dev/null; then
+ error "Timeout for fio command"
+ fi
+
+ wait $wait_for_pid
+}
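+
+# Typical usage in the test cases: start fio in the background, then poll it
+# with a timeout of roughly $sequence * 0.5 seconds (15 s by default), e.g.:
+#   $run_fio &
+#   wait_for_finish $! || retcode=$?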
+
+
+function reboot_all_and_prepare() {
+ vms_reboot_all "$1"
+ vms_prepare "$1"
+}
+
+function post_test_case() {
+ vm_shutdown_all
+ spdk_vhost_kill
+}
+
+function on_error_exit() {
+ set +e
+ echo "Error on $1 - $2"
+ post_test_case
+ print_backtrace
+ exit 1
+}
+
+function check_disks() {
+ if [ "$1" == "$2" ]; then
+ echo "Disk has not been deleted"
+ exit 1
+ fi
+}
+
+function get_traddr() {
+ local nvme_name=$1
+ local nvme="$( $SPDK_BUILD_DIR/scripts/gen_nvme.sh )"
+ while read -r line; do
+ if [[ $line == *"TransportID"* ]] && [[ $line == *$nvme_name* ]]; then
+ local word_array=($line)
+ for word in "${word_array[@]}"; do
+ if [[ $word == *"traddr"* ]]; then
+ traddr=$( echo $word | sed 's/traddr://' | sed 's/"//' )
+ fi
+ done
+ fi
+ done <<< "$nvme"
+}
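+
+# get_traddr scans the gen_nvme.sh output for a TransportID line such as
+# (the address below is illustrative):
+#   TransportID "trtype:PCIe traddr:0000:5e:00.0" Nvme0
+# and stores the PCI address, e.g. "0000:5e:00.0", in the global $traddr.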
+
+function delete_nvme() {
+ $rpc_py delete_nvme_controller $1
+}
+
+function add_nvme() {
+ $rpc_py construct_nvme_bdev -b $1 -t PCIe -a $2
+}
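+
+# Example flow used by the hotremove tests (the traddr value is illustrative):
+# delete the in-use bdev, then re-attach the same PCIe device under a new name
+# so the next test case can use it:
+#   delete_nvme "Nvme0"
+#   add_nvme "HotInNvme0" "0000:5e:00.0"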
diff --git a/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job b/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job
new file mode 100644
index 00000000..136fe902
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job
@@ -0,0 +1,16 @@
+[global]
+blocksize=4k
+iodepth=512
+iodepth_batch=128
+iodepth_low=256
+ioengine=libaio
+group_reporting
+thread
+numjobs=1
+direct=1
+rw=randwrite
+do_verify=1
+verify=md5
+verify_backlog=1024
+time_based=1
+runtime=10
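+
+# Note: with the options above fio interleaves verification reads with the
+# random-write traffic: verify_backlog=1024 verifies after every 1024 blocks
+# written instead of only after all writes complete, so errors on a
+# hot-removed disk surface while the 10-second time-based run is in flight.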
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotattach.sh b/src/spdk/test/vhost/hotplug/scsi_hotattach.sh
new file mode 100755
index 00000000..e9d851f6
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotattach.sh
@@ -0,0 +1,104 @@
+#!/usr/bin/env bash
+set -e
+BASE_DIR=$(readlink -f $(dirname $0))
+[[ -z "$TEST_DIR" ]] && TEST_DIR="$(cd $BASE_DIR/../../../../ && pwd)"
+
+. $BASE_DIR/common.sh
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_attach_job
+ vm_dir=$VM_BASE_DIR/$vm_num
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_attach_job
+ echo "filename=/dev/$disk" >> $tmp_attach_job
+ done
+ vm_scp $vm_num $tmp_attach_job 127.0.0.1:/root/default_integrity_discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket ${vm_num}) --remote-config /root/default_integrity_discs.job "
+ rm $tmp_attach_job
+ done
+}
+
+# Check if fio test passes on device attached to first controller.
+function hotattach_tc1() {
+ notice "Hotattach test case 1"
+
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 0 Nvme0n1p0
+
+ sleep 3
+ prepare_fio_cmd_tc1 "0"
+ $run_fio
+ check_fio_retcode "Hotattach test case 1: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached device.
+# During test attach another device to first controller and check fio status.
+function hotattach_tc2() {
+ notice "Hotattach test case 2"
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 1 Nvme0n1p1
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 2: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached devices.
+# During test attach another device to second controller and check fio status.
+function hotattach_tc3() {
+ notice "Hotattach test case 3"
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p1.0 0 Nvme0n1p2
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 3: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached devices.
+# During the test, attach another device to the third controller (VM2) and check fio status.
+# At the end, reboot the VMs, run the fio test on all devices and check fio status.
+function hotattach_tc4() {
+ notice "Hotattach test case 4"
+
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p2.1 0 Nvme0n1p3
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 4: Iteration 1." 0 $?
+
+ prepare_fio_cmd_tc1 "0 1"
+ $run_fio
+ check_fio_retcode "Hotattach test case 4: Iteration 2." 0 $?
+
+ reboot_all_and_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0 1"
+ $run_fio
+ check_fio_retcode "Hotattach test case 4: Iteration 3." 0 $?
+}
+
+function cleanup_after_tests() {
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p0.0 0
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p0.0 1
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p1.0 0
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p2.1 0
+ $rpc_py delete_nvme_controller Nvme0
+}
+
+hotattach_tc1
+hotattach_tc2
+hotattach_tc3
+hotattach_tc4
+cleanup_after_tests
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh b/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh
new file mode 100755
index 00000000..45c948d9
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh
@@ -0,0 +1,241 @@
+#!/usr/bin/env bash
+
+set -e
+BASE_DIR=$(readlink -f $(dirname $0))
+[[ -z "$TEST_DIR" ]] && TEST_DIR="$(cd $BASE_DIR/../../../../ && pwd)"
+
+. $BASE_DIR/common.sh
+
+function get_first_disk() {
+ vm_check_scsi_location $1
+ disk_array=( $SCSI_DISK )
+ eval "$2=${disk_array[0]}"
+}
+
+function check_disks() {
+ if [ "$1" == "$2" ]; then
+ fail "Disk has not been deleted"
+ fi
+}
+
+function prepare_fio_cmd_tc1_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_dir=$VM_BASE_DIR/$vm_num
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_4discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_4discs.job "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc1_iter2() {
+ print_test_fio_header
+
+ for vm_num in 2; do
+ cp $fio_job $tmp_detach_job
+ vm_dir=$VM_BASE_DIR/$vm_num
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_3discs.job
+ rm $tmp_detach_job
+ done
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $used_vms; do
+ if [ $vm_num == 2 ]; then
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_3discs.job "
+ continue
+ fi
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_4discs.job "
+ done
+}
+
+function prepare_fio_cmd_tc2_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_dir=$VM_BASE_DIR/$vm_num
+ vm_check_scsi_location $vm_num
+ disk_array=($SCSI_DISK)
+ disk=${disk_array[0]}
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity.job "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc2_iter2() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ if [ $vm_num == 2 ]; then
+ vm_job_name=default_integrity_3discs.job
+ else
+ vm_job_name=default_integrity_4discs.job
+ fi
+ vm_dir=$VM_BASE_DIR/$vm_num
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/${vm_job_name} "
+ rm $tmp_detach_job
+ done
+}
+
+
+function prepare_fio_cmd_tc3_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ if [ $vm_num == 2 ]; then
+ vm_job_name=default_integrity_3discs.job
+ else
+ vm_job_name=default_integrity_4discs.job
+ fi
+ vm_dir=$VM_BASE_DIR/$vm_num
+ vm_check_scsi_location $vm_num
+ j=1
+ for disk in $SCSI_DISK; do
+ if [ $vm_num == 2 ]; then
+ if [ $j == 1 ]; then
+ (( j++ ))
+ continue
+ fi
+ fi
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ (( j++ ))
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$vm_job_name "
+ rm $tmp_detach_job
+ done
+}
+
+# During a fio test on all devices, remove the first device from the fifth controller and check that fio fails.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc1() {
+ notice "Hotdetach test case 1"
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc1_iter1 "2 3"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 1: Iteration 1." 1 $?
+ set -xe
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# During a fio test on a device from the third VM, remove the first device from the fifth controller and check that fio fails.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc2() {
+ notice "Hotdetach test case 2"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc2_iter1 "2"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 2: Iteration 1." 1 $?
+ set -xe
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# Run a fio test on all devices except one, then remove that device and check that fio passes.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc3() {
+ notice "Hotdetach test case 3"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc3_iter1 "2 3"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p4.2 0
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 3: Iteration 1." 0 $?
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# Run a fio test on all devices except one, and a separate fio test on that one device.
+# Check that the fio test on the detached device fails and the other fio test passes.
+# Also check that the disk has been removed from the VM.
+# After reboot, run a fio test on the remaining devices and check that it passes.
+function hotdetach_tc4() {
+ notice "Hotdetach test case 4"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc2_iter1 "2"
+ $run_fio &
+ first_fio_pid=$!
+ prepare_fio_cmd_tc3_iter1 "2 3"
+ $run_fio &
+ second_fio_pid=$!
+ sleep 3
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $first_fio_pid
+ check_fio_retcode "Hotdetach test case 4: Iteration 1." 1 $?
+ set -xe
+ wait $second_fio_pid
+ check_fio_retcode "Hotdetach test case 4: Iteration 2." 0 $?
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+
+ reboot_all_and_prepare "2 3"
+ sleep 2
+ prepare_fio_cmd_tc2_iter2 "2 3"
+ $run_fio
+ check_fio_retcode "Hotdetach test case 4: Iteration 3." 0 $?
+ clear_after_tests
+}
+
+function clear_after_tests() {
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
+}
+
+hotdetach_tc1
+hotdetach_tc2
+hotdetach_tc3
+hotdetach_tc4
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotplug.sh b/src/spdk/test/vhost/hotplug/scsi_hotplug.sh
new file mode 100755
index 00000000..ab429c1e
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotplug.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+set -e
+BASE_DIR=$(readlink -f $(dirname $0))
+. $BASE_DIR/common.sh
+
+if [[ $scsi_hot_remove_test == 1 ]] && [[ $blk_hot_remove_test == 1 ]]; then
+ notice "Vhost-scsi and vhost-blk hotremove tests cannot be run together"
+fi
+
+# Add split section into vhost config
+function gen_config() {
+ cp $BASE_DIR/vhost.conf.base $BASE_DIR/vhost.conf.in
+ cat << END_OF_CONFIG >> $BASE_DIR/vhost.conf.in
+[Split]
+ Split Nvme0n1 16
+ Split Nvme1n1 20
+ Split HotInNvme0n1 2
+ Split HotInNvme1n1 2
+ Split HotInNvme2n1 2
+ Split HotInNvme3n1 2
+END_OF_CONFIG
+}
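+
+# Each "Split <bdev> <n>" line above carves one NVMe bdev into n equally sized
+# split bdevs named <bdev>p0 .. <bdev>p(n-1); e.g. "Split Nvme0n1 16" yields
+# Nvme0n1p0 through Nvme0n1p15, which the test cases below attach to vhost
+# controllers.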
+
+# Run spdk by calling run_vhost from hotplug/common.sh.
+# Then prepare vhost with rpc calls and set up and run 4 VMs.
+function pre_hot_attach_detach_test_case() {
+ used_vms=""
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p0.0
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p1.0
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p2.1
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p3.1
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p4.2
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p5.2
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p6.3
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p7.3
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p4.2 1 Nvme0n1p9
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p5.2 0 Nvme0n1p10
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p5.2 1 Nvme0n1p11
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p6.3 0 Nvme0n1p12
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p6.3 1 Nvme0n1p13
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p7.3 0 Nvme0n1p14
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p7.3 1 Nvme0n1p15
+ vms_setup_and_run "0 1 2 3"
+ vms_prepare "0 1 2 3"
+}
+
+function clear_vhost_config() {
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p4.2 0
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p4.2 1
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p5.2 0
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p5.2 1
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p6.3 0
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p6.3 1
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p7.3 0
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p7.3 1
+ $rpc_py remove_vhost_controller naa.Nvme0n1p0.0
+ $rpc_py remove_vhost_controller naa.Nvme0n1p1.0
+ $rpc_py remove_vhost_controller naa.Nvme0n1p2.1
+ $rpc_py remove_vhost_controller naa.Nvme0n1p3.1
+ $rpc_py remove_vhost_controller naa.Nvme0n1p4.2
+ $rpc_py remove_vhost_controller naa.Nvme0n1p5.2
+ $rpc_py remove_vhost_controller naa.Nvme0n1p6.3
+ $rpc_py remove_vhost_controller naa.Nvme0n1p7.3
+}
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
+gen_config
+# Hotremove/hotattach/hotdetach test cases prerequisites
+# 1. Run vhost with 2 NVMe disks.
+run_vhost
+rm $BASE_DIR/vhost.conf.in
+if [[ $scsi_hot_remove_test == 0 ]] && [[ $blk_hot_remove_test == 0 ]]; then
+ pre_hot_attach_detach_test_case
+ $BASE_DIR/scsi_hotattach.sh --fio-bin=$fio_bin &
+ first_script=$!
+ $BASE_DIR/scsi_hotdetach.sh --fio-bin=$fio_bin &
+ second_script=$!
+ wait $first_script
+ wait $second_script
+ vm_shutdown_all
+ clear_vhost_config
+fi
+if [[ $scsi_hot_remove_test == 1 ]]; then
+ source $BASE_DIR/scsi_hotremove.sh
+fi
+if [[ $blk_hot_remove_test == 1 ]]; then
+ source $BASE_DIR/blk_hotremove.sh
+fi
+post_test_case
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotremove.sh b/src/spdk/test/vhost/hotplug/scsi_hotremove.sh
new file mode 100644
index 00000000..829eb3f6
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotremove.sh
@@ -0,0 +1,232 @@
+set -xe
+
+# Vhost SCSI hotremove tests
+#
+# Objective
+# The purpose of these tests is to verify that SPDK vhost remains stable during
+# hot-remove operations performed on SCSI controller devices.
+# Hot-remove is a scenario where an NVMe device is removed while already in use.
+# Tests consist of 4 test cases.
+#
+# Test cases description
+# 1. FIO I/O traffic is run during hot-remove operations.
+# By default FIO uses default_integrity*.job config files located in
+# test/vhost/hotplug/fio_jobs directory.
+# 2. FIO mode of operation is random write (randwrite) with verification enabled,
+# which also results in read operations being performed.
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_dir=$VM_BASE_DIR/$vm_num
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ echo "size=100%" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_2discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_2discs.job "
+ rm $tmp_detach_job
+ done
+}
+
+# Vhost SCSI hot-remove test cases.
+
+# Test Case 1
+function scsi_hotremove_tc1() {
+ echo "Scsi hotremove test case 1"
+ traddr=""
+ get_traddr "Nvme0"
+ # 1. Run the command to hot remove the NVMe disk.
+ delete_nvme "Nvme0"
+ # 2. If vhost had crashed, the tests would stop running.
+ sleep 1
+ add_nvme "HotInNvme0" "$traddr"
+}
+
+# Test Case 2
+function scsi_hotremove_tc2() {
+ echo "Scsi hotremove test case 2"
+ # 1. Attach split NVMe bdevs to scsi controller.
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 0 HotInNvme0n1p0
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p1.0 0 Nvme1n1p0
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p2.1 0 HotInNvme0n1p1
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p3.1 0 Nvme1n1p1
+
+ # 2. Run two VMs, attached to scsi controllers.
+ vms_setup
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+
+ vm_check_scsi_location "0"
+ local disks="$SCSI_DISK"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0 1"
+ # 3. Run FIO I/O traffic with verification enabled on both NVMe disks in the VM.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the NVMe disk.
+ delete_nvme "HotInNvme0"
+
+ # 5. Check that the fio job run on the hot-removed device stopped on the VM.
+ # Expected: fio should return an error message and a return code != 0.
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ check_fio_retcode "Scsi hotremove test case 2: Iteration 1." 1 $retcode
+
+ # 6. Check if removed devices are gone from VM.
+ vm_check_scsi_location "0"
+ local new_disks="$SCSI_DISK"
+ check_disks "$disks" "$new_disks"
+ # 7. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on both VMs.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 9. Check that the fio jobs run on the hot-removed device stopped on both VMs.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Scsi hotremove test case 2: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme1" "$traddr"
+ sleep 1
+}
+
+# Test Case 3
+function scsi_hotremove_tc3() {
+ echo "Scsi hotremove test case 3"
+ # 1. Attach added NVMe bdev to scsi controller.
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 0 HotInNvme1n1p0
+ # 2. Run two VMs attached to scsi controllers.
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+ vm_check_scsi_location "0"
+ local disks="$SCSI_DISK"
+ traddr=""
+ get_traddr "Nvme0"
+ # 3. Run FIO I/O traffic with verification enabled on both NVMe disks in the VMs.
+ prepare_fio_cmd_tc1 "0"
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove NVMe disk.
+ delete_nvme "HotInNvme1"
+ # 5. Check that the fio job run on the hot-removed device stopped on the first VM.
+ # Expected: fio should return an error message and a return code != 0.
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ check_fio_retcode "Scsi hotremove test case 3: Iteration 1." 1 $retcode
+ # 6. Check if removed devices are gone from lsblk.
+ vm_check_scsi_location "0"
+ local new_disks="$SCSI_DISK"
+ check_disks "$disks" "$new_disks"
+ # 7. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on both VMs.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 9. Check that the fio jobs run on the hot-removed device stopped on both VMs.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Scsi hotremove test case 3: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme2" "$traddr"
+ sleep 1
+}
+
+# Test Case 4
+function scsi_hotremove_tc4() {
+ echo "Scsi hotremove test case 4"
+ # 1. Attach NVMe bdevs to scsi controllers.
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 0 HotInNvme2n1p0
+ $rpc_py add_vhost_scsi_lun naa.Nvme0n1p2.1 0 HotInNvme2n1p1
+ # 2. Run two VMs attached to scsi controllers.
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+
+ # 3. Run FIO I/O traffic with verification enabled on first VM.
+ vm_check_scsi_location "0"
+ local disks_vm0="$SCSI_DISK"
+ # 4. Run FIO I/O traffic with verification enabled on second VM.
+ prepare_fio_cmd_tc1 "0"
+ $run_fio &
+ last_pid_vm0=$!
+
+ vm_check_scsi_location "1"
+ local disks_vm1="$SCSI_DISK"
+ prepare_fio_cmd_tc1 "1"
+ $run_fio &
+ local last_pid_vm1=$!
+ prepare_fio_cmd_tc1 "0 1"
+ sleep 3
+ # 5. Run the command to hot remove the NVMe disk.
+ traddr=""
+ get_traddr "Nvme0"
+ delete_nvme "HotInNvme2"
+ # 6. Check that the fio jobs run on the hot-removed devices stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ local retcode_vm0=0
+ wait_for_finish $last_pid_vm0 || retcode_vm0=$?
+ local retcode_vm1=0
+ wait_for_finish $last_pid_vm1 || retcode_vm1=$?
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 1." 1 $retcode_vm0
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 2." 1 $retcode_vm1
+
+ # 7. Check if removed devices are gone from lsblk.
+ vm_check_scsi_location "0"
+ local new_disks_vm0="$SCSI_DISK"
+ check_disks "$disks_vm0" "$new_disks_vm0"
+ vm_check_scsi_location "1"
+ local new_disks_vm1="$SCSI_DISK"
+ check_disks "$disks_vm1" "$new_disks_vm1"
+
+ # 8. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 9. Run FIO I/O traffic with verification enabled on the not-removed NVMe disk.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 10. Check that the fio job run on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 3." 1 $retcode
+ prepare_fio_cmd_tc1 "0 1"
+ # 11. Run FIO I/O traffic with verification enabled on the not-removed NVMe disk.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 12. Check fio's finished status. Writes and reads on the not-removed
+ # NVMe disk should be successful.
+ # Expected: fio should return a return code == 0.
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 4." 0 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme3" "$traddr"
+ sleep 1
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p1.0 0
+ $rpc_py remove_vhost_scsi_target naa.Nvme0n1p3.1 0
+}
+
+function pre_scsi_hotremove_test_case() {
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p0.0
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p1.0
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p2.1
+ $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p3.1
+}
+
+function post_scsi_hotremove_test_case() {
+ $rpc_py remove_vhost_controller naa.Nvme0n1p0.0
+ $rpc_py remove_vhost_controller naa.Nvme0n1p1.0
+ $rpc_py remove_vhost_controller naa.Nvme0n1p2.1
+ $rpc_py remove_vhost_controller naa.Nvme0n1p3.1
+}
+
+pre_scsi_hotremove_test_case
+scsi_hotremove_tc1
+scsi_hotremove_tc2
+scsi_hotremove_tc3
+scsi_hotremove_tc4
+post_scsi_hotremove_test_case
diff --git a/src/spdk/test/vhost/hotplug/test_plan.md b/src/spdk/test/vhost/hotplug/test_plan.md
new file mode 100644
index 00000000..0cbc5042
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/test_plan.md
@@ -0,0 +1,86 @@
+# Vhost hotattach and hotdetach test plan
+
+## Objective
+The purpose of these tests is to verify that SPDK vhost remains stable during
+hot-attach and hot-detach operations performed on SCSI controller devices.
+Hot-attach is a scenario where a device is added to a controller already in use by
+a guest VM, while in hot-detach a device is removed from a controller while already in use.
+
+## Test Cases Description
+1. FIO I/O traffic is run during hot-attach and hot-detach operations.
+By default FIO uses default_integrity*.job config files located in
+test/vhost/hotplug/fio_jobs directory.
+2. FIO mode of operation is random write (randwrite) with verification enabled,
+which also results in read operations being performed.
+3. Test case descriptions below contain manual steps for testing.
+Automated tests are located in test/vhost/hotplug.
+
+### Hotattach, Hotdetach Test Cases prerequisites
+1. Run vhost with 8 empty controllers. Prepare 16 NVMe disks.
+If you don't have 16 disks, use split (see the example split section below).
+2. In each test case, fio status is checked after every run for any errors.
+
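+For example, the automated scsi_hotplug.sh script appends a split section like
+the following to the vhost config (the bdev names and split counts here mirror
+that script):
+
+    [Split]
+      Split Nvme0n1 16
+      Split Nvme1n1 20
+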
+### Hotattach Test Cases prerequisites
+1. Run VMs: the first with ctrlr-1 and ctrlr-2, and the second with ctrlr-3 and ctrlr-4.
+
+## Test Case 1
+1. Attach NVMe to Ctrlr 1
+2. Run fio integrity on attached device
+
+## Test Case 2
+1. Run fio integrity on attached device from test case 1
+2. During fio attach another NVMe to Ctrlr 1
+3. Run fio integrity on both devices
+
+## Test Case 3
+1. Run fio integrity on attached devices from previous test cases
+2. During fio attach NVMe to Ctrl2
+3. Run fio integrity on all devices
+
+## Test Case 4
+1. Run fio integrity on attached devices from previous test cases
+2. During fio attach NVMe to Ctrl3/VM2
+3. Run fio integrity on all devices
+4. Reboot VMs
+5. Run fio integrity again on all devices
+
+
+### Hotdetach Test Cases prerequisites
+1. Run VMs: the first with ctrlr-5 and ctrlr-6, and the second with ctrlr-7 and ctrlr-8.
+
+## Test Case 1
+1. Run fio on all devices
+2. Detach NVMe from Ctrl5 during fio
+3. Check that vhost and the VMs did not crash
+4. Check that the detached device is gone from the VM
+5. Check that the fio job run on the detached device stopped and failed
+
+## Test Case 2
+1. Attach NVMe to Ctrlr 5
+2. Run fio on 1 device from Ctrl 5
+3. Detach NVMe from Ctrl5 during fio traffic
+4. Check that vhost and the VMs did not crash
+5. Check that the fio job run on the detached device stopped and failed
+6. Check that the detached device is gone from the VM
+
+## Test Case 3
+1. Attach NVMe to Ctrlr 5
+2. Run fio with integrity on all devices, except one
+3. Detach NVMe without traffic during fio running on other devices
+4. Check that vhost and the VMs did not crash
+5. Check that the fio jobs did not fail
+6. Check that the detached device is gone from the VM
+
+## Test Case 4
+1. Attach NVMe to Ctrlr 5
+2. Run fio on 1 device from Ctrl 5
+3. Run separate fio with integrity on all other devices (all VMs)
+4. Detach NVMe from Ctrl5 during fio traffic
+5. Check that vhost and the VMs did not crash
+6. Check that the fio job run on the detached device stopped and failed
+7. Check that the other fio jobs did not fail
+8. Check that the detached device is gone from the VM
+9. Reboot VMs
+10. Check that the detached device is gone from the VM
+11. Check that all other devices are in place
+12. Run fio integrity on all remaining devices
diff --git a/src/spdk/test/vhost/hotplug/vhost.conf.base b/src/spdk/test/vhost/hotplug/vhost.conf.base
new file mode 100644
index 00000000..4fa801d9
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/vhost.conf.base
@@ -0,0 +1,4 @@
+[Global]
+
+[Nvme]
+ HotplugEnable Yes