author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
commit     19fcec84d8d7d21e796c7624e521b60d28ee21ed
tree       42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc
parent     Initial commit.
Adding upstream version 16.2.11+ds.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test/vhost')
-rw-r--r--  src/spdk/test/vhost/common.sh  1266
-rw-r--r--  src/spdk/test/vhost/common/autotest.config  38
-rw-r--r--  src/spdk/test/vhost/common/fio_jobs/default_initiator.job  11
-rw-r--r--  src/spdk/test/vhost/common/fio_jobs/default_integrity.job  19
-rw-r--r--  src/spdk/test/vhost/common/fio_jobs/default_integrity_nightly.job  23
-rw-r--r--  src/spdk/test/vhost/common/fio_jobs/default_performance.job  16
-rwxr-xr-x  src/spdk/test/vhost/fio/fio.sh  58
-rw-r--r--  src/spdk/test/vhost/fio/vhost_fio.job  19
-rwxr-xr-x  src/spdk/test/vhost/fiotest/fio.sh  288
-rwxr-xr-x  src/spdk/test/vhost/fuzz/fuzz.sh  66
-rw-r--r--  src/spdk/test/vhost/hotplug/blk_hotremove.sh  235
-rw-r--r--  src/spdk/test/vhost/hotplug/common.sh  230
-rw-r--r--  src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job  16
-rwxr-xr-x  src/spdk/test/vhost/hotplug/scsi_hotattach.sh  103
-rwxr-xr-x  src/spdk/test/vhost/hotplug/scsi_hotdetach.sh  212
-rwxr-xr-x  src/spdk/test/vhost/hotplug/scsi_hotplug.sh  92
-rw-r--r--  src/spdk/test/vhost/hotplug/scsi_hotremove.sh  233
-rw-r--r--  src/spdk/test/vhost/initiator/autotest.config  5
-rw-r--r--  src/spdk/test/vhost/initiator/bdev.fio  51
-rw-r--r--  src/spdk/test/vhost/initiator/bdev_pci.conf  2
-rwxr-xr-x  src/spdk/test/vhost/initiator/blockdev.sh  82
-rwxr-xr-x  src/spdk/test/vhost/integrity/integrity_start.sh  106
-rwxr-xr-x  src/spdk/test/vhost/integrity/integrity_vm.sh  83
-rw-r--r--  src/spdk/test/vhost/lvol/autotest.config  74
-rwxr-xr-x  src/spdk/test/vhost/lvol/lvol_test.sh  289
-rwxr-xr-x  src/spdk/test/vhost/manual.sh  86
-rw-r--r--  src/spdk/test/vhost/migration/autotest.config  14
-rw-r--r--  src/spdk/test/vhost/migration/migration-tc1.job  25
-rw-r--r--  src/spdk/test/vhost/migration/migration-tc1.sh  119
-rw-r--r--  src/spdk/test/vhost/migration/migration-tc2.job  20
-rw-r--r--  src/spdk/test/vhost/migration/migration-tc2.sh  203
-rw-r--r--  src/spdk/test/vhost/migration/migration-tc3.job  20
-rw-r--r--  src/spdk/test/vhost/migration/migration-tc3a.sh  218
-rw-r--r--  src/spdk/test/vhost/migration/migration-tc3b.sh  77
-rwxr-xr-x  src/spdk/test/vhost/migration/migration.sh  143
-rw-r--r--  src/spdk/test/vhost/other/invalid.config  18
-rwxr-xr-x  src/spdk/test/vhost/other/negative.sh  209
-rwxr-xr-x  src/spdk/test/vhost/perf_bench/vhost_perf.sh  473
-rwxr-xr-x  src/spdk/test/vhost/readonly/delete_partition_vm.sh  42
-rwxr-xr-x  src/spdk/test/vhost/readonly/disabled_readonly_vm.sh  47
-rwxr-xr-x  src/spdk/test/vhost/readonly/enabled_readonly_vm.sh  72
-rwxr-xr-x  src/spdk/test/vhost/readonly/readonly.sh  136
-rw-r--r--  src/spdk/test/vhost/shared/bdev.json  20
-rwxr-xr-x  src/spdk/test/vhost/shared/shared.sh  32
-rwxr-xr-x  src/spdk/test/vhost/vhost.sh  107
-rwxr-xr-x  src/spdk/test/vhost/vhost_boot/vhost_boot.sh  126
-rwxr-xr-x  src/spdk/test/vhost/windows/windows.sh  141
-rw-r--r--  src/spdk/test/vhost/windows/windows_fs_test.ps1  78
-rw-r--r--  src/spdk/test/vhost/windows/windows_scsi_compliance.ps1  73
-rwxr-xr-x  src/spdk/test/vhost/windows/windows_scsi_compliance.py  147
-rwxr-xr-x  src/spdk/test/vhost/windows/windows_scsi_compliance.sh  89
51 files changed, 6352 insertions, 0 deletions
diff --git a/src/spdk/test/vhost/common.sh b/src/spdk/test/vhost/common.sh
new file mode 100644
index 000000000..33c8e0953
--- /dev/null
+++ b/src/spdk/test/vhost/common.sh
@@ -0,0 +1,1266 @@
+: ${SPDK_VHOST_VERBOSE=false}
+: ${VHOST_DIR="$HOME/vhost_test"}
+: ${QEMU_BIN="qemu-system-x86_64"}
+: ${QEMU_IMG_BIN="qemu-img"}
+
+TEST_DIR=$(readlink -f $rootdir/..)
+VM_DIR=$VHOST_DIR/vms
+TARGET_DIR=$VHOST_DIR/vhost
+VM_PASSWORD="root"
+
+#TODO: Move vhost_vm_image.qcow2 into VHOST_DIR on test systems.
+VM_IMAGE=$HOME/vhost_vm_image.qcow2
+
+if ! hash $QEMU_IMG_BIN $QEMU_BIN; then
+ error 'QEMU is not installed on this system. Unable to run vhost tests.'
+ exit 1
+fi
+
+mkdir -p $VHOST_DIR
+mkdir -p $VM_DIR
+mkdir -p $TARGET_DIR
+
+#
+# Source config describing QEMU and VHOST cores and NUMA
+#
+source $rootdir/test/vhost/common/autotest.config
+
+function vhosttestinit() {
+ if [ "$TEST_MODE" == "iso" ]; then
+ $rootdir/scripts/setup.sh
+
+ # Look for the VM image
+ if [[ ! -f $VM_IMAGE ]]; then
+ echo "VM image not found at $VM_IMAGE"
+ echo "Download to $HOME? [yn]"
+ read -r download
+ if [ "$download" = "y" ]; then
+ curl https://ci.spdk.io/download/test_resources/vhost_vm_image.tar.gz | tar xz -C $HOME
+ fi
+ fi
+ fi
+
+ # Look for the VM image
+ if [[ ! -f $VM_IMAGE ]]; then
+ error "VM image not found at $VM_IMAGE"
+ exit 1
+ fi
+}
+
+function vhosttestfini() {
+ if [ "$TEST_MODE" == "iso" ]; then
+ $rootdir/scripts/setup.sh reset
+ fi
+}
+
+function message() {
+ local verbose_out
+ if ! $SPDK_VHOST_VERBOSE; then
+ verbose_out=""
+ elif [[ ${FUNCNAME[2]} == "source" ]]; then
+ verbose_out=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
+ else
+ verbose_out=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
+ fi
+
+ local msg_type="$1"
+ shift
+ echo -e "${msg_type}${verbose_out}: $*"
+}
+
+function fail() {
+ echo "===========" >&2
+ message "FAIL" "$@" >&2
+ echo "===========" >&2
+ exit 1
+}
+
+function error() {
+ echo "===========" >&2
+ message "ERROR" "$@" >&2
+ echo "===========" >&2
+	# Don't 'return 1' here - the stack trace would then be incomplete (why?), missing the upper command.
+ false
+}
+
+function warning() {
+ message "WARN" "$@" >&2
+}
+
+function notice() {
+ message "INFO" "$@"
+}
+
+function check_qemu_packedring_support() {
+ qemu_version=$($QEMU_BIN -version | grep -Po "(?<=version )\d+.\d+.\d+")
+ if [[ "$qemu_version" < "4.2.0" ]]; then
+ error "This qemu binary does not support packed ring"
+ fi
+}
+
+function get_vhost_dir() {
+ local vhost_name="$1"
+
+ if [[ -z "$vhost_name" ]]; then
+ error "vhost name must be provided to get_vhost_dir"
+ return 1
+ fi
+
+ echo "$TARGET_DIR/${vhost_name}"
+}
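+
+# Example (illustrative): for an instance named "vhost0" this prints
+# "$TARGET_DIR/vhost0".
+#   get_vhost_dir vhost0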
+
+function vhost_run() {
+ local vhost_name="$1"
+ local run_gen_nvme=true
+
+ if [[ -z "$vhost_name" ]]; then
+ error "vhost name must be provided to vhost_run"
+ return 1
+ fi
+ shift
+
+ if [[ "$1" == "--no-gen-nvme" ]]; then
+		notice "Skipping gen_nvme.sh NVMe bdev configuration"
+ run_gen_nvme=false
+ shift
+ fi
+
+ local vhost_dir
+ vhost_dir="$(get_vhost_dir $vhost_name)"
+ local vhost_app="$SPDK_BIN_DIR/vhost"
+ local vhost_log_file="$vhost_dir/vhost.log"
+ local vhost_pid_file="$vhost_dir/vhost.pid"
+ local vhost_socket="$vhost_dir/usvhost"
+ notice "starting vhost app in background"
+ [[ -r "$vhost_pid_file" ]] && vhost_kill $vhost_name
+ [[ -d $vhost_dir ]] && rm -f $vhost_dir/*
+ mkdir -p $vhost_dir
+
+ if [[ ! -x $vhost_app ]]; then
+ error "application not found: $vhost_app"
+ return 1
+ fi
+
+ local cmd="$vhost_app -r $vhost_dir/rpc.sock $*"
+
+	notice "Logging to: $vhost_log_file"
+ notice "Socket: $vhost_socket"
+ notice "Command: $cmd"
+
+ timing_enter vhost_start
+ cd $vhost_dir
+ $cmd &
+ vhost_pid=$!
+ echo $vhost_pid > $vhost_pid_file
+
+ notice "waiting for app to run..."
+ waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"
+	# Do not generate NVMe bdevs if PCI access is disabled
+ if [[ "$cmd" != *"--no-pci"* ]] && [[ "$cmd" != *"-u"* ]] && $run_gen_nvme; then
+ $rootdir/scripts/gen_nvme.sh "--json" | $rootdir/scripts/rpc.py -s $vhost_dir/rpc.sock load_subsystem_config
+ fi
+
+ notice "vhost started - pid=$vhost_pid"
+ timing_exit vhost_start
+}
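+
+# Example usage (mirrors test/vhost/fio/fio.sh below; instance name and core
+# mask are illustrative):
+#   vhost_run vhost0 "-m 0x1"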
+
+function vhost_kill() {
+ local rc=0
+ local vhost_name="$1"
+
+ if [[ -z "$vhost_name" ]]; then
+ error "Must provide vhost name to vhost_kill"
+ return 0
+ fi
+
+ local vhost_dir
+ vhost_dir="$(get_vhost_dir $vhost_name)"
+ local vhost_pid_file="$vhost_dir/vhost.pid"
+
+ if [[ ! -r $vhost_pid_file ]]; then
+ warning "no vhost pid file found"
+ return 0
+ fi
+
+ timing_enter vhost_kill
+ local vhost_pid
+ vhost_pid="$(cat $vhost_pid_file)"
+ notice "killing vhost (PID $vhost_pid) app"
+
+ if kill -INT $vhost_pid > /dev/null; then
+ notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
+ for ((i = 0; i < 60; i++)); do
+ if kill -0 $vhost_pid; then
+ echo "."
+ sleep 1
+ else
+ break
+ fi
+ done
+ if kill -0 $vhost_pid; then
+ error "ERROR: vhost was NOT killed - sending SIGABRT"
+ kill -ABRT $vhost_pid
+ rm $vhost_pid_file
+ rc=1
+ else
+ while kill -0 $vhost_pid; do
+ echo "."
+ done
+ fi
+ elif kill -0 $vhost_pid; then
+ error "vhost NOT killed - you need to kill it manually"
+ rc=1
+ else
+ notice "vhost was not running"
+ fi
+
+ timing_exit vhost_kill
+ if [[ $rc == 0 ]]; then
+ rm $vhost_pid_file
+ fi
+
+ rm -rf "$vhost_dir"
+
+ return $rc
+}
+
+function vhost_rpc() {
+ local vhost_name="$1"
+
+ if [[ -z "$vhost_name" ]]; then
+ error "vhost name must be provided to vhost_rpc"
+ return 1
+ fi
+ shift
+
+ $rootdir/scripts/rpc.py -s $(get_vhost_dir $vhost_name)/rpc.sock "$@"
+}
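+
+# Example usage (mirrors test/vhost/fio/fio.sh below; bdev name and sizes are
+# illustrative):
+#   vhost_rpc vhost0 bdev_malloc_create 128 512 -b Malloc0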
+
+###
+# Mgmt functions
+###
+
+function assert_number() {
+ [[ "$1" =~ [0-9]+ ]] && return 0
+
+	error "Invalid or missing parameter: need number but got '$1'"
+ return 1
+}
+
+# Run command on vm with given password
+# First argument - vm number
+# Second argument - ssh password for vm
+#
+function vm_sshpass() {
+ vm_num_is_valid $1 || return 1
+
+ local ssh_cmd
+ ssh_cmd="sshpass -p $2 ssh \
+ -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ -o User=root \
+ -p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"
+
+ shift 2
+ $ssh_cmd "$@"
+}
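+
+# Example usage (illustrative; VM number and command are assumptions, the
+# password matches VM_PASSWORD above):
+#   vm_sshpass 0 root "uname -r"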
+
+# Helper to validate VM number
+# param $1 VM number
+#
+function vm_num_is_valid() {
+ [[ "$1" =~ ^[0-9]+$ ]] && return 0
+
+	error "Invalid or missing parameter: vm number '$1'"
+ return 1
+}
+
+# Print network socket for given VM number
+# param $1 virtual machine number
+#
+function vm_ssh_socket() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+
+ cat $vm_dir/ssh_socket
+}
+
+function vm_fio_socket() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+
+ cat $vm_dir/fio_socket
+}
+
+# Execute command on given VM
+# param $1 virtual machine number
+#
+function vm_exec() {
+ vm_num_is_valid $1 || return 1
+
+ local vm_num="$1"
+ shift
+
+ sshpass -p "$VM_PASSWORD" ssh \
+ -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ -o User=root \
+ -p $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS 127.0.0.1 \
+ "$@"
+}
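+
+# Example usage (mirrors test/vhost/fio/fio.sh below):
+#   vm_exec 0 "fio /root/vhost_fio.job"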
+
+# Execute scp command on given VM
+# param $1 virtual machine number
+#
+function vm_scp() {
+ vm_num_is_valid $1 || return 1
+
+ local vm_num="$1"
+ shift
+
+ sshpass -p "$VM_PASSWORD" scp \
+ -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ -o User=root \
+ -P $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS \
+ "$@"
+}
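+
+# Example usage (mirrors test/vhost/fio/fio.sh below). Note the 127.0.0.1:
+# prefix on the remote path - scp goes through the forwarded ssh port:
+#   vm_scp 0 "$testdir/vhost_fio.job" 127.0.0.1:/root/vhost_fio.job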
+
+# check if specified VM is running
+# param $1 VM num
+function vm_is_running() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+
+ if [[ ! -r $vm_dir/qemu.pid ]]; then
+ return 1
+ fi
+
+ local vm_pid
+ vm_pid="$(cat $vm_dir/qemu.pid)"
+
+ if /bin/kill -0 $vm_pid; then
+ return 0
+ else
+ if [[ $EUID -ne 0 ]]; then
+ warning "not root - assuming VM running since can't be checked"
+ return 0
+ fi
+
+ # not running - remove pid file
+ rm $vm_dir/qemu.pid
+ return 1
+ fi
+}
+
+# check if specified VM is running
+# param $1 VM num
+function vm_os_booted() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+
+ if [[ ! -r $vm_dir/qemu.pid ]]; then
+ error "VM $1 is not running"
+ return 1
+ fi
+
+ if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2> /dev/null; then
+ # Shutdown existing master. Ignore errors as it might not exist.
+ VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2> /dev/null
+ return 1
+ fi
+
+ return 0
+}
+
+# Shutdown given VM
+# param $1 virtual machine number
+# return non-zero in case of error.
+function vm_shutdown() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+ if [[ ! -d "$vm_dir" ]]; then
+		error "VM$1 ($vm_dir) does not exist - set it up first"
+ return 1
+ fi
+
+ if ! vm_is_running $1; then
+ notice "VM$1 ($vm_dir) is not running"
+ return 0
+ fi
+
+ # Temporarily disabling exit flag for next ssh command, since it will
+ # "fail" due to shutdown
+ notice "Shutting down virtual machine $vm_dir"
+ set +e
+ vm_exec $1 "nohup sh -c 'shutdown -h -P now'" || true
+ notice "VM$1 is shutting down - wait a while to complete"
+ set -e
+}
+
+# Kill given VM
+# param $1 virtual machine number
+#
+function vm_kill() {
+ vm_num_is_valid $1 || return 1
+ local vm_dir="$VM_DIR/$1"
+
+ if [[ ! -r $vm_dir/qemu.pid ]]; then
+ return 0
+ fi
+
+ local vm_pid
+ vm_pid="$(cat $vm_dir/qemu.pid)"
+
+ notice "Killing virtual machine $vm_dir (pid=$vm_pid)"
+	# First kill may fail if the process is already gone; it is an error only if the VM is still running afterwards
+ if /bin/kill $vm_pid; then
+ notice "process $vm_pid killed"
+ rm $vm_dir/qemu.pid
+ rm -rf $vm_dir
+ elif vm_is_running $1; then
+ error "Process $vm_pid NOT killed"
+ return 1
+ fi
+}
+
+# List all VM numbers in VM_DIR
+#
+function vm_list_all() {
+ local vms
+ vms="$(
+ shopt -s nullglob
+ echo $VM_DIR/[0-9]*
+ )"
+ if [[ -n "$vms" ]]; then
+ basename --multiple $vms
+ fi
+}
+
+# Kills all VM in $VM_DIR
+#
+function vm_kill_all() {
+ local vm
+ for vm in $(vm_list_all); do
+ vm_kill $vm
+ done
+
+ rm -rf $VM_DIR
+}
+
+# Shutdown all VM in $VM_DIR
+#
+function vm_shutdown_all() {
+ # XXX: temporarily disable to debug shutdown issue
+ # xtrace_disable
+
+ local vms
+ vms=$(vm_list_all)
+ local vm
+
+ for vm in $vms; do
+ vm_shutdown $vm
+ done
+
+ notice "Waiting for VMs to shutdown..."
+ local timeo=30
+ while [[ $timeo -gt 0 ]]; do
+ local all_vms_down=1
+ for vm in $vms; do
+ if vm_is_running $vm; then
+ all_vms_down=0
+ break
+ fi
+ done
+
+ if [[ $all_vms_down == 1 ]]; then
+ notice "All VMs successfully shut down"
+ xtrace_restore
+ return 0
+ fi
+
+ ((timeo -= 1))
+ sleep 1
+ done
+
+ rm -rf $VM_DIR
+
+ xtrace_restore
+}
+
+function vm_setup() {
+ xtrace_disable
+ local OPTIND optchar vm_num
+
+ local os=""
+ local os_mode=""
+ local qemu_args=()
+ local disk_type_g=NOT_DEFINED
+ local read_only="false"
+	# List of disks, built from a ":"-separated string
+ local disks=()
+ local raw_cache=""
+ local vm_incoming=""
+ local vm_migrate_to=""
+ local force_vm=""
+ local guest_memory=1024
+ local queue_number=""
+ local vhost_dir
+ local packed=false
+ vhost_dir="$(get_vhost_dir 0)"
+ while getopts ':-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ os=*) os="${OPTARG#*=}" ;;
+ os-mode=*) os_mode="${OPTARG#*=}" ;;
+ qemu-args=*) qemu_args+=("${OPTARG#*=}") ;;
+ disk-type=*) disk_type_g="${OPTARG#*=}" ;;
+ read-only=*) read_only="${OPTARG#*=}" ;;
+ disks=*) IFS=":" read -ra disks <<< "${OPTARG#*=}" ;;
+ raw-cache=*) raw_cache=",cache${OPTARG#*=}" ;;
+ force=*) force_vm=${OPTARG#*=} ;;
+ memory=*) guest_memory=${OPTARG#*=} ;;
+ queue_num=*) queue_number=${OPTARG#*=} ;;
+ incoming=*) vm_incoming="${OPTARG#*=}" ;;
+ migrate-to=*) vm_migrate_to="${OPTARG#*=}" ;;
+ vhost-name=*) vhost_dir="$(get_vhost_dir ${OPTARG#*=})" ;;
+ spdk-boot=*) local boot_from="${OPTARG#*=}" ;;
+ packed) packed=true ;;
+ *)
+ error "unknown argument $OPTARG"
+ return 1
+ ;;
+ esac
+ ;;
+ *)
+ error "vm_create Unknown param $OPTARG"
+ return 1
+ ;;
+ esac
+ done
+
+ # Find next directory we can use
+ if [[ -n $force_vm ]]; then
+ vm_num=$force_vm
+
+ vm_num_is_valid $vm_num || return 1
+ local vm_dir="$VM_DIR/$vm_num"
+ [[ -d $vm_dir ]] && warning "removing existing VM in '$vm_dir'"
+ else
+ local vm_dir=""
+
+ set +x
+ for ((i = 0; i <= 256; i++)); do
+ local vm_dir="$VM_DIR/$i"
+ [[ ! -d $vm_dir ]] && break
+ done
+ xtrace_restore
+
+ vm_num=$i
+ fi
+
+ if [[ $vm_num -eq 256 ]]; then
+ error "no free VM found. do some cleanup (256 VMs created, are you insane?)"
+ return 1
+ fi
+
+ if [[ -n "$vm_migrate_to" && -n "$vm_incoming" ]]; then
+ error "'--incoming' and '--migrate-to' cannot be used together"
+ return 1
+ elif [[ -n "$vm_incoming" ]]; then
+ if [[ -n "$os_mode" || -n "$os" ]]; then
+ error "'--incoming' can't be used together with '--os' nor '--os-mode'"
+ return 1
+ fi
+
+ os_mode="original"
+ os="$VM_DIR/$vm_incoming/os.qcow2"
+ elif [[ -n "$vm_migrate_to" ]]; then
+ [[ "$os_mode" != "backing" ]] && warning "Using 'backing' mode for OS since '--migrate-to' is used"
+ os_mode=backing
+ fi
+
+ notice "Creating new VM in $vm_dir"
+ mkdir -p $vm_dir
+
+ if [[ "$os_mode" == "backing" ]]; then
+ notice "Creating backing file for OS image file: $os"
+ if ! $QEMU_IMG_BIN create -f qcow2 -b $os $vm_dir/os.qcow2; then
+ error "Failed to create OS backing file in '$vm_dir/os.qcow2' using '$os'"
+ return 1
+ fi
+
+ local os=$vm_dir/os.qcow2
+ elif [[ "$os_mode" == "original" ]]; then
+ warning "Using original OS image file: $os"
+ elif [[ "$os_mode" != "snapshot" ]]; then
+ if [[ -z "$os_mode" ]]; then
+ notice "No '--os-mode' parameter provided - using 'snapshot'"
+ os_mode="snapshot"
+ else
+ error "Invalid '--os-mode=$os_mode'"
+ return 1
+ fi
+ fi
+
+ local qemu_mask_param="VM_${vm_num}_qemu_mask"
+ local qemu_numa_node_param="VM_${vm_num}_qemu_numa_node"
+
+ if [[ -z "${!qemu_mask_param}" ]] || [[ -z "${!qemu_numa_node_param}" ]]; then
+ error "Parameters ${qemu_mask_param} or ${qemu_numa_node_param} not found in autotest.config file"
+ return 1
+ fi
+
+ local task_mask=${!qemu_mask_param}
+
+ notice "TASK MASK: $task_mask"
+ local cmd=(taskset -a -c "$task_mask" "$QEMU_BIN")
+ local vm_socket_offset=$((10000 + 100 * vm_num))
+
+ local ssh_socket=$((vm_socket_offset + 0))
+ local fio_socket=$((vm_socket_offset + 1))
+ local monitor_port=$((vm_socket_offset + 2))
+ local migration_port=$((vm_socket_offset + 3))
+ local gdbserver_socket=$((vm_socket_offset + 4))
+ local vnc_socket=$((100 + vm_num))
+ local qemu_pid_file="$vm_dir/qemu.pid"
+ local cpu_num=0
+
+ set +x
+ # cpu list for taskset can be comma separated or range
+ # or both at the same time, so first split on commas
+ cpu_list=$(echo $task_mask | tr "," "\n")
+ queue_number=0
+ for c in $cpu_list; do
+ # if range is detected - count how many cpus
+ if [[ $c =~ [0-9]+-[0-9]+ ]]; then
+ val=$((c - 1))
+ val=${val#-}
+ else
+ val=1
+ fi
+ cpu_num=$((cpu_num + val))
+ queue_number=$((queue_number + val))
+ done
+
+ if [ -z $queue_number ]; then
+ queue_number=$cpu_num
+ fi
+
+ xtrace_restore
+
+ local node_num=${!qemu_numa_node_param}
+ local boot_disk_present=false
+ notice "NUMA NODE: $node_num"
+ cmd+=(-m "$guest_memory" --enable-kvm -cpu host -smp "$cpu_num" -vga std -vnc ":$vnc_socket" -daemonize)
+ cmd+=(-object "memory-backend-file,id=mem,size=${guest_memory}M,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind")
+ [[ $os_mode == snapshot ]] && cmd+=(-snapshot)
+ [[ -n "$vm_incoming" ]] && cmd+=(-incoming "tcp:0:$migration_port")
+ cmd+=(-monitor "telnet:127.0.0.1:$monitor_port,server,nowait")
+ cmd+=(-numa "node,memdev=mem")
+ cmd+=(-pidfile "$qemu_pid_file")
+ cmd+=(-serial "file:$vm_dir/serial.log")
+ cmd+=(-D "$vm_dir/qemu.log")
+ cmd+=(-chardev "file,path=$vm_dir/seabios.log,id=seabios" -device "isa-debugcon,iobase=0x402,chardev=seabios")
+ cmd+=(-net "user,hostfwd=tcp::$ssh_socket-:22,hostfwd=tcp::$fio_socket-:8765")
+ cmd+=(-net nic)
+ if [[ -z "$boot_from" ]]; then
+ cmd+=(-drive "file=$os,if=none,id=os_disk")
+ cmd+=(-device "ide-hd,drive=os_disk,bootindex=0")
+ fi
+
+ if ((${#disks[@]} == 0)) && [[ $disk_type_g == virtio* ]]; then
+ disks=("default_virtio.img")
+ elif ((${#disks[@]} == 0)); then
+ error "No disks defined, aborting"
+ return 1
+ fi
+
+ for disk in "${disks[@]}"; do
+ # Each disk can define its type in a form of a disk_name,type. The remaining parts
+ # of the string are dropped.
+ IFS="," read -r disk disk_type _ <<< "$disk"
+ [[ -z $disk_type ]] && disk_type=$disk_type_g
+
+ case $disk_type in
+ virtio)
+ local raw_name="RAWSCSI"
+ local raw_disk=$vm_dir/test.img
+
+ if [[ -n $disk ]]; then
+ [[ ! -b $disk ]] && touch $disk
+ local raw_disk
+ raw_disk=$(readlink -f $disk)
+ fi
+
+				# Create the disk file if it does not exist or is smaller than 1G
+ if { [[ -f $raw_disk ]] && [[ $(stat --printf="%s" $raw_disk) -lt $((1024 * 1024 * 1024)) ]]; } \
+ || [[ ! -e $raw_disk ]]; then
+ if [[ $raw_disk =~ /dev/.* ]]; then
+ error \
+							"ERROR: Virtio disk points to a missing device ($raw_disk) -\n" \
+ " this is probably not what you want."
+ return 1
+ fi
+
+					notice "Creating Virtio disk $raw_disk"
+ dd if=/dev/zero of=$raw_disk bs=1024k count=1024
+ else
+ notice "Using existing image $raw_disk"
+ fi
+
+ cmd+=(-device "virtio-scsi-pci,num_queues=$queue_number")
+ cmd+=(-device "scsi-hd,drive=hd$i,vendor=$raw_name")
+ cmd+=(-drive "if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache")
+ ;;
+ spdk_vhost_scsi)
+ notice "using socket $vhost_dir/naa.$disk.$vm_num"
+ cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
+ cmd+=(-device "vhost-user-scsi-pci,id=scsi_$disk,num_queues=$queue_number,chardev=char_$disk")
+ if [[ "$disk" == "$boot_from" ]]; then
+ cmd[-1]+=,bootindex=0
+ boot_disk_present=true
+ fi
+ ;;
+ spdk_vhost_blk)
+ notice "using socket $vhost_dir/naa.$disk.$vm_num"
+ cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
+ cmd+=(-device "vhost-user-blk-pci,num-queues=$queue_number,chardev=char_$disk")
+ if [[ "$disk" == "$boot_from" ]]; then
+ cmd[-1]+=,bootindex=0
+ boot_disk_present=true
+ fi
+
+ if $packed; then
+ check_qemu_packedring_support
+ notice "Enabling packed ring support for VM $vm_num, controller $vhost_dir/naa.$disk.$vm_num"
+ cmd[-1]+=,packed=on
+ fi
+ ;;
+ kernel_vhost)
+ if [[ -z $disk ]]; then
+ error "need WWN for $disk_type"
+ return 1
+ elif [[ ! $disk =~ ^[[:alpha:]]{3}[.][[:xdigit:]]+$ ]]; then
+					error "$disk_type - disk(wwn)=$disk does not look like a WWN"
+ return 1
+ fi
+ notice "Using kernel vhost disk wwn=$disk"
+ cmd+=(-device "vhost-scsi-pci,wwpn=$disk,num_queues=$queue_number")
+ ;;
+ *)
+ error "unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk or kernel_vhost"
+ return 1
+ ;;
+ esac
+ done
+
+ if [[ -n $boot_from ]] && [[ $boot_disk_present == false ]]; then
+ error "Boot from $boot_from is selected but device is not present"
+ return 1
+ fi
+
+ ((${#qemu_args[@]})) && cmd+=("${qemu_args[@]}")
+ notice "Saving to $vm_dir/run.sh"
+ cat <<- RUN > "$vm_dir/run.sh"
+ #!/bin/bash
+ qemu_log () {
+ echo "=== qemu.log ==="
+ [[ -s $vm_dir/qemu.log ]] && cat $vm_dir/qemu.log
+ echo "=== qemu.log ==="
+ }
+
+ if [[ \$EUID -ne 0 ]]; then
+ echo "Go away user come back as root"
+ exit 1
+ fi
+
+ trap "qemu_log" EXIT
+
+ qemu_cmd=($(printf '%s\n' "${cmd[@]}"))
+ chmod +r $vm_dir/*
+ echo "Running VM in $vm_dir"
+ rm -f $qemu_pid_file
+ "\${qemu_cmd[@]}"
+
+ echo "Waiting for QEMU pid file"
+ sleep 1
+ [[ ! -f $qemu_pid_file ]] && sleep 1
+ [[ ! -f $qemu_pid_file ]] && echo "ERROR: no qemu pid file found" && exit 1
+ exit 0
+ # EOF
+ RUN
+ chmod +x $vm_dir/run.sh
+
+ # Save generated sockets redirection
+ echo $ssh_socket > $vm_dir/ssh_socket
+ echo $fio_socket > $vm_dir/fio_socket
+ echo $monitor_port > $vm_dir/monitor_port
+
+ rm -f $vm_dir/migration_port
+ [[ -z $vm_incoming ]] || echo $migration_port > $vm_dir/migration_port
+
+ echo $gdbserver_socket > $vm_dir/gdbserver_socket
+ echo $vnc_socket >> $vm_dir/vnc_socket
+
+ [[ -z $vm_incoming ]] || ln -fs $VM_DIR/$vm_incoming $vm_dir/vm_incoming
+ [[ -z $vm_migrate_to ]] || ln -fs $VM_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
+}
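+
+# Example usage (mirrors test/vhost/fio/fio.sh below; disk name, vhost name and
+# VM number are illustrative):
+#   vm_setup --os="$VM_IMAGE" --disk-type=spdk_vhost_scsi --disks="VhostScsi0" \
+#       --vhost-name=vhost0 --force=0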
+
+function vm_run() {
+ local OPTIND optchar vm
+ local run_all=false
+ local vms_to_run=""
+
+ while getopts 'a-:' optchar; do
+ case "$optchar" in
+ a) run_all=true ;;
+ *)
+ error "Unknown param $OPTARG"
+ return 1
+ ;;
+ esac
+ done
+
+ if $run_all; then
+ vms_to_run="$(vm_list_all)"
+ else
+ shift $((OPTIND - 1))
+ for vm in "$@"; do
+ vm_num_is_valid $1 || return 1
+ if [[ ! -x $VM_DIR/$vm/run.sh ]]; then
+				error "VM$vm not defined - set it up first"
+ return 1
+ fi
+ vms_to_run+=" $vm"
+ done
+ fi
+
+ for vm in $vms_to_run; do
+ if vm_is_running $vm; then
+ warning "VM$vm ($VM_DIR/$vm) already running"
+ continue
+ fi
+
+ notice "running $VM_DIR/$vm/run.sh"
+ if ! $VM_DIR/$vm/run.sh; then
+ error "FAILED to run vm $vm"
+ return 1
+ fi
+ done
+}
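+
+# Example usage (illustrative): "vm_run 0 1" starts VM0 and VM1, while
+# "vm_run -a" starts every VM defined in $VM_DIR.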
+
+function vm_print_logs() {
+ vm_num=$1
+ warning "================"
+ warning "QEMU LOG:"
+ if [[ -r $VM_DIR/$vm_num/qemu.log ]]; then
+ cat $VM_DIR/$vm_num/qemu.log
+ else
+ warning "LOG qemu.log not found"
+ fi
+
+ warning "VM LOG:"
+ if [[ -r $VM_DIR/$vm_num/serial.log ]]; then
+ cat $VM_DIR/$vm_num/serial.log
+ else
+ warning "LOG serial.log not found"
+ fi
+
+ warning "SEABIOS LOG:"
+ if [[ -r $VM_DIR/$vm_num/seabios.log ]]; then
+ cat $VM_DIR/$vm_num/seabios.log
+ else
+ warning "LOG seabios.log not found"
+ fi
+ warning "================"
+}
+
+# Wait for all created VMs to boot.
+# param $1 max wait time
+function vm_wait_for_boot() {
+ assert_number $1
+
+ xtrace_disable
+
+ local all_booted=false
+ local timeout_time=$1
+ [[ $timeout_time -lt 10 ]] && timeout_time=10
+ local timeout_time
+ timeout_time=$(date -d "+$timeout_time seconds" +%s)
+
+ notice "Waiting for VMs to boot"
+ shift
+ if [[ "$*" == "" ]]; then
+ local vms_to_check="$VM_DIR/[0-9]*"
+ else
+ local vms_to_check=""
+ for vm in "$@"; do
+ vms_to_check+=" $VM_DIR/$vm"
+ done
+ fi
+
+ for vm in $vms_to_check; do
+ local vm_num
+ vm_num=$(basename $vm)
+ local i=0
+ notice "waiting for VM$vm_num ($vm)"
+ while ! vm_os_booted $vm_num; do
+ if ! vm_is_running $vm_num; then
+ warning "VM $vm_num is not running"
+ vm_print_logs $vm_num
+ xtrace_restore
+ return 1
+ fi
+
+ if [[ $(date +%s) -gt $timeout_time ]]; then
+ warning "timeout waiting for machines to boot"
+ vm_print_logs $vm_num
+ xtrace_restore
+ return 1
+ fi
+ if ((i > 30)); then
+ local i=0
+ echo
+ fi
+ echo -n "."
+ sleep 1
+ done
+ echo ""
+ notice "VM$vm_num ready"
+		# Change timeout for stopping services to prevent lengthy powerdowns
+		# Check that the remote system is not Cygwin in case of Windows VMs
+ local vm_os
+ vm_os=$(vm_exec $vm_num "uname -o")
+ if [[ "$vm_os" != "Cygwin" ]]; then
+ vm_exec $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
+ fi
+ done
+
+ notice "all VMs ready"
+ xtrace_restore
+ return 0
+}
+
+function vm_start_fio_server() {
+ local OPTIND optchar
+ local readonly=''
+ local fio_bin=''
+ while getopts ':-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
+ readonly) local readonly="--readonly" ;;
+ *) error "Invalid argument '$OPTARG'" && return 1 ;;
+ esac
+ ;;
+ *) error "Invalid argument '$OPTARG'" && return 1 ;;
+ esac
+ done
+
+ shift $((OPTIND - 1))
+ for vm_num in "$@"; do
+ notice "Starting fio server on VM$vm_num"
+ if [[ $fio_bin != "" ]]; then
+ vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
+ vm_exec $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
+ else
+ vm_exec $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
+ fi
+ done
+}
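+
+# Example usage (illustrative; the fio binary path is an assumption):
+#   vm_start_fio_server --fio-bin=/usr/src/fio/fio 0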
+
+function vm_check_scsi_location() {
+	# Script to find the wanted disk
+ local script='shopt -s nullglob;
+ for entry in /sys/block/sd*; do
+ disk_type="$(cat $entry/device/vendor)";
+ if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then
+ fname=$(basename $entry);
+ echo -n " $fname";
+ fi;
+ done'
+
+ SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"
+
+ if [[ -z "$SCSI_DISK" ]]; then
+ error "no test disk found!"
+ return 1
+ fi
+}
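+
+# Example usage (mirrors test/vhost/fiotest/fio.sh below); the detected disk
+# names are stored in $SCSI_DISK:
+#   vm_check_scsi_location 0 && echo "$SCSI_DISK"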
+
+# Note: to use this function your VM should be run with
+# appropriate memory and with SPDK source already cloned
+# and compiled in /root/spdk.
+function vm_check_virtio_location() {
+ vm_exec $1 NRHUGE=512 /root/spdk/scripts/setup.sh
+ vm_exec $1 "cat > /root/bdev.conf" <<- EOF
+ [VirtioPci]
+ Enable Yes
+ EOF
+
+ vm_exec $1 "cat /root/bdev.conf"
+
+ vm_exec $1 "bash -s" <<- EOF
+ set -e
+ rootdir="/root/spdk"
+ source /root/spdk/test/common/autotest_common.sh
+ discover_bdevs /root/spdk /root/bdev.conf | jq -r '[.[].name] | join(" ")' > /root/fio_bdev_filenames
+ exit 0
+ EOF
+
+ SCSI_DISK=$(vm_exec $1 cat /root/fio_bdev_filenames)
+ if [[ -z "$SCSI_DISK" ]]; then
+ error "no virtio test disk found!"
+ return 1
+ fi
+}
+
+# Script to perform scsi device reset on all disks in VM
+# param $1 VM num
+# param $2..$n Disks to perform reset on
+function vm_reset_scsi_devices() {
+ for disk in "${@:2}"; do
+ notice "VM$1 Performing device reset on disk $disk"
+ vm_exec $1 sg_reset /dev/$disk -vNd
+ done
+}
+
+function vm_check_blk_location() {
+ local script='shopt -s nullglob; cd /sys/block; echo vd*'
+ SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"
+
+ if [[ -z "$SCSI_DISK" ]]; then
+ error "no blk test disk found!"
+ return 1
+ fi
+}
+
+function run_fio() {
+ local arg
+ local job_file=""
+ local fio_bin=""
+ local vms=()
+ local out=""
+ local vm
+ local run_server_mode=true
+ local run_plugin_mode=false
+ local fio_start_cmd
+ local fio_output_format="normal"
+ local fio_gtod_reduce=false
+ local wait_for_fio=true
+
+ for arg in "$@"; do
+ case "$arg" in
+ --job-file=*) local job_file="${arg#*=}" ;;
+ --fio-bin=*) local fio_bin="${arg#*=}" ;;
+ --vm=*) vms+=("${arg#*=}") ;;
+ --out=*)
+ local out="${arg#*=}"
+ mkdir -p $out
+ ;;
+ --local) run_server_mode=false ;;
+ --plugin)
+ notice "Using plugin mode. Disabling server mode."
+ run_plugin_mode=true
+ run_server_mode=false
+ ;;
+ --json) fio_output_format="json" ;;
+ --hide-results) hide_results=true ;;
+ --no-wait-for-fio) wait_for_fio=false ;;
+ --gtod-reduce) fio_gtod_reduce=true ;;
+ *)
+ error "Invalid argument '$arg'"
+ return 1
+ ;;
+ esac
+ done
+
+ if [[ -n "$fio_bin" && ! -r "$fio_bin" ]]; then
+ error "FIO binary '$fio_bin' does not exist"
+ return 1
+ fi
+
+ if [[ -z "$fio_bin" ]]; then
+ fio_bin="fio"
+ fi
+
+ if [[ ! -r "$job_file" ]]; then
+ error "Fio job '$job_file' does not exist"
+ return 1
+ fi
+
+ fio_start_cmd="$fio_bin --eta=never "
+
+ local job_fname
+ job_fname=$(basename "$job_file")
+ log_fname="${job_fname%%.*}.log"
+ fio_start_cmd+=" --output=$out/$log_fname --output-format=$fio_output_format "
+
+ # prepare job file for each VM
+ for vm in "${vms[@]}"; do
+ local vm_num=${vm%%:*}
+ local vmdisks=${vm#*:}
+
+ sed "s@filename=@filename=$vmdisks@" $job_file | vm_exec $vm_num "cat > /root/$job_fname"
+
+ if $fio_gtod_reduce; then
+ vm_exec $vm_num "echo 'gtod_reduce=1' >> /root/$job_fname"
+ fi
+
+ vm_exec $vm_num cat /root/$job_fname
+
+ if $run_server_mode; then
+ fio_start_cmd+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$job_fname "
+ fi
+
+ if ! $run_server_mode; then
+ if [[ -n "$fio_bin" ]]; then
+ if ! $run_plugin_mode; then
+ vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
+ vm_fio_bin="/root/fio"
+ else
+ vm_fio_bin="/usr/src/fio/fio"
+ fi
+ fi
+
+ notice "Running local fio on VM $vm_num"
+ vm_exec $vm_num "$vm_fio_bin --output=/root/$log_fname --output-format=$fio_output_format /root/$job_fname & echo \$! > /root/fio.pid" &
+ vm_exec_pids+=("$!")
+ fi
+ done
+
+ if ! $run_server_mode; then
+ if ! $wait_for_fio; then
+ return 0
+ fi
+ echo "Waiting for guest fio instances to finish.."
+ wait "${vm_exec_pids[@]}"
+
+ for vm in "${vms[@]}"; do
+ local vm_num=${vm%%:*}
+ vm_exec $vm_num cat /root/$log_fname > "$out/vm${vm_num}_${log_fname}"
+ done
+ return 0
+ fi
+
+ $fio_start_cmd
+ sleep 1
+
+ if [[ "$fio_output_format" == "json" ]]; then
+		# Fio in client-server mode produces a lot of "trash" output
+		# preceding the JSON structure, making it impossible to parse.
+		# Remove those lines from the file.
+ # shellcheck disable=SC2005
+ echo "$(grep -vP '^[<\w]' "$out/$log_fname")" > "$out/$log_fname"
+ fi
+
+ if [[ ! $hide_results ]]; then
+ cat $out/$log_fname
+ fi
+}
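+
+# Example usage (mirrors test/vhost/fiotest/fio.sh below; job file, output dir
+# and disk path are illustrative):
+#   run_fio --job-file=default_integrity.job --out="$VHOST_DIR/fio_results" \
+#       --vm=0:/dev/sdb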
+
+# Parsing fio results for json output and client-server mode only!
+function parse_fio_results() {
+ local fio_log_dir=$1
+ local fio_log_filename=$2
+ local fio_csv_filename
+
+ # Variables used in parsing loop
+ local log_file
+ local rwmode mixread mixwrite
+ local lat_key lat_divisor
+ local client_stats iops bw
+ local read_avg_lat read_min_lat read_max_lat
+	local write_avg_lat write_min_lat write_max_lat
+
+ declare -A results
+ results["iops"]=0
+ results["bw"]=0
+ results["avg_lat"]=0
+ results["min_lat"]=0
+ results["max_lat"]=0
+
+ # Loop using the log filename to see if there are any other
+ # matching files. This is in case we ran fio test multiple times.
+ log_files=("$fio_log_dir/$fio_log_filename"*)
+ for log_file in "${log_files[@]}"; do
+ rwmode=$(jq -r '.["client_stats"][0]["job options"]["rw"]' "$log_file")
+ mixread=1
+ mixwrite=1
+ if [[ $rwmode = *"rw"* ]]; then
+ mixread=$(jq -r '.["client_stats"][0]["job options"]["rwmixread"]' "$log_file")
+ mixread=$(bc -l <<< "scale=3; $mixread/100")
+ mixwrite=$(bc -l <<< "scale=3; 1-$mixread")
+ fi
+
+ client_stats=$(jq -r '.["client_stats"][] | select(.jobname == "All clients")' "$log_file")
+
+ # Check latency unit and later normalize to microseconds
+ lat_key="lat_us"
+ lat_divisor=1
+ if jq -er '.read["lat_ns"]' &> /dev/null <<< $client_stats; then
+ lat_key="lat_ns"
+ lat_divisor=1000
+ fi
+
+		# Horrific bash floating point arithmetic operations below.
+		# Viewer discretion is advised.
+ iops=$(jq -r '[.read["iops"],.write["iops"]] | add' <<< $client_stats)
+ bw=$(jq -r '[.read["bw"],.write["bw"]] | add' <<< $client_stats)
+ read_avg_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["mean"]' <<< $client_stats)
+ read_min_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["min"]' <<< $client_stats)
+ read_max_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["max"]' <<< $client_stats)
+ write_avg_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["mean"]' <<< $client_stats)
+ write_min_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["min"]' <<< $client_stats)
+ write_max_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["max"]' <<< $client_stats)
+
+ results["iops"]=$(bc -l <<< "${results[iops]} + $iops")
+ results["bw"]=$(bc -l <<< "${results[bw]} + $bw")
+ results["avg_lat"]=$(bc -l <<< "${results[avg_lat]} + ($mixread*$read_avg_lat + $mixwrite*$write_avg_lat)/$lat_divisor")
+ results["min_lat"]=$(bc -l <<< "${results[min_lat]} + ($mixread*$read_min_lat + $mixwrite*$write_min_lat)/$lat_divisor")
+ results["max_lat"]=$(bc -l <<< "${results[max_lat]} + ($mixread*$read_max_lat + $mixwrite*$write_max_lat)/$lat_divisor")
+ done
+
+ results["iops"]=$(bc -l <<< "scale=3; ${results[iops]} / ${#log_files[@]}")
+ results["bw"]=$(bc -l <<< "scale=3; ${results[bw]} / ${#log_files[@]}")
+ results["avg_lat"]=$(bc -l <<< "scale=3; ${results[avg_lat]} / ${#log_files[@]}")
+ results["min_lat"]=$(bc -l <<< "scale=3; ${results[min_lat]} / ${#log_files[@]}")
+ results["max_lat"]=$(bc -l <<< "scale=3; ${results[max_lat]} / ${#log_files[@]}")
+
+ fio_csv_filename="${fio_log_filename%%.*}.csv"
+ cat <<- EOF > "$fio_log_dir/$fio_csv_filename"
+ iops,bw,avg_lat,min_lat,max_lat
+ ${results["iops"]},${results["bw"]},${results["avg_lat"]},${results["min_lat"]},${results["max_lat"]}
+ EOF
+}
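+
+# Example usage (illustrative; assumes run_fio was invoked in client-server
+# mode with --json, so the per-job log name is an assumption):
+#   parse_fio_results "$VHOST_DIR/fio_results" "default_integrity.log"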
+
+# Shutdown or kill any running VM and SPDK APP.
+#
+function at_app_exit() {
+ local vhost_name
+
+ notice "APP EXITING"
+ notice "killing all VMs"
+ vm_kill_all
+ # Kill vhost application
+ notice "killing vhost app"
+
+ for vhost_name in "$TARGET_DIR"/*; do
+ vhost_kill $vhost_name
+ done
+
+ notice "EXIT DONE"
+}
+
+function error_exit() {
+ trap - ERR
+ print_backtrace
+ set +e
+ error "Error on $1 $2"
+
+ at_app_exit
+ exit 1
+}
diff --git a/src/spdk/test/vhost/common/autotest.config b/src/spdk/test/vhost/common/autotest.config
new file mode 100644
index 000000000..96b0d08be
--- /dev/null
+++ b/src/spdk/test/vhost/common/autotest.config
@@ -0,0 +1,38 @@
+vhost_0_reactor_mask="[0]"
+vhost_0_master_core=0
+
+VM_0_qemu_mask=1-2
+VM_0_qemu_numa_node=0
+
+VM_1_qemu_mask=3-4
+VM_1_qemu_numa_node=0
+
+VM_2_qemu_mask=5-6
+VM_2_qemu_numa_node=0
+
+VM_3_qemu_mask=7-8
+VM_3_qemu_numa_node=0
+
+VM_4_qemu_mask=9-10
+VM_4_qemu_numa_node=0
+
+VM_5_qemu_mask=11-12
+VM_5_qemu_numa_node=0
+
+VM_6_qemu_mask=13-14
+VM_6_qemu_numa_node=1
+
+VM_7_qemu_mask=15-16
+VM_7_qemu_numa_node=1
+
+VM_8_qemu_mask=17-18
+VM_8_qemu_numa_node=1
+
+VM_9_qemu_mask=19-20
+VM_9_qemu_numa_node=1
+
+VM_10_qemu_mask=21-22
+VM_10_qemu_numa_node=1
+
+VM_11_qemu_mask=23-24
+VM_11_qemu_numa_node=1
diff --git a/src/spdk/test/vhost/common/fio_jobs/default_initiator.job b/src/spdk/test/vhost/common/fio_jobs/default_initiator.job
new file mode 100644
index 000000000..32c993bd2
--- /dev/null
+++ b/src/spdk/test/vhost/common/fio_jobs/default_initiator.job
@@ -0,0 +1,11 @@
+[global]
+thread=1
+group_reporting=1
+direct=1
+time_based=1
+do_verify=1
+verify=md5
+verify_backlog=1024
+fsync_on_close=1
+iodepth=32
+[job0]
diff --git a/src/spdk/test/vhost/common/fio_jobs/default_integrity.job b/src/spdk/test/vhost/common/fio_jobs/default_integrity.job
new file mode 100644
index 000000000..06398b506
--- /dev/null
+++ b/src/spdk/test/vhost/common/fio_jobs/default_integrity.job
@@ -0,0 +1,19 @@
+[global]
+blocksize_range=4k-512k
+iodepth=512
+iodepth_batch=128
+iodepth_low=256
+ioengine=libaio
+size=1G
+io_size=4G
+filename=
+group_reporting
+thread
+numjobs=1
+direct=1
+rw=randwrite
+do_verify=1
+verify=md5
+verify_backlog=1024
+fsync_on_close=1
+[nvme-host]
diff --git a/src/spdk/test/vhost/common/fio_jobs/default_integrity_nightly.job b/src/spdk/test/vhost/common/fio_jobs/default_integrity_nightly.job
new file mode 100644
index 000000000..097401780
--- /dev/null
+++ b/src/spdk/test/vhost/common/fio_jobs/default_integrity_nightly.job
@@ -0,0 +1,23 @@
+[global]
+ioengine=libaio
+runtime=10
+filename=
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_backlog=1024
+
+[randwrite]
+stonewall
+rw=randwrite
+bs=512k
+iodepth=256
+
+[randrw]
+stonewall
+rw=randrw
+bs=128k
+iodepth=64
diff --git a/src/spdk/test/vhost/common/fio_jobs/default_performance.job b/src/spdk/test/vhost/common/fio_jobs/default_performance.job
new file mode 100644
index 000000000..a51cb5eda
--- /dev/null
+++ b/src/spdk/test/vhost/common/fio_jobs/default_performance.job
@@ -0,0 +1,16 @@
+[global]
+blocksize_range=4k-512k
+iodepth=512
+iodepth_batch=128
+iodepth_low=256
+ioengine=libaio
+size=10G
+filename=
+ramp_time=10
+group_reporting
+thread
+numjobs=1
+direct=1
+rw=randread
+fsync_on_close=1
+[nvme-host]
diff --git a/src/spdk/test/vhost/fio/fio.sh b/src/spdk/test/vhost/fio/fio.sh
new file mode 100755
index 000000000..3d8bf6092
--- /dev/null
+++ b/src/spdk/test/vhost/fio/fio.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+set -e
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+MALLOC_BDEV_SIZE=128
+MALLOC_BLOCK_SIZE=512
+
+vhosttestinit
+
+#TODO: Both scsi and blk?
+
+timing_enter vhost_fio
+
+trap "at_app_exit; process_shm --id 0; exit 1" SIGINT SIGTERM EXIT
+
+vhost_run vhost0 "-m 0x1"
+
+# Create vhost scsi controller
+vhost_rpc vhost0 bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+vhost_rpc vhost0 vhost_create_scsi_controller naa.VhostScsi0.0
+vhost_rpc vhost0 vhost_scsi_controller_add_target naa.VhostScsi0.0 0 "Malloc0"
+
+# Create vhost blk controller
+vhost_rpc vhost0 bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
+vhost_rpc vhost0 vhost_create_blk_controller naa.Malloc1.1 Malloc1
+
+# Start qemu based VMs
+vm_setup --os="$VM_IMAGE" --disk-type=spdk_vhost_scsi --disks="VhostScsi0" --vhost-name=vhost0 --force=0
+vm_setup --os="$VM_IMAGE" --disk-type=spdk_vhost_blk --disks="Malloc1" --vhost-name=vhost0 --force=1
+
+vm_run 0
+vm_run 1
+
+vm_wait_for_boot 300 0
+vm_wait_for_boot 300 1
+sleep 5
+
+# Run the fio workload on the VM
+vm_scp 0 $testdir/vhost_fio.job 127.0.0.1:/root/vhost_fio.job
+vm_exec 0 "fio /root/vhost_fio.job"
+
+vm_scp 1 $testdir/vhost_fio.job 127.0.0.1:/root/vhost_fio.job
+vm_exec 1 "fio /root/vhost_fio.job"
+
+# Shut the VM down
+vm_shutdown_all
+
+# Shut vhost down
+vhost_kill vhost0
+
+trap - SIGINT SIGTERM EXIT
+
+vhosttestfini
+timing_exit vhost_fio
diff --git a/src/spdk/test/vhost/fio/vhost_fio.job b/src/spdk/test/vhost/fio/vhost_fio.job
new file mode 100644
index 000000000..350aa895e
--- /dev/null
+++ b/src/spdk/test/vhost/fio/vhost_fio.job
@@ -0,0 +1,19 @@
+[global]
+blocksize=4k-512k
+iodepth=128
+ioengine=libaio
+filename=/dev/sdb
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_fatal=1
+verify_dump=1
+verify_backlog=8
+
+[randwrite]
+rw=randwrite
+runtime=15
+time_based
diff --git a/src/spdk/test/vhost/fiotest/fio.sh b/src/spdk/test/vhost/fiotest/fio.sh
new file mode 100755
index 000000000..930948d6d
--- /dev/null
+++ b/src/spdk/test/vhost/fiotest/fio.sh
@@ -0,0 +1,288 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+dry_run=false
+no_shutdown=false
+fio_bin=""
+remote_fio_bin=""
+fio_jobs=""
+test_type=spdk_vhost_scsi
+reuse_vms=false
+vms=()
+used_vms=""
+x=""
+readonly=""
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help print help and exit"
+ echo " --test-type=TYPE Perform specified test:"
+ echo " virtio - test host virtio-scsi-pci using file as disk image"
+ echo " kernel_vhost - use kernel driver vhost-scsi"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo "-x set -x for script debug"
+ echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
+ echo " --fio-job= Fio config to use for test."
+ echo " All VMs will run the same fio job when FIO executes."
+ echo " (no unique jobs for specific VMs)"
+	echo "    --dry-run                 Don't perform any tests; only set up the environment and wait for Enter to terminate"
+	echo "    --no-shutdown             Don't shut down at the end but leave the environment running"
+ echo " --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
+ echo " NUM - VM number (mandatory)"
+ echo " OS - VM os disk path (optional)"
+	echo "                              DISKS - VM test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
+ echo " --readonly Use readonly for fio"
+ exit 0
+}
+
+#default raw file is NVMe drive
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
+ fio-job=*) fio_job="${OPTARG#*=}" ;;
+ dry-run) dry_run=true ;;
+ no-shutdown) no_shutdown=true ;;
+ test-type=*) test_type="${OPTARG#*=}" ;;
+ vm=*) vms+=("${OPTARG#*=}") ;;
+ readonly) readonly="--readonly" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+shift $((OPTIND - 1))
+
+if [[ ! -r "$fio_job" ]]; then
+ fail "no fio job file specified"
+fi
+
+vhosttestinit
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
+
+vm_kill_all
+
+if [[ $test_type =~ "spdk_vhost" ]]; then
+ notice "==============="
+ notice ""
+ notice "running SPDK"
+ notice ""
+ vhost_run 0
+ rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+ $rpc_py bdev_split_create Nvme0n1 4
+ $rpc_py bdev_malloc_create -b Malloc0 128 4096
+ $rpc_py bdev_malloc_create -b Malloc1 128 4096
+ $rpc_py bdev_malloc_create -b Malloc2 64 512
+ $rpc_py bdev_malloc_create -b Malloc3 64 512
+ $rpc_py bdev_malloc_create -b Malloc4 64 512
+ $rpc_py bdev_malloc_create -b Malloc5 64 512
+ $rpc_py bdev_malloc_create -b Malloc6 64 512
+ $rpc_py bdev_raid_create -n RaidBdev0 -z 128 -r 0 -b "Malloc2 Malloc3"
+ $rpc_py bdev_raid_create -n RaidBdev1 -z 128 -r 0 -b "Nvme0n1p2 Malloc4"
+ $rpc_py bdev_raid_create -n RaidBdev2 -z 128 -r 0 -b "Malloc5 Malloc6"
+ $rpc_py vhost_create_scsi_controller --cpumask 0x1 vhost.0
+ $rpc_py vhost_scsi_controller_add_target vhost.0 0 Malloc0
+ $rpc_py vhost_create_blk_controller --cpumask 0x1 -r vhost.1 Malloc1
+ notice ""
+fi
+
+notice "==============="
+notice ""
+notice "Setting up VM"
+notice ""
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+for vm_conf in "${vms[@]}"; do
+ IFS=',' read -ra conf <<< "$vm_conf"
+ if [[ x"${conf[0]}" == x"" ]] || ! assert_number ${conf[0]}; then
+ fail "invalid VM configuration syntax $vm_conf"
+ fi
+
+ # Sanity check if VM is not defined twice
+ for vm_num in $used_vms; do
+ if [[ $vm_num -eq ${conf[0]} ]]; then
+			fail "VM$vm_num defined more than once ( $(printf "'%s' " "${vms[@]}"))!"
+ fi
+ done
+
+ used_vms+=" ${conf[0]}"
+
+ if [[ $test_type =~ "spdk_vhost" ]]; then
+
+ notice "Adding device via RPC ..."
+
+ while IFS=':' read -ra disks; do
+ for disk in "${disks[@]}"; do
+ notice "Create a lvol store on RaidBdev2 and then a lvol bdev on the lvol store"
+ if [[ $disk == "RaidBdev2" ]]; then
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore RaidBdev2 lvs_0 -c 4194304)
+ free_mb=$(get_lvs_free_mb "$ls_guid")
+ based_disk=$($rpc_py bdev_lvol_create -u $ls_guid lbd_0 $free_mb)
+ else
+ based_disk="$disk"
+ fi
+
+ if [[ "$test_type" == "spdk_vhost_blk" ]]; then
+ disk=${disk%%_*}
+ notice "Creating vhost block controller naa.$disk.${conf[0]} with device $disk"
+ $rpc_py vhost_create_blk_controller naa.$disk.${conf[0]} $based_disk
+ else
+ notice "Creating controller naa.$disk.${conf[0]}"
+ $rpc_py vhost_create_scsi_controller naa.$disk.${conf[0]}
+
+ notice "Adding device (0) to naa.$disk.${conf[0]}"
+ $rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
+ fi
+ done
+ done <<< "${conf[2]}"
+ unset IFS
+ $rpc_py vhost_get_controllers
+ fi
+
+ setup_cmd="vm_setup --force=${conf[0]} --disk-type=$test_type"
+ [[ x"${conf[1]}" != x"" ]] && setup_cmd+=" --os=${conf[1]}"
+ [[ x"${conf[2]}" != x"" ]] && setup_cmd+=" --disks=${conf[2]}"
+
+ $setup_cmd
+done
+
+# Run everything
+vm_run $used_vms
+vm_wait_for_boot 300 $used_vms
+
+if [[ $test_type == "spdk_vhost_scsi" ]]; then
+ for vm_conf in "${vms[@]}"; do
+ IFS=',' read -ra conf <<< "$vm_conf"
+ while IFS=':' read -ra disks; do
+ for disk in "${disks[@]}"; do
+ # For RaidBdev2, the lvol bdev on RaidBdev2 is being used.
+ if [[ $disk == "RaidBdev2" ]]; then
+ based_disk="lvs_0/lbd_0"
+ else
+ based_disk="$disk"
+ fi
+ notice "Hotdetach test. Trying to remove existing device from a controller naa.$disk.${conf[0]}"
+ $rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0
+
+ sleep 0.1
+
+ notice "Hotattach test. Re-adding device 0 to naa.$disk.${conf[0]}"
+ $rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
+ done
+ done <<< "${conf[2]}"
+ unset IFS
+ done
+fi
+
+sleep 0.1
+
+notice "==============="
+notice ""
+notice "Testing..."
+
+notice "Running fio jobs ..."
+
+# Check if all VMs have their disks in the same location
+DISK=""
+
+fio_disks=""
+for vm_num in $used_vms; do
+ qemu_mask_param="VM_${vm_num}_qemu_mask"
+
+ host_name="VM-$vm_num"
+ notice "Setting up hostname: $host_name"
+ vm_exec $vm_num "hostname $host_name"
+ vm_start_fio_server $fio_bin $readonly $vm_num
+
+ if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
+ vm_check_scsi_location $vm_num
+ #vm_reset_scsi_devices $vm_num $SCSI_DISK
+ elif [[ "$test_type" == "spdk_vhost_blk" ]]; then
+ vm_check_blk_location $vm_num
+ fi
+
+ fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
+done
+
+if $dry_run; then
+	read -r -p "Press Enter to kill everything" xx
+ sleep 3
+ at_app_exit
+ exit 0
+fi
+
+run_fio $fio_bin --job-file="$fio_job" --out="$VHOST_DIR/fio_results" $fio_disks
+
+if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
+ for vm_num in $used_vms; do
+ vm_reset_scsi_devices $vm_num $SCSI_DISK
+ done
+fi
+
+if ! $no_shutdown; then
+ notice "==============="
+ notice "APP EXITING"
+ notice "killing all VMs"
+ vm_shutdown_all
+ notice "waiting 2 seconds to let all VMs die"
+ sleep 2
+ if [[ $test_type =~ "spdk_vhost" ]]; then
+ notice "Removing vhost devices & controllers via RPC ..."
+ for vm_conf in "${vms[@]}"; do
+ IFS=',' read -ra conf <<< "$vm_conf"
+
+ while IFS=':' read -ra disks; do
+ for disk in "${disks[@]}"; do
+ disk=${disk%%_*}
+ notice "Removing all vhost devices from controller naa.$disk.${conf[0]}"
+ if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
+ $rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0
+ fi
+
+ $rpc_py vhost_delete_controller naa.$disk.${conf[0]}
+ if [[ $disk == "RaidBdev2" ]]; then
+ notice "Removing lvol bdev and lvol store"
+ $rpc_py bdev_lvol_delete lvs_0/lbd_0
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_0
+ fi
+ done
+ done <<< "${conf[2]}"
+ done
+ fi
+ notice "Testing done -> shutting down"
+ notice "killing vhost app"
+ vhost_kill 0
+
+ notice "EXIT DONE"
+ notice "==============="
+else
+ notice "==============="
+ notice ""
+ notice "Leaving environment working!"
+ notice ""
+ notice "==============="
+fi
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/fuzz/fuzz.sh b/src/spdk/test/vhost/fuzz/fuzz.sh
new file mode 100755
index 000000000..7502f1976
--- /dev/null
+++ b/src/spdk/test/vhost/fuzz/fuzz.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+set -e
+
+rootdir=$(readlink -f $(dirname $0))/../../..
+source $rootdir/test/common/autotest_common.sh
+source "$rootdir/scripts/common.sh"
+
+VHOST_APP+=(-p 0)
+FUZZ_RPC_SOCK="/var/tmp/spdk_fuzz.sock"
+VHOST_FUZZ_APP+=(-r "$FUZZ_RPC_SOCK" -g --wait-for-rpc)
+
+vhost_rpc_py="$rootdir/scripts/rpc.py"
+fuzz_generic_rpc_py="$rootdir/scripts/rpc.py -s $FUZZ_RPC_SOCK"
+fuzz_specific_rpc_py="$rootdir/test/app/fuzz/common/fuzz_rpc.py -s $FUZZ_RPC_SOCK"
+
+"${VHOST_APP[@]}" > "$output_dir/vhost_fuzz_tgt_output.txt" 2>&1 &
+vhostpid=$!
+waitforlisten $vhostpid
+
+trap 'killprocess $vhostpid; exit 1' SIGINT SIGTERM exit
+
+"${VHOST_FUZZ_APP[@]}" -t 10 2> "$output_dir/vhost_fuzz_output1.txt" &
+fuzzpid=$!
+waitforlisten $fuzzpid $FUZZ_RPC_SOCK
+
+trap 'killprocess $vhostpid; killprocess $fuzzpid; exit 1' SIGINT SIGTERM exit
+
+$vhost_rpc_py bdev_malloc_create -b Malloc0 64 512
+$vhost_rpc_py vhost_create_blk_controller Vhost.1 Malloc0
+
+$vhost_rpc_py bdev_malloc_create -b Malloc1 64 512
+$vhost_rpc_py vhost_create_scsi_controller naa.VhostScsi0.1
+$vhost_rpc_py vhost_scsi_controller_add_target naa.VhostScsi0.1 0 Malloc1
+
+$vhost_rpc_py bdev_malloc_create -b Malloc2 64 512
+$vhost_rpc_py vhost_create_scsi_controller naa.VhostScsi0.2
+$vhost_rpc_py vhost_scsi_controller_add_target naa.VhostScsi0.2 0 Malloc2
+
+# test the vhost blk controller with valid data buffers.
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/Vhost.1 -b -v
+# test the vhost scsi I/O queue with valid data buffers on a valid lun.
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.1 -l -v
+# test the vhost scsi management queue with valid data buffers.
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.2 -v -m
+# The test won't actually begin until this option is passed in.
+$fuzz_generic_rpc_py framework_start_init
+
+wait $fuzzpid
+
+"${VHOST_FUZZ_APP[@]}" -j "$rootdir/test/app/fuzz/vhost_fuzz/example.json" 2> "$output_dir/vhost_fuzz_output2.txt" &
+fuzzpid=$!
+waitforlisten $fuzzpid $FUZZ_RPC_SOCK
+
+# re-evaluate fuzzpid
+trap 'killprocess $vhostpid; killprocess $fuzzpid; exit 1' SIGINT SIGTERM exit
+
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/Vhost.1 -b -v
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.1 -l -v
+$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.2 -v -m
+$fuzz_generic_rpc_py framework_start_init
+
+wait $fuzzpid
+
+trap - SIGINT SIGTERM exit
+
+killprocess $vhostpid
diff --git a/src/spdk/test/vhost/hotplug/blk_hotremove.sh b/src/spdk/test/vhost/hotplug/blk_hotremove.sh
new file mode 100644
index 000000000..d0edab83a
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/blk_hotremove.sh
@@ -0,0 +1,235 @@
+# Vhost blk hot remove tests
+#
+# Objective
+# The purpose of these tests is to verify that SPDK vhost remains stable during
+# hot-remove operations performed on SCSI and BLK controller devices.
+# Hot-remove is a scenario where an NVMe device is removed while already in use.
+#
+# Test cases description
+# 1. FIO I/O traffic is run during hot-remove operations.
+# By default FIO uses default_integrity*.job config files located in
+# test/vhost/hotplug/fio_jobs directory.
+# 2. FIO mode of operation is random write (randwrite) with verification enabled
+#    which results in read operations also being performed.
+# 3. In each test case the fio status is checked after every run for any errors.
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_blk_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_2discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_2discs.job "
+ rm $tmp_detach_job
+ done
+}
+
+function vhost_delete_controllers() {
+ $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p3.1
+}
+
+# Vhost blk hot remove test cases
+#
+# Test Case 1
+function blk_hotremove_tc1() {
+ echo "Blk hotremove test case 1"
+ traddr=""
+ # 1. Run the command to hot remove NVMe disk.
+ get_traddr "Nvme0"
+ delete_nvme "Nvme0"
+ # 2. If vhost had crashed then tests would stop running
+ sleep 1
+ add_nvme "HotInNvme0" "$traddr"
+ sleep 1
+}
+
+# Test Case 2
+function blk_hotremove_tc2() {
+ echo "Blk hotremove test case 2"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme0n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
+ # 2. Run two VMs and attach every VM to two blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic with verification enabled on NVMe disk.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove NVMe disk.
+ delete_nvme "HotInNvme0"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 5. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 2: Iteration 1." 1 $retcode
+
+ # 6. Reboot VM
+ reboot_all_and_prepare "0"
+ # 7. Run FIO I/O traffic with verification enabled on NVMe disk.
+ $run_fio &
+ local retcode=0
+ wait_for_finish $! || retcode=$?
+ # 8. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 2: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme1" "$traddr"
+ sleep 1
+}
+
+# Test Case 3
+function blk_hotremove_tc3() {
+ echo "Blk hotremove test case 3"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme1n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme1n1p1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
+ # 2. Run two VMs and attach every VM to two blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic with verification enabled on first NVMe disk.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+	# 4. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme1"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 6. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 3: Iteration 1." 1 $retcode
+
+ # 7. Reboot VM
+ reboot_all_and_prepare "0"
+ local retcode=0
+ # 8. Run FIO I/O traffic with verification enabled on removed NVMe disk.
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 9. Check that fio job run on hot-removed device stopped.
+ # Expected: Fio should return error message and return code != 0.
+ check_fio_retcode "Blk hotremove test case 3: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme2" "$traddr"
+ sleep 1
+}
+
+# Test Case 4
+function blk_hotremove_tc4() {
+ echo "Blk hotremove test case 4"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme2n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme2n1p1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
+ # 2. Run two VMs attached to the blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic on first VM with verification enabled on both NVMe disks.
+ $run_fio &
+ local last_pid_vm0=$!
+
+ prepare_fio_cmd_tc1 "1"
+ # 4. Run FIO I/O traffic on second VM with verification enabled on both NVMe disks.
+ $run_fio &
+ local last_pid_vm1=$!
+
+ sleep 3
+ prepare_fio_cmd_tc1 "0 1"
+ # 5. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme2"
+ local retcode_vm0=0
+ local retcode_vm1=0
+ wait_for_finish $last_pid_vm0 || retcode_vm0=$?
+ wait_for_finish $last_pid_vm1 || retcode_vm1=$?
+ # 6. Check that the fio jobs running on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 4: Iteration 1." 1 $retcode_vm0
+ check_fio_retcode "Blk hotremove test case 4: Iteration 2." 1 $retcode_vm1
+
+ # 7. Reboot all VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on removed NVMe disk.
+ $run_fio &
+ local retcode=0
+ wait_for_finish $! || retcode=$?
+ # 9. Check that the fio job running on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 4: Iteration 3." 1 $retcode
+
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme3" "$traddr"
+ sleep 1
+}
+
+# Test Case 5
+function blk_hotremove_tc5() {
+ echo "Blk hotremove test case 5"
+ # 1. Use rpc command to create blk controllers.
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme3n1p0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
+ # 2. Run two VMs attached to the blk controllers.
+ vm_run_with_arg "0 1"
+ vms_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0"
+ # 3. Run FIO I/O traffic on the first VM with verification enabled on its disks.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the first NVMe disk.
+ delete_nvme "HotInNvme3"
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ # 5. Check that the fio job running on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 5: Iteration 1." 1 $retcode
+
+ # 6. Reboot VM.
+ reboot_all_and_prepare "0"
+ local retcode=0
+ # 7. Run FIO I/O traffic with verification enabled on removed NVMe disk.
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 8. Check that the fio job running on the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Blk hotremove test case 5: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ vhost_delete_controllers
+ add_nvme "HotInNvme4" "$traddr"
+ sleep 1
+}
+
+vms_setup
+blk_hotremove_tc1
+blk_hotremove_tc2
+blk_hotremove_tc3
+blk_hotremove_tc4
+blk_hotremove_tc5
diff --git a/src/spdk/test/vhost/hotplug/common.sh b/src/spdk/test/vhost/hotplug/common.sh
new file mode 100644
index 000000000..b7b05ee74
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/common.sh
@@ -0,0 +1,230 @@
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+dry_run=false
+no_shutdown=false
+fio_bin="fio"
+fio_jobs="$testdir/fio_jobs/"
+test_type=spdk_vhost_scsi
+reuse_vms=false
+vms=()
+used_vms=""
+disk_split=""
+x=""
+scsi_hot_remove_test=0
+blk_hot_remove_test=0
+readonly=""
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated hotattach/hotdetach test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help print help and exit"
+ echo " --test-type=TYPE Perform specified test:"
+ echo " virtio - test host virtio-scsi-pci using file as disk image"
+ echo " kernel_vhost - use kernel driver vhost-scsi"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo "-x set -x for script debug"
+ echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
+ echo " --fio-jobs= Fio configs to use for tests. Can point to a directory or"
+ echo " --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
+ echo " NUM - VM number (mandatory)"
+ echo " OS - VM os disk path (optional)"
+ echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
+ echo " --scsi-hotremove-test Run scsi hotremove tests"
+ echo " --readonly Use readonly for fio"
+ exit 0
+}
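+
+# An illustrative invocation of a script sourcing this file (the VM image path
+# and fio binary below are placeholders, not verified defaults); it mirrors how
+# manual.sh drives the hotplug suite:
+#   ./scsi_hotplug.sh --test-type=spdk_vhost_scsi \
+#       --vm=0,$HOME/vhost_vm_image.qcow2,Nvme0n1p0:Nvme0n1p1 \
+#       --fio-jobs=fio_jobs/default_integrity.job --fio-bin=/usr/bin/fio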
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ fio-bin=*) fio_bin="${OPTARG#*=}" ;;
+ fio-jobs=*) fio_jobs="${OPTARG#*=}" ;;
+ test-type=*) test_type="${OPTARG#*=}" ;;
+ vm=*) vms+=("${OPTARG#*=}") ;;
+ scsi-hotremove-test) scsi_hot_remove_test=1 ;;
+ blk-hotremove-test) blk_hot_remove_test=1 ;;
+ readonly) readonly="--readonly" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+shift $((OPTIND - 1))
+
+fio_job=$testdir/fio_jobs/default_integrity.job
+tmp_attach_job=$testdir/fio_jobs/fio_attach.job.tmp
+tmp_detach_job=$testdir/fio_jobs/fio_detach.job.tmp
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+function print_test_fio_header() {
+ notice "==============="
+ notice ""
+ notice "Testing..."
+
+ notice "Running fio jobs ..."
+ if [ $# -gt 0 ]; then
+ echo $1
+ fi
+}
+
+function vms_setup() {
+ for vm_conf in "${vms[@]}"; do
+ IFS=',' read -ra conf <<< "$vm_conf"
+ if [[ x"${conf[0]}" == x"" ]] || ! assert_number ${conf[0]}; then
+ fail "invalid VM configuration syntax $vm_conf"
+ fi
+
+ # Sanity check if VM is not defined twice
+ for vm_num in $used_vms; do
+ if [[ $vm_num -eq ${conf[0]} ]]; then
+ fail "VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
+ fi
+ done
+
+ used_vms+=" ${conf[0]}"
+
+ setup_cmd="vm_setup --disk-type=$test_type --force=${conf[0]}"
+ [[ x"${conf[1]}" != x"" ]] && setup_cmd+=" --os=${conf[1]}"
+ [[ x"${conf[2]}" != x"" ]] && setup_cmd+=" --disks=${conf[2]}"
+ $setup_cmd
+ done
+}
+
+function vm_run_with_arg() {
+ local vms_to_run="$*"
+ vm_run $vms_to_run
+ vm_wait_for_boot 300 $vms_to_run
+}
+
+function vms_setup_and_run() {
+ local vms_to_run="$*"
+ vms_setup
+ vm_run_with_arg $vms_to_run
+}
+
+function vms_prepare() {
+ for vm_num in $1; do
+ qemu_mask_param="VM_${vm_num}_qemu_mask"
+
+ host_name="VM-${vm_num}-${!qemu_mask_param}"
+ notice "Setting up hostname: $host_name"
+ vm_exec $vm_num "hostname $host_name"
+ vm_start_fio_server --fio-bin=$fio_bin $readonly $vm_num
+ done
+}
+
+function vms_reboot_all() {
+ notice "Rebooting all vms "
+ for vm_num in $1; do
+ vm_exec $vm_num "reboot" || true
+ while vm_os_booted $vm_num; do
+ sleep 0.5
+ done
+ done
+
+ vm_wait_for_boot 300 $1
+}
+
+function check_fio_retcode() {
+ local fio_retcode=$3
+ echo $1
+ local retcode_expected=$2
+ if [ $retcode_expected == 0 ]; then
+ if [ $fio_retcode != 0 ]; then
+ error " Fio test ended with error."
+ else
+ notice " Fio test ended with success."
+ fi
+ else
+ if [ $fio_retcode != 0 ]; then
+ notice " Fio test ended with expected error."
+ else
+ error " Fio test ended with unexpected success."
+ fi
+ fi
+}
+
+function wait_for_finish() {
+ local wait_for_pid=$1
+ local sequence=${2:-30}
+ for i in $(seq 1 $sequence); do
+ if kill -0 $wait_for_pid; then
+ sleep 0.5
+ continue
+ else
+ break
+ fi
+ done
+ if kill -0 $wait_for_pid; then
+ error "Timeout for fio command"
+ fi
+
+ wait $wait_for_pid
+}
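+
+# Typical usage sketch (the pattern used by the hotremove test cases below):
+# run fio in the background, then capture its exit code without aborting the
+# script on an expected failure.
+#   $run_fio &
+#   local retcode=0
+#   wait_for_finish $! || retcode=$?
+#   check_fio_retcode "Some test case: Iteration 1." 1 $retcode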
+
+function reboot_all_and_prepare() {
+ vms_reboot_all "$1"
+ vms_prepare "$1"
+}
+
+function post_test_case() {
+ vm_shutdown_all
+ vhost_kill 0
+}
+
+function on_error_exit() {
+ set +e
+ echo "Error on $1 - $2"
+ post_test_case
+ print_backtrace
+ exit 1
+}
+
+function check_disks() {
+ if [ "$1" == "$2" ]; then
+ echo "Disk has not been deleted"
+ exit 1
+ fi
+}
+
+function get_traddr() {
+ local nvme_name=$1
+ local nvme
+ nvme="$($rootdir/scripts/gen_nvme.sh)"
+ while read -r line; do
+ if [[ $line == *"TransportID"* ]] && [[ $line == *$nvme_name* ]]; then
+ local word_array=($line)
+ for word in "${word_array[@]}"; do
+ if [[ $word == *"traddr"* ]]; then
+ traddr=$(echo $word | sed 's/traddr://' | sed 's/"//')
+ fi
+ done
+ fi
+ done <<< "$nvme"
+}
+
+function delete_nvme() {
+ $rpc_py bdev_nvme_detach_controller $1
+}
+
+function add_nvme() {
+ $rpc_py bdev_nvme_attach_controller -b $1 -t PCIe -a $2
+}
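+
+# Illustrative hot-remove/re-attach sequence (the PCIe address shown is only a
+# placeholder; get_traddr fills in the real one):
+#   get_traddr "Nvme0"                # sets $traddr, e.g. 0000:5e:00.0
+#   delete_nvme "Nvme0"               # detach the NVMe controller
+#   add_nvme "HotInNvme0" "$traddr"   # re-attach it under a new bdev name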
diff --git a/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job b/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job
new file mode 100644
index 000000000..136fe9029
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/fio_jobs/default_integrity.job
@@ -0,0 +1,16 @@
+[global]
+blocksize=4k
+iodepth=512
+iodepth_batch=128
+iodepth_low=256
+ioengine=libaio
+group_reporting
+thread
+numjobs=1
+direct=1
+rw=randwrite
+do_verify=1
+verify=md5
+verify_backlog=1024
+time_based=1
+runtime=10
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotattach.sh b/src/spdk/test/vhost/hotplug/scsi_hotattach.sh
new file mode 100755
index 000000000..4b9e26ab8
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotattach.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/vhost/hotplug/common.sh
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_attach_job
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_attach_job
+ echo "filename=/dev/$disk" >> $tmp_attach_job
+ done
+ vm_scp $vm_num $tmp_attach_job 127.0.0.1:/root/default_integrity_discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket ${vm_num}) --remote-config /root/default_integrity_discs.job "
+ rm $tmp_attach_job
+ done
+}
+
+# Check if fio test passes on device attached to first controller.
+function hotattach_tc1() {
+ notice "Hotattach test case 1"
+
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 Nvme0n1p0
+
+ sleep 3
+ prepare_fio_cmd_tc1 "0"
+ $run_fio
+ check_fio_retcode "Hotattach test case 1: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached device.
+# During test attach another device to first controller and check fio status.
+function hotattach_tc2() {
+ notice "Hotattach test case 2"
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 1 Nvme0n1p1
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 2: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached devices.
+# During test attach another device to second controller and check fio status.
+function hotattach_tc3() {
+ notice "Hotattach test case 3"
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Nvme0n1p2
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 3: Iteration 1." 0 $?
+}
+
+# Run fio test for previously attached devices.
+# During the test attach another device to the third controller (VM2) and check fio status.
+# At the end after rebooting VMs run fio test for all devices and check fio status.
+function hotattach_tc4() {
+ notice "Hotattach test case 4"
+
+ prepare_fio_cmd_tc1 "0"
+
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 Nvme0n1p3
+ wait $last_pid
+ check_fio_retcode "Hotattach test case 4: Iteration 1." 0 $?
+
+ prepare_fio_cmd_tc1 "0 1"
+ $run_fio
+ check_fio_retcode "Hotattach test case 4: Iteration 2." 0 $?
+
+ reboot_all_and_prepare "0 1"
+
+ prepare_fio_cmd_tc1 "0 1"
+ $run_fio
+ check_fio_retcode "Hotattach test case 4: Iteration 3." 0 $?
+}
+
+function cleanup_after_tests() {
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p0.0 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p0.0 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p2.1 0
+}
+
+hotattach_tc1
+hotattach_tc2
+hotattach_tc3
+hotattach_tc4
+cleanup_after_tests
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh b/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh
new file mode 100755
index 000000000..8a7cb264f
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotdetach.sh
@@ -0,0 +1,212 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/vhost/hotplug/common.sh
+
+function get_first_disk() {
+ vm_check_scsi_location $1
+ disk_array=($SCSI_DISK)
+ eval "$2=${disk_array[0]}"
+}
+
+function check_disks() {
+ if [ "$1" == "$2" ]; then
+ fail "Disk has not been deleted"
+ fi
+}
+
+function prepare_fio_cmd_tc1_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_4discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_4discs.job "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc2_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_scsi_location $vm_num
+ disk_array=($SCSI_DISK)
+ disk=${disk_array[0]}
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity.job "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc2_iter2() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ if [ $vm_num == 2 ]; then
+ vm_job_name=default_integrity_3discs.job
+ else
+ vm_job_name=default_integrity_4discs.job
+ fi
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/${vm_job_name} "
+ rm $tmp_detach_job
+ done
+}
+
+function prepare_fio_cmd_tc3_iter1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ if [ $vm_num == 2 ]; then
+ vm_job_name=default_integrity_3discs.job
+ else
+ vm_job_name=default_integrity_4discs.job
+ fi
+ vm_check_scsi_location $vm_num
+ j=1
+ for disk in $SCSI_DISK; do
+ if [ $vm_num == 2 ]; then
+ if [ $j == 1 ]; then
+ ((j++))
+ continue
+ fi
+ fi
+ echo "[nvme-host$disk]" >> $tmp_detach_job
+ echo "filename=/dev/$disk" >> $tmp_detach_job
+ ((j++))
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$vm_job_name "
+ rm $tmp_detach_job
+ done
+}
+
+# During a fio test on all devices, remove the first device from the fifth controller and check that fio fails.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc1() {
+ notice "Hotdetach test case 1"
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc1_iter1 "2 3"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 1: Iteration 1." 1 $?
+ set -xe
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# During a fio test on a device from the third VM, remove the first device from the fifth controller and check that fio fails.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc2() {
+ notice "Hotdetach test case 2"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc2_iter1 "2"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 2: Iteration 1." 1 $?
+ set -xe
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# Run a fio test on all devices except one, then remove this device and check that fio passes.
+# Also check that the disk has been removed from the VM.
+function hotdetach_tc3() {
+ notice "Hotdetach test case 3"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc3_iter1 "2 3"
+ $run_fio &
+ last_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ wait $last_pid
+ check_fio_retcode "Hotdetach test case 3: Iteration 1." 0 $?
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+ clear_after_tests
+}
+
+# Run one fio test on the device that will be removed and another fio test on all remaining devices.
+# Check that the fio test on the removed device fails and the test on the remaining devices passes.
+# Also check that the disk has been removed from the VM.
+# After reboot, run a fio test on the remaining devices and check that it passes.
+function hotdetach_tc4() {
+ notice "Hotdetach test case 4"
+ sleep 2
+ first_disk=""
+ get_first_disk "2" first_disk
+ prepare_fio_cmd_tc2_iter1 "2"
+ $run_fio &
+ first_fio_pid=$!
+ prepare_fio_cmd_tc3_iter1 "2 3"
+ $run_fio &
+ second_fio_pid=$!
+ sleep 3
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ set +xe
+ wait $first_fio_pid
+ check_fio_retcode "Hotdetach test case 4: Iteration 1." 1 $?
+ set -xe
+ wait $second_fio_pid
+ check_fio_retcode "Hotdetach test case 4: Iteration 2." 0 $?
+ second_disk=""
+ get_first_disk "2" second_disk
+ check_disks $first_disk $second_disk
+
+ reboot_all_and_prepare "2 3"
+ sleep 2
+ prepare_fio_cmd_tc2_iter2 "2 3"
+ $run_fio
+ check_fio_retcode "Hotdetach test case 4: Iteration 3." 0 $?
+ clear_after_tests
+}
+
+function clear_after_tests() {
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p4.2 0 Nvme0n1p8
+}
+
+hotdetach_tc1
+hotdetach_tc2
+hotdetach_tc3
+hotdetach_tc4
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotplug.sh b/src/spdk/test/vhost/hotplug/scsi_hotplug.sh
new file mode 100755
index 000000000..40132ab8a
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotplug.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/vhost/hotplug/common.sh
+
+if [[ $scsi_hot_remove_test == 1 ]] && [[ $blk_hot_remove_test == 1 ]]; then
+ notice "Vhost-scsi and vhost-blk hotremove tests cannot be run together"
+fi
+
+# Prepare vhost with rpc calls: create scsi controllers and add targets,
+# then set up and run 4 VMs.
+function pre_hot_attach_detach_test_case() {
+ used_vms=""
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p3.1
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p4.2
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p5.2
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p6.3
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p7.3
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p4.2 0 Nvme0n1p8
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p4.2 1 Nvme0n1p9
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p5.2 0 Nvme0n1p10
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p5.2 1 Nvme0n1p11
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p6.3 0 Nvme0n1p12
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p6.3 1 Nvme0n1p13
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p7.3 0 Nvme0n1p14
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p7.3 1 Nvme0n1p15
+ vms_setup_and_run "0 1 2 3"
+ vms_prepare "0 1 2 3"
+}
+
+function clear_vhost_config() {
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p4.2 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p5.2 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p5.2 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p6.3 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p6.3 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p7.3 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p7.3 1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p3.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p4.2
+ $rpc_py vhost_delete_controller naa.Nvme0n1p5.2
+ $rpc_py vhost_delete_controller naa.Nvme0n1p6.3
+ $rpc_py vhost_delete_controller naa.Nvme0n1p7.3
+}
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
+# Hotremove/hotattach/hotdetach test case prerequisites:
+# run vhost, then create split bdevs on the NVMe disk and a malloc bdev.
+
+notice "==============="
+notice ""
+notice "running SPDK"
+notice ""
+vhost_run 0
+$rpc_py bdev_nvme_set_hotplug -e
+$rpc_py bdev_split_create Nvme0n1 16
+$rpc_py bdev_malloc_create 128 512 -b Malloc
+$rpc_py bdev_split_create Malloc 4
+$rpc_py bdev_split_create HotInNvme0n1 2
+$rpc_py bdev_split_create HotInNvme1n1 2
+$rpc_py bdev_split_create HotInNvme2n1 2
+$rpc_py bdev_split_create HotInNvme3n1 2
+$rpc_py bdev_get_bdevs
+
+if [[ $scsi_hot_remove_test == 0 ]] && [[ $blk_hot_remove_test == 0 ]]; then
+ pre_hot_attach_detach_test_case
+ $testdir/scsi_hotattach.sh --fio-bin=$fio_bin &
+ first_script=$!
+ $testdir/scsi_hotdetach.sh --fio-bin=$fio_bin &
+ second_script=$!
+ wait $first_script
+ wait $second_script
+ vm_shutdown_all
+ clear_vhost_config
+fi
+if [[ $scsi_hot_remove_test == 1 ]]; then
+ source $testdir/scsi_hotremove.sh
+fi
+if [[ $blk_hot_remove_test == 1 ]]; then
+ source $testdir/blk_hotremove.sh
+fi
+post_test_case
diff --git a/src/spdk/test/vhost/hotplug/scsi_hotremove.sh b/src/spdk/test/vhost/hotplug/scsi_hotremove.sh
new file mode 100644
index 000000000..1dee4ac7f
--- /dev/null
+++ b/src/spdk/test/vhost/hotplug/scsi_hotremove.sh
@@ -0,0 +1,233 @@
+set -xe
+
+# Vhost SCSI hotremove tests
+#
+# # Objective
+# The purpose of these tests is to verify that SPDK vhost remains stable during
+# hot-remove operations performed on devices attached to SCSI controllers.
+# Hot-remove is a scenario where an NVMe device is removed while already in use.
+# Tests consist of 4 test cases.
+#
+# # Test cases description
+# 1. FIO I/O traffic is run during hot-remove operations.
+# By default FIO uses default_integrity*.job config files located in
+# test/vhost/hotplug/fio_jobs directory.
+# 2. FIO mode of operation is random write (randwrite) with verification enabled,
+# which results in read operations also being performed.
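+#
+# A minimal sketch of the per-disk job sections appended by prepare_fio_cmd_tc1
+# below (the device name sdb is illustrative; the global options come from
+# default_integrity.job):
+#   [global]
+#   rw=randwrite
+#   do_verify=1
+#   verify=md5
+#
+#   [nvme-hostsdb]
+#   filename=/dev/sdb
+#   size=100%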
+
+function prepare_fio_cmd_tc1() {
+ print_test_fio_header
+
+ run_fio="$fio_bin --eta=never "
+ for vm_num in $1; do
+ cp $fio_job $tmp_detach_job
+ vm_check_scsi_location $vm_num
+ for disk in $SCSI_DISK; do
+ cat <<- EOL >> $tmp_detach_job
+ [nvme-host$disk]
+ filename=/dev/$disk
+ size=100%
+ EOL
+ done
+ vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_2discs.job
+ run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_2discs.job "
+ rm $tmp_detach_job
+ done
+}
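+
+# The command assembled above ends up roughly of this shape (the fio server
+# port is illustrative; one --client/--remote-config pair is appended per VM):
+#   fio --eta=never --client=127.0.0.1,10003 \
+#       --remote-config /root/default_integrity_2discs.job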
+
+# Vhost SCSI hot-remove test cases.
+
+# Test Case 1
+function scsi_hotremove_tc1() {
+ echo "Scsi hotremove test case 1"
+ traddr=""
+ get_traddr "Nvme0"
+ # 1. Run the command to hot remove the NVMe disk.
+ delete_nvme "Nvme0"
+ # 2. If vhost had crashed, the remaining tests would stop running.
+ sleep 1
+ add_nvme "HotInNvme0" "$traddr"
+}
+
+# Test Case 2
+function scsi_hotremove_tc2() {
+ echo "Scsi hotremove test case 2"
+ # 1. Attach split NVMe bdevs to scsi controller.
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme0n1p0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Mallocp0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme0n1p1
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p3.1 0 Mallocp1
+
+ # 2. Run two VMs, attached to scsi controllers.
+ vms_setup
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+
+ vm_check_scsi_location "0"
+ local disks="$SCSI_DISK"
+
+ traddr=""
+ get_traddr "Nvme0"
+ prepare_fio_cmd_tc1 "0 1"
+ # 3. Run FIO I/O traffic with verification enabled on both NVMe disks in the VM.
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the NVMe disk.
+ delete_nvme "HotInNvme0"
+
+ # 5. Check that the fio job running on the hot-removed device stopped on the VM.
+ # Expected: fio should return an error message and a return code != 0.
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ check_fio_retcode "Scsi hotremove test case 2: Iteration 1." 1 $retcode
+
+ # 6. Check if removed devices are gone from VM.
+ vm_check_scsi_location "0"
+ local new_disks="$SCSI_DISK"
+ check_disks "$disks" "$new_disks"
+ # 7. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on both VMs.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 9. Check that the fio job running on the hot-removed device stopped on both VMs.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Scsi hotremove test case 2: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme1" "$traddr"
+ sleep 1
+}
+
+# Test Case 3
+function scsi_hotremove_tc3() {
+ echo "Scsi hotremove test case 3"
+ # 1. Attach added NVMe bdev to scsi controller.
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme1n1p0
+ # 2. Run two VMs attached to the scsi controllers.
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+ vm_check_scsi_location "0"
+ local disks="$SCSI_DISK"
+ traddr=""
+ get_traddr "Nvme0"
+ # 3. Run FIO I/O traffic with verification enabled on both disks in the first VM.
+ prepare_fio_cmd_tc1 "0"
+ $run_fio &
+ local last_pid=$!
+ sleep 3
+ # 4. Run the command to hot remove the NVMe disk.
+ delete_nvme "HotInNvme1"
+ # 5. Check that the fio job running on the hot-removed device stopped on the first VM.
+ # Expected: fio should return an error message and a return code != 0.
+ local retcode=0
+ wait_for_finish $last_pid || retcode=$?
+ check_fio_retcode "Scsi hotremove test case 3: Iteration 1." 1 $retcode
+ # 6. Check if removed devices are gone from lsblk.
+ vm_check_scsi_location "0"
+ local new_disks="$SCSI_DISK"
+ check_disks "$disks" "$new_disks"
+ # 7. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 8. Run FIO I/O traffic with verification enabled on both VMs.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 9. Check that the fio job running on the hot-removed device stopped on both VMs.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Scsi hotremove test case 3: Iteration 2." 1 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme2" "$traddr"
+ sleep 1
+}
+
+# Test Case 4
+function scsi_hotremove_tc4() {
+ echo "Scsi hotremove test case 4"
+ # 1. Attach NVMe bdevs to scsi controllers.
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme2n1p0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme2n1p1
+ # 2. Run two VMs attached to the scsi controllers.
+ vm_run_with_arg 0 1
+ vms_prepare "0 1"
+
+ # 3. Run FIO I/O traffic with verification enabled on first VM.
+ vm_check_scsi_location "0"
+ local disks_vm0="$SCSI_DISK"
+ # 4. Run FIO I/O traffic with verification enabled on second VM.
+ prepare_fio_cmd_tc1 "0"
+ $run_fio &
+ last_pid_vm0=$!
+
+ vm_check_scsi_location "1"
+ local disks_vm1="$SCSI_DISK"
+ prepare_fio_cmd_tc1 "1"
+ $run_fio &
+ local last_pid_vm1=$!
+ prepare_fio_cmd_tc1 "0 1"
+ sleep 3
+ # 5. Run the command to hot remove NVMe disk.
+ traddr=""
+ get_traddr "Nvme0"
+ delete_nvme "HotInNvme2"
+ # 6. Check that the fio jobs running on the hot-removed devices stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ local retcode_vm0=0
+ wait_for_finish $last_pid_vm0 || retcode_vm0=$?
+ local retcode_vm1=0
+ wait_for_finish $last_pid_vm1 || retcode_vm1=$?
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 1." 1 $retcode_vm0
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 2." 1 $retcode_vm1
+
+ # 7. Check if removed devices are gone from lsblk.
+ vm_check_scsi_location "0"
+ local new_disks_vm0="$SCSI_DISK"
+ check_disks "$disks_vm0" "$new_disks_vm0"
+ vm_check_scsi_location "1"
+ local new_disks_vm1="$SCSI_DISK"
+ check_disks "$disks_vm1" "$new_disks_vm1"
+
+ # 8. Reboot both VMs.
+ reboot_all_and_prepare "0 1"
+ # 9. Run FIO I/O traffic using the job file prepared before the hot remove (it still references the removed disks).
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 10. Check that the fio job run against the hot-removed device stopped.
+ # Expected: fio should return an error message and a return code != 0.
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 3." 1 $retcode
+ prepare_fio_cmd_tc1 "0 1"
+ # 11. Run FIO I/O traffic with verification enabled on the not-removed disks.
+ local retcode=0
+ $run_fio &
+ wait_for_finish $! || retcode=$?
+ # 12. Check the fio completion status. Writes and reads on the not-removed
+ # disks should be successful.
+ # Expected: fio should return a return code == 0.
+ check_fio_retcode "Scsi hotremove test case 4: Iteration 4." 0 $retcode
+ vm_shutdown_all
+ add_nvme "HotInNvme3" "$traddr"
+ sleep 1
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
+ $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p3.1 0
+}
+
+function pre_scsi_hotremove_test_case() {
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1p3.1
+}
+
+function post_scsi_hotremove_test_case() {
+ $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
+ $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
+ $rpc_py vhost_delete_controller naa.Nvme0n1p3.1
+}
+
+pre_scsi_hotremove_test_case
+scsi_hotremove_tc1
+scsi_hotremove_tc2
+scsi_hotremove_tc3
+scsi_hotremove_tc4
+post_scsi_hotremove_test_case
diff --git a/src/spdk/test/vhost/initiator/autotest.config b/src/spdk/test/vhost/initiator/autotest.config
new file mode 100644
index 000000000..61a1a2424
--- /dev/null
+++ b/src/spdk/test/vhost/initiator/autotest.config
@@ -0,0 +1,5 @@
+vhost_0_reactor_mask=["0"]
+vhost_0_master_core=0
+
+VM_0_qemu_mask=1-10
+VM_0_qemu_numa_node=0
diff --git a/src/spdk/test/vhost/initiator/bdev.fio b/src/spdk/test/vhost/initiator/bdev.fio
new file mode 100644
index 000000000..405202282
--- /dev/null
+++ b/src/spdk/test/vhost/initiator/bdev.fio
@@ -0,0 +1,51 @@
+[global]
+thread=1
+group_reporting=1
+direct=1
+norandommap=1
+time_based=1
+do_verify=1
+verify=md5
+verify_backlog=1024
+iodepth=128
+bs=4K
+runtime=10
+size=13%
+
+[job_randwrite]
+rw=randwrite
+name=randwrite
+
+[job_randrw]
+offset=13%
+rw=randrw
+name=randrw
+
+[job_write]
+offset=26%
+rw=write
+name=write
+
+[job_rw]
+offset=39%
+rw=rw
+name=rw
+
+[job_unmap_trim_sequential]
+offset=52%
+rw=trim
+trim_verify_zero=1
+name=unmap_trim_sequential
+
+[job_unmap_trim_random]
+offset=65%
+rw=randtrim
+trim_verify_zero=1
+name=unmap_trim_random
+
+[job_unmap_write]
+stonewall
+offset=52%
+size=26%
+rw=randwrite
+name=unmap_write
diff --git a/src/spdk/test/vhost/initiator/bdev_pci.conf b/src/spdk/test/vhost/initiator/bdev_pci.conf
new file mode 100644
index 000000000..0e47e88a7
--- /dev/null
+++ b/src/spdk/test/vhost/initiator/bdev_pci.conf
@@ -0,0 +1,2 @@
+[VirtioPci]
+ Enable Yes
diff --git a/src/spdk/test/vhost/initiator/blockdev.sh b/src/spdk/test/vhost/initiator/blockdev.sh
new file mode 100755
index 000000000..9667f1f3d
--- /dev/null
+++ b/src/spdk/test/vhost/initiator/blockdev.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+
+function run_spdk_fio() {
+ fio_bdev --ioengine=spdk_bdev "$@" --spdk_mem=1024 --spdk_single_seg=1 \
+ --verify_state_save=0
+}
+
+function err_cleanup() {
+ rm -f $testdir/bdev.json
+ killprocess $vhost_pid
+ if [[ -n "$dummy_spdk_pid" ]] && kill -0 $dummy_spdk_pid &> /dev/null; then
+ killprocess $dummy_spdk_pid
+ fi
+}
+
+# start vhost and configure it
+trap 'err_cleanup; exit 1' SIGINT SIGTERM EXIT
+$SPDK_BIN_DIR/vhost &
+vhost_pid=$!
+waitforlisten $vhost_pid
+
+$rootdir/scripts/gen_nvme.sh --json | $rootdir/scripts/rpc.py load_subsystem_config
+if [ -z "$(rpc_cmd bdev_get_bdevs | jq '.[] | select(.name=="Nvme0n1")')" ]; then
+ echo "Nvme0n1 bdev not found!" && false
+fi
+
+rpc_cmd bdev_split_create Nvme0n1 6
+
+rpc_cmd vhost_create_scsi_controller naa.Nvme0n1_scsi0.0
+rpc_cmd vhost_scsi_controller_add_target naa.Nvme0n1_scsi0.0 0 Nvme0n1p0
+rpc_cmd vhost_scsi_controller_add_target naa.Nvme0n1_scsi0.0 1 Nvme0n1p1
+rpc_cmd vhost_scsi_controller_add_target naa.Nvme0n1_scsi0.0 2 Nvme0n1p2
+rpc_cmd vhost_scsi_controller_add_target naa.Nvme0n1_scsi0.0 3 Nvme0n1p3
+
+rpc_cmd vhost_create_blk_controller naa.Nvme0n1_blk0.0 Nvme0n1p4
+rpc_cmd vhost_create_blk_controller naa.Nvme0n1_blk1.0 Nvme0n1p5
+
+rpc_cmd bdev_malloc_create 128 512 --name Malloc0
+rpc_cmd vhost_create_scsi_controller naa.Malloc0.0
+rpc_cmd vhost_scsi_controller_add_target naa.Malloc0.0 0 Malloc0
+
+rpc_cmd bdev_malloc_create 128 4096 --name Malloc1
+rpc_cmd vhost_create_scsi_controller naa.Malloc1.0
+rpc_cmd vhost_scsi_controller_add_target naa.Malloc1.0 0 Malloc1
+
+# start a dummy app, create vhost bdevs in it, then dump the config for FIO
+$SPDK_BIN_DIR/spdk_tgt -r /tmp/spdk2.sock -g &
+dummy_spdk_pid=$!
+waitforlisten $dummy_spdk_pid /tmp/spdk2.sock
+rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Nvme0n1_scsi0.0' -d scsi --vq-count 8 'VirtioScsi0'
+rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Nvme0n1_blk0.0' -d blk --vq-count 8 'VirtioBlk3'
+rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Nvme0n1_blk1.0' -d blk --vq-count 8 'VirtioBlk4'
+
+rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Malloc0.0' -d scsi --vq-count 8 'VirtioScsi1'
+rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Malloc1.0' -d scsi --vq-count 8 'VirtioScsi2'
+
+cat <<- CONF > $testdir/bdev.json
+ {"subsystems":[
+ $(rpc_cmd -s /tmp/spdk2.sock save_subsystem_config -n bdev)
+ ]}
+CONF
+killprocess $dummy_spdk_pid
+
+# run FIO with previously acquired spdk config files
+timing_enter run_spdk_fio
+run_spdk_fio $testdir/bdev.fio --filename=* --section=job_randwrite --spdk_json_conf=$testdir/bdev.json
+timing_exit run_spdk_fio
+
+timing_enter run_spdk_fio_unmap
+run_spdk_fio $testdir/bdev.fio --filename="VirtioScsi1t0:VirtioScsi2t0" --spdk_json_conf=$testdir/bdev.json
+timing_exit run_spdk_fio_unmap
+
+rpc_cmd bdev_nvme_detach_controller Nvme0
+
+trap - SIGINT SIGTERM EXIT
+rm -f $testdir/bdev.json
+
+killprocess $vhost_pid
diff --git a/src/spdk/test/vhost/integrity/integrity_start.sh b/src/spdk/test/vhost/integrity/integrity_start.sh
new file mode 100755
index 000000000..ff3e98bda
--- /dev/null
+++ b/src/spdk/test/vhost/integrity/integrity_start.sh
@@ -0,0 +1,106 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+ctrl_type="spdk_vhost_scsi"
+vm_fs="ext4"
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help Print help and exit"
+ echo " --ctrl-type=TYPE Controller type to use for test:"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " --fs=FS_LIST Filesystems to use for test in VM:"
+ echo " Example: --fs=\"ext4 ntfs ext2\""
+ echo " Default: ext4"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo "-x set -x for script debug"
+ exit 0
+}
+
+function clean_lvol_cfg() {
+ notice "Removing lvol bdev and lvol store"
+ $rpc_py bdev_lvol_delete lvol_store/lvol_bdev
+ $rpc_py bdev_lvol_delete_lvstore -l lvol_store
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
+ fs=*) vm_fs="${OPTARG#*=}" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+vhosttestinit
+
+. $(readlink -e "$(dirname $0)/../common.sh") || exit 1
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR
+
+# Try to kill if any VM remains from previous runs
+vm_kill_all
+
+notice "Starting SPDK vhost"
+vhost_run 0
+notice "..."
+
+# Set up lvols and vhost controllers
+trap 'clean_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR
+notice "Creating lvol store and lvol bdev on top of Nvme0n1"
+lvs_uuid=$($rpc_py bdev_lvol_create_lvstore Nvme0n1 lvol_store)
+$rpc_py bdev_lvol_create lvol_bdev 10000 -l lvol_store
+
+if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ $rpc_py vhost_create_scsi_controller naa.Nvme0n1.0
+ $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1.0 0 lvol_store/lvol_bdev
+elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ $rpc_py vhost_create_blk_controller naa.Nvme0n1.0 lvol_store/lvol_bdev
+fi
+
+# Set up and run VM
+setup_cmd="vm_setup --disk-type=$ctrl_type --force=0"
+setup_cmd+=" --os=$VM_IMAGE"
+setup_cmd+=" --disks=Nvme0n1"
+$setup_cmd
+
+# Run VM
+vm_run 0
+vm_wait_for_boot 300 0
+
+# Run tests on VM
+vm_scp 0 $testdir/integrity_vm.sh root@127.0.0.1:/root/integrity_vm.sh
+vm_exec 0 "/root/integrity_vm.sh $ctrl_type \"$vm_fs\""
+
+notice "Shutting down virtual machine..."
+vm_shutdown_all
+
+clean_lvol_cfg
+
+$rpc_py bdev_nvme_detach_controller Nvme0
+
+notice "Shutting down SPDK vhost app..."
+vhost_kill 0
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/integrity/integrity_vm.sh b/src/spdk/test/vhost/integrity/integrity_vm.sh
new file mode 100755
index 000000000..5e83fef95
--- /dev/null
+++ b/src/spdk/test/vhost/integrity/integrity_vm.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+set -xe
+
+MAKE="make -j$(($(nproc) * 2))"
+
+if [[ $1 == "spdk_vhost_scsi" ]]; then
+ devs=""
+ for entry in /sys/block/sd*; do
+ if grep -Eq '(INTEL|RAWSCSI|LIO-ORG)' $entry/device/vendor; then
+ devs+="$(basename $entry) "
+ fi
+ done
+elif [[ $1 == "spdk_vhost_blk" ]]; then
+ devs=$(
+ cd /sys/block
+ echo vd*
+ )
+fi
+
+fs=$2
+
+trap "exit 1" SIGINT SIGTERM EXIT
+
+for fs in $fs; do
+ for dev in $devs; do
+ i=0
+ parted_cmd="parted -s /dev/${dev}"
+
+ echo "INFO: Creating partition table on disk using: $parted_cmd mklabel gpt"
+ $parted_cmd mklabel gpt
+ while ! ($parted_cmd print | grep -q gpt); do
+ [[ $i -lt 100 ]] || break
+ i=$((i + 1))
+ sleep 0.1
+ done
+ $parted_cmd mkpart primary 2048s 100%
+
+ mkfs_cmd="mkfs.$fs"
+ if [[ $fs == "ntfs" ]]; then
+ mkfs_cmd+=" -f"
+ fi
+ mkfs_cmd+=" /dev/${dev}1"
+ echo "INFO: Creating filesystem using: $mkfs_cmd"
+ i=0
+ until wipefs -a /dev/${dev}1; do
+ [[ $i -lt 100 ]] || break
+ i=$((i + 1))
+ echo "Waiting for /dev/${dev}1"
+ sleep 0.1
+ done
+ $mkfs_cmd
+
+ mkdir -p /mnt/${dev}dir
+ mount -o sync /dev/${dev}1 /mnt/${dev}dir
+
+ fio --name="integrity" --bsrange=4k-512k --iodepth=128 --numjobs=1 --direct=1 \
+ --thread=1 --group_reporting=1 --rw=randrw --rwmixread=70 \
+ --filename=/mnt/${dev}dir/test_file --verify=md5 --do_verify=1 \
+ --verify_backlog=1024 --fsync_on_close=1 --runtime=20 --time_based=1 --size=512m
+
+ # Print out space consumed on target device
+ df -h /dev/$dev
+ done
+
+ for dev in $devs; do
+ umount /mnt/${dev}dir
+ rm -rf /mnt/${dev}dir
+ parted -s /dev/${dev} rm 1
+
+ stats=($(cat /sys/block/$dev/stat))
+ echo ""
+ echo "$dev stats"
+ printf "READ IO cnt: % 8u merges: % 8u sectors: % 8u ticks: % 8u\n" \
+ ${stats[0]} ${stats[1]} ${stats[2]} ${stats[3]}
+ printf "WRITE IO cnt: % 8u merges: % 8u sectors: % 8u ticks: % 8u\n" \
+ ${stats[4]} ${stats[5]} ${stats[6]} ${stats[7]}
+ printf "in flight: % 8u io ticks: % 8u time in queue: % 8u\n" \
+ ${stats[8]} ${stats[9]} ${stats[10]}
+ echo ""
+ done
+done
+
+trap - SIGINT SIGTERM EXIT
diff --git a/src/spdk/test/vhost/lvol/autotest.config b/src/spdk/test/vhost/lvol/autotest.config
new file mode 100644
index 000000000..9b653cd7f
--- /dev/null
+++ b/src/spdk/test/vhost/lvol/autotest.config
@@ -0,0 +1,74 @@
+vhost_0_reactor_mask="[0-31]"
+vhost_0_master_core=0
+
+VM_0_qemu_mask=1
+VM_0_qemu_numa_node=0
+
+VM_1_qemu_mask=2
+VM_1_qemu_numa_node=0
+
+VM_2_qemu_mask=3
+VM_2_qemu_numa_node=0
+
+VM_3_qemu_mask=4
+VM_3_qemu_numa_node=0
+
+VM_4_qemu_mask=5
+VM_4_qemu_numa_node=0
+
+VM_5_qemu_mask=6
+VM_5_qemu_numa_node=0
+
+VM_6_qemu_mask=7
+VM_6_qemu_numa_node=0
+
+VM_7_qemu_mask=8
+VM_7_qemu_numa_node=0
+
+VM_8_qemu_mask=9
+VM_8_qemu_numa_node=0
+
+VM_9_qemu_mask=10
+VM_9_qemu_numa_node=0
+
+VM_10_qemu_mask=11
+VM_10_qemu_numa_node=0
+
+VM_11_qemu_mask=12
+VM_11_qemu_numa_node=0
+
+VM_12_qemu_mask=13
+VM_12_qemu_numa_node=1
+
+VM_13_qemu_mask=14
+VM_13_qemu_numa_node=1
+
+VM_14_qemu_mask=15
+VM_14_qemu_numa_node=1
+
+VM_15_qemu_mask=16
+VM_15_qemu_numa_node=1
+
+VM_16_qemu_mask=17
+VM_16_qemu_numa_node=1
+
+VM_17_qemu_mask=18
+VM_17_qemu_numa_node=1
+
+VM_18_qemu_mask=19
+VM_18_qemu_numa_node=1
+
+VM_19_qemu_mask=20
+VM_19_qemu_numa_node=1
+
+VM_20_qemu_mask=21
+VM_20_qemu_numa_node=1
+
+VM_21_qemu_mask=22
+VM_21_qemu_numa_node=1
+
+VM_22_qemu_mask=23
+VM_22_qemu_numa_node=1
+
+VM_23_qemu_mask=24
+VM_23_qemu_numa_node=1
diff --git a/src/spdk/test/vhost/lvol/lvol_test.sh b/src/spdk/test/vhost/lvol/lvol_test.sh
new file mode 100755
index 000000000..fba38e059
--- /dev/null
+++ b/src/spdk/test/vhost/lvol/lvol_test.sh
@@ -0,0 +1,289 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/scripts/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+vm_count=1
+max_disks=""
+ctrl_type="spdk_vhost_scsi"
+use_fs=false
+nested_lvol=false
+distribute_cores=false
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help Print help and exit"
+ echo " --fio-bin=PATH Path to FIO binary.;"
+ echo " --vm-count=INT Virtual machines to use in test;"
+ echo " Each VM will get one lvol bdev on each NVMe."
+ echo " Default: 1"
+ echo " --max-disks=INT Maximum number of NVMe drives to use in test."
+ echo " Default: will use all available NVMes."
+ echo " --ctrl-type=TYPE Controller type to use for test:"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo " --nested-lvol If enabled will create additional lvol bdev"
+ echo " on each NVMe for use as base device for next"
+ echo " lvol store and lvol bdevs."
+ echo " (NVMe->lvol_store->lvol_bdev->lvol_store->lvol_bdev)"
+ echo " Default: False"
+ echo " --thin-provisioning Create lvol bdevs thin provisioned instead of"
+ echo " allocating space up front"
+ echo " --distribute-cores Use custom config file and run vhost controllers"
+ echo " on different CPU cores instead of single core."
+ echo " Default: False"
+ echo "-x set -x for script debug"
+ echo " --multi-os Run tests on different os types in VMs"
+ echo " Default: False"
+ exit 0
+}
+
+function clean_lvol_cfg() {
+ notice "Removing nested lvol bdevs"
+ for lvol_bdev in "${nest_lvol_bdevs[@]}"; do
+ $rpc_py bdev_lvol_delete $lvol_bdev
+ notice "nested lvol bdev $lvol_bdev removed"
+ done
+
+ notice "Removing nested lvol stores"
+ for lvol_store in "${nest_lvol_stores[@]}"; do
+ $rpc_py bdev_lvol_delete_lvstore -u $lvol_store
+ notice "nested lvol store $lvol_store removed"
+ done
+
+ notice "Removing lvol bdevs"
+ for lvol_bdev in "${lvol_bdevs[@]}"; do
+ $rpc_py bdev_lvol_delete $lvol_bdev
+ notice "lvol bdev $lvol_bdev removed"
+ done
+
+ notice "Removing lvol stores"
+ for lvol_store in "${lvol_stores[@]}"; do
+ $rpc_py bdev_lvol_delete_lvstore -u $lvol_store
+ notice "lvol store $lvol_store removed"
+ done
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
+ vm-count=*) vm_count="${OPTARG#*=}" ;;
+ max-disks=*) max_disks="${OPTARG#*=}" ;;
+ ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
+ nested-lvol) nested_lvol=true ;;
+ distribute-cores) distribute_cores=true ;;
+ thin-provisioning) thin=" -t " ;;
+ multi-os) multi_os=true ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+vhosttestinit
+
+notice "Get NVMe disks:"
+nvmes=($(get_nvme_bdfs))
+
+if [[ -z $max_disks ]]; then
+ max_disks=${#nvmes[@]}
+fi
+
+if ((${#nvmes[@]} < max_disks)); then
+ fail "Number of NVMe drives (${#nvmes[@]}) is lower than number of requested disks for test ($max_disks)"
+fi
+
+if $distribute_cores; then
+ # FIXME: this need to be handled entirely in common.sh
+ source $testdir/autotest.config
+fi
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR
+
+vm_kill_all
+
+notice "running SPDK vhost"
+vhost_run 0
+notice "..."
+
+trap 'clean_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR
+
+lvol_stores=()
+lvol_bdevs=()
+nest_lvol_stores=()
+nest_lvol_bdevs=()
+used_vms=""
+
+# On each NVMe create one lvol store
+for ((i = 0; i < max_disks; i++)); do
+
+ # Create base lvol store on NVMe
+ notice "Creating lvol store on device Nvme${i}n1"
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore Nvme${i}n1 lvs_$i -c 4194304)
+ lvol_stores+=("$ls_guid")
+
+ if $nested_lvol; then
+ free_mb=$(get_lvs_free_mb "$ls_guid")
+ size=$((free_mb / (vm_count + 1)))
+
+ notice "Creating lvol bdev on lvol store: $ls_guid"
+ lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_nest $size $thin)
+
+ notice "Creating nested lvol store on lvol bdev: $lb_name"
+ nest_ls_guid=$($rpc_py bdev_lvol_create_lvstore $lb_name lvs_n_$i -c 4194304)
+ nest_lvol_stores+=("$nest_ls_guid")
+
+ for ((j = 0; j < vm_count; j++)); do
+ notice "Creating nested lvol bdev for VM $i on lvol store $nest_ls_guid"
+ free_mb=$(get_lvs_free_mb "$nest_ls_guid")
+ nest_size=$((free_mb / (vm_count - j)))
+ lb_name=$($rpc_py bdev_lvol_create -u $nest_ls_guid lbd_vm_$j $nest_size $thin)
+ nest_lvol_bdevs+=("$lb_name")
+ done
+ fi
+
+ # Create base lvol bdevs
+ for ((j = 0; j < vm_count; j++)); do
+ notice "Creating lvol bdev for VM $i on lvol store $ls_guid"
+ free_mb=$(get_lvs_free_mb "$ls_guid")
+ size=$((free_mb / (vm_count - j)))
+ lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_vm_$j $size $thin)
+ lvol_bdevs+=("$lb_name")
+ done
+done
+
+bdev_info=$($rpc_py bdev_get_bdevs)
+notice "Configuration after initial set-up:"
+$rpc_py bdev_lvol_get_lvstores
+echo "$bdev_info"
+
+# Set up VMs
+for ((i = 0; i < vm_count; i++)); do
+ vm="vm_$i"
+
+ # Get all lvol bdevs associated with this VM number
+ bdevs=$(jq -r "map(select(.aliases[] | contains(\"$vm\")) | \
+ .aliases[]) | join(\" \")" <<< "$bdev_info")
+ bdevs=($bdevs)
+
+ setup_cmd="vm_setup --disk-type=$ctrl_type --force=$i"
+ if [[ $i%2 -ne 0 ]] && [[ $multi_os ]]; then
+ setup_cmd+=" --os=/home/sys_sgsw/spdk_vhost_CentOS_vm_image.qcow2"
+ else
+ setup_cmd+=" --os=$VM_IMAGE"
+ fi
+
+ # Create single SCSI controller or multiple BLK controllers for this VM
+ if $distribute_cores; then
+ mask="VM_${i}_qemu_mask"
+ mask_arg="--cpumask ${!mask}"
+ fi
+
+ if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ $rpc_py vhost_create_scsi_controller naa.0.$i $mask_arg
+ for ((j = 0; j < ${#bdevs[@]}; j++)); do
+ $rpc_py vhost_scsi_controller_add_target naa.0.$i $j ${bdevs[$j]}
+ done
+ setup_cmd+=" --disks=0"
+ elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ disk=""
+ for ((j = 0; j < ${#bdevs[@]}; j++)); do
+ $rpc_py vhost_create_blk_controller naa.$j.$i ${bdevs[$j]} $mask_arg
+ disk+="${j}:"
+ done
+ disk="${disk::-1}"
+ setup_cmd+=" --disks=$disk"
+ fi
+
+ $setup_cmd
+ used_vms+=" $i"
+done
+
+$rpc_py vhost_get_controllers
+
+# Run VMs
+vm_run $used_vms
+vm_wait_for_boot 300 $used_vms
+
+# Get disk names from VMs and run FIO traffic
+
+fio_disks=""
+for vm_num in $used_vms; do
+ qemu_mask_param="VM_${vm_num}_qemu_mask"
+
+ host_name="VM-$vm_num-${!qemu_mask_param}"
+ vm_exec $vm_num "hostname $host_name"
+ vm_start_fio_server $fio_bin $vm_num
+
+ if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ vm_check_scsi_location $vm_num
+ elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ vm_check_blk_location $vm_num
+ fi
+
+ fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
+done
+
+if [[ $RUN_NIGHTLY -eq 1 ]]; then
+ job_file="default_integrity_nightly.job"
+else
+ job_file="default_integrity.job"
+fi
+# Run FIO traffic
+run_fio $fio_bin --job-file=$rootdir/test/vhost/common/fio_jobs/$job_file --out="$VHOST_DIR/fio_results" $fio_disks
+
+notice "Shutting down virtual machines..."
+vm_shutdown_all
+sleep 2
+
+notice "Cleaning up vhost - remove LUNs, controllers, lvol bdevs and lvol stores"
+if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ for ((i = 0; i < vm_count; i++)); do
+ notice "Removing devices from vhost SCSI controller naa.0.$i"
+ for ((j = 0; j < ${#bdevs[@]}; j++)); do
+ $rpc_py vhost_scsi_controller_remove_target naa.0.$i $j
+ notice "Removed device $j"
+ done
+ notice "Removing vhost SCSI controller naa.0.$i"
+ $rpc_py vhost_delete_controller naa.0.$i
+ done
+elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ for ((i = 0; i < vm_count; i++)); do
+ for ((j = 0; j < ${#bdevs[@]}; j++)); do
+ notice "Removing vhost BLK controller naa.$j.$i"
+ $rpc_py vhost_delete_controller naa.$j.$i
+ notice "Removed naa.$j.$i"
+ done
+ done
+fi
+
+clean_lvol_cfg
+
+$rpc_py bdev_lvol_get_lvstores
+$rpc_py bdev_get_bdevs
+$rpc_py vhost_get_controllers
+
+notice "Shutting down SPDK vhost app..."
+vhost_kill 0
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/manual.sh b/src/spdk/test/vhost/manual.sh
new file mode 100755
index 000000000..187a0225e
--- /dev/null
+++ b/src/spdk/test/vhost/manual.sh
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+CENTOS_VM_IMAGE="/home/sys_sgsw/spdk_vhost_CentOS_vm_image.qcow2"
+DEFAULT_FIO_BIN="/home/sys_sgsw/fio_ubuntu"
+CENTOS_FIO_BIN="/home/sys_sgsw/fio_ubuntu_bak"
+
+case $1 in
+ -h | --help)
+ echo "usage: $(basename $0) TEST_TYPE"
+ echo "Test type can be:"
+ echo " -p |--performance for running a performance test with vhost scsi"
+ echo " -pb|--performance-blk for running a performance test with vhost blk"
+ echo " -hp|--hotplug for running hotplug tests"
+ echo " -shr|--scsi-hot-remove for running scsi hot remove tests"
+ echo " -bhr|--blk-hot-remove for running blk hot remove tests"
+ echo " -h |--help prints this message"
+ echo ""
+ echo "Environment:"
+ echo " VM_IMAGE path to QCOW2 VM image used during test (default: $HOME/vhost_vm_image.qcow2)"
+ echo ""
+ echo "Tests are performed only on Linux machine. For other OS no action is performed."
+ echo ""
+ exit 0
+ ;;
+esac
+
+echo "Running SPDK vhost fio autotest..."
+if [[ $(uname -s) != Linux ]]; then
+ echo ""
+ echo "INFO: Vhost tests are only for Linux machine."
+ echo ""
+ exit 0
+fi
+
+: ${FIO_BIN="$DEFAULT_FIO_BIN"}
+
+if [[ ! -r "${VM_IMAGE}" ]]; then
+ echo ""
+ echo "ERROR: VM image '${VM_IMAGE}' does not exist."
+ echo ""
+ exit 1
+fi
+
+DISKS_NUMBER=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"$1}' | wc -l)
+
+WORKDIR=$(readlink -f $(dirname $0))
+
+case $1 in
+ -hp | --hotplug)
+ echo 'Running hotplug tests suite...'
+ run_test "vhost_hotplug" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
+ --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
+ --vm=2,$VM_IMAGE,Nvme0n1p4:Nvme0n1p5 \
+ --vm=3,$VM_IMAGE,Nvme0n1p6:Nvme0n1p7 \
+ --test-type=spdk_vhost_scsi \
+ --fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job -x
+ ;;
+ -shr | --scsi-hot-remove)
+ echo 'Running scsi hotremove tests suite...'
+ run_test "vhost_scsi_hot_remove" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
+ --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
+ --test-type=spdk_vhost_scsi \
+ --scsi-hotremove-test \
+ --fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
+ ;;
+ -bhr | --blk-hot-remove)
+ echo 'Running blk hotremove tests suite...'
+ run_test "vhost_blk_hot_remove" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
+ --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
+ --test-type=spdk_vhost_blk \
+ --blk-hotremove-test \
+ --fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
+ ;;
+ *)
+ echo "unknown test type: $1"
+ exit 1
+ ;;
+esac
diff --git a/src/spdk/test/vhost/migration/autotest.config b/src/spdk/test/vhost/migration/autotest.config
new file mode 100644
index 000000000..ccda306ea
--- /dev/null
+++ b/src/spdk/test/vhost/migration/autotest.config
@@ -0,0 +1,14 @@
+vhost_0_reactor_mask=["0"]
+vhost_0_master_core=0
+
+vhost_1_reactor_mask=["0"]
+vhost_1_master_core=0
+
+VM_0_qemu_mask=1
+VM_0_qemu_numa_node=0
+
+VM_1_qemu_mask=1
+VM_1_qemu_numa_node=0
+
+VM_2_qemu_mask=1
+VM_2_qemu_numa_node=0
diff --git a/src/spdk/test/vhost/migration/migration-tc1.job b/src/spdk/test/vhost/migration/migration-tc1.job
new file mode 100644
index 000000000..5383b243f
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc1.job
@@ -0,0 +1,25 @@
+[global]
+blocksize_range=4k-512k
+#bs=512k
+iodepth=128
+ioengine=libaio
+filename=
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_fatal=1
+verify_dump=1
+size=100%
+
+[write]
+rw=write
+stonewall
+
+[randread]
+rw=randread
+runtime=10
+time_based
+stonewall
diff --git a/src/spdk/test/vhost/migration/migration-tc1.sh b/src/spdk/test/vhost/migration/migration-tc1.sh
new file mode 100644
index 000000000..6d5a436ef
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc1.sh
@@ -0,0 +1,119 @@
+function migration_tc1_clean_vhost_config() {
+ # Restore trap
+ trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+
+ notice "Removing vhost devices & controllers via RPC ..."
+ # Delete bdev first to remove all LUNs and SCSI targets
+ $rpc bdev_malloc_delete Malloc0
+
+ # Delete controllers
+ $rpc vhost_delete_controller $incoming_vm_ctrlr
+ $rpc vhost_delete_controller $target_vm_ctrlr
+
+ unset -v incoming_vm target_vm incoming_vm_ctrlr target_vm_ctrlr rpc
+}
+
+function migration_tc1_configure_vhost() {
+ # Those are global intentionally - they will be unset in cleanup handler
+ incoming_vm=0
+ target_vm=1
+ incoming_vm_ctrlr=naa.Malloc0.$incoming_vm
+ target_vm_ctrlr=naa.Malloc0.$target_vm
+ rpc="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+ trap 'migration_tc1_error_handler; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+
+ # Construct shared Malloc Bdev
+ $rpc bdev_malloc_create -b Malloc0 128 4096
+
+ # And two controllers - one for each VM. Both are using the same Malloc Bdev as LUN 0
+ $rpc vhost_create_scsi_controller $incoming_vm_ctrlr
+ $rpc vhost_scsi_controller_add_target $incoming_vm_ctrlr 0 Malloc0
+
+ $rpc vhost_create_scsi_controller $target_vm_ctrlr
+ $rpc vhost_scsi_controller_add_target $target_vm_ctrlr 0 Malloc0
+}
+
+function migration_tc1_error_handler() {
+ trap - SIGINT ERR EXIT
+ warning "Migration TC1 ERROR HANDLER"
+ print_backtrace
+ set -x
+
+ vm_kill_all
+ migration_tc1_clean_vhost_config
+
+ warning "Migration TC1 FAILED"
+}
+
+function migration_tc1() {
+ # Use 2 VMs:
+ # incoming VM - the one we want to migrate
+	# target VM - the one which will accept the migration
+ local job_file="$testdir/migration-tc1.job"
+ local log_file
+ log_file="/root/$(basename ${job_file%%.*}).log"
+
+ # Run vhost
+ vhost_run 0
+ migration_tc1_configure_vhost
+
+ notice "Setting up VMs"
+ vm_setup --os="$os_image" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=Malloc0 --migrate-to=$target_vm
+ vm_setup --force=$target_vm --disk-type=spdk_vhost_scsi --disks=Malloc0 --incoming=$incoming_vm
+
+ # Run everything
+ vm_run $incoming_vm $target_vm
+
+ # Wait only for incoming VM, as target is waiting for migration
+ vm_wait_for_boot 300 $incoming_vm
+
+ # Run fio before migration
+ notice "Starting FIO"
+
+ vm_check_scsi_location $incoming_vm
+ run_fio $fio_bin --job-file="$job_file" --no-wait-for-fio --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"
+
+	# Wait a while to give FIO time to issue some I/O
+ sleep 5
+
+ # Check if fio is still running before migration
+ if ! is_fio_running $incoming_vm; then
+ vm_exec $incoming_vm "cat $log_file"
+ error "FIO is not running before migration: process crashed or finished too early"
+ fi
+
+ vm_migrate $incoming_vm
+ sleep 3
+
+ # Check if fio is still running after migration
+ if ! is_fio_running $target_vm; then
+ vm_exec $target_vm "cat $log_file"
+ error "FIO is not running after migration: process crashed or finished too early"
+ fi
+
+ notice "Waiting for fio to finish"
+ local timeout=40
+ while is_fio_running $target_vm; do
+ sleep 1
+ echo -n "."
+ if ((timeout-- == 0)); then
+ error "timeout while waiting for FIO!"
+ fi
+ done
+
+ notice "Fio result is:"
+ vm_exec $target_vm "cat $log_file"
+
+ notice "Migration DONE"
+
+ notice "Shutting down all VMs"
+ vm_shutdown_all
+
+ migration_tc1_clean_vhost_config
+
+ notice "killing vhost app"
+ vhost_kill 0
+
+ notice "Migration TC1 SUCCESS"
+}
diff --git a/src/spdk/test/vhost/migration/migration-tc2.job b/src/spdk/test/vhost/migration/migration-tc2.job
new file mode 100644
index 000000000..df78a3cd6
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc2.job
@@ -0,0 +1,20 @@
+[global]
+blocksize_range=4k-512k
+iodepth=128
+ioengine=libaio
+filename=
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_fatal=1
+verify_dump=1
+verify_backlog=8
+
+[randwrite]
+rw=randwrite
+runtime=15
+time_based
+stonewall
diff --git a/src/spdk/test/vhost/migration/migration-tc2.sh b/src/spdk/test/vhost/migration/migration-tc2.sh
new file mode 100644
index 000000000..aa234d842
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc2.sh
@@ -0,0 +1,203 @@
+source $rootdir/test/nvmf/common.sh
+
+function migration_tc2_cleanup_nvmf_tgt() {
+ local i
+
+ if [[ ! -r "$nvmf_dir/nvmf_tgt.pid" ]]; then
+		warning "Pid file '$nvmf_dir/nvmf_tgt.pid' does not exist."
+ return
+ fi
+
+ if [[ -n "$1" ]]; then
+ trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ pkill --signal $1 -F $nvmf_dir/nvmf_tgt.pid || true
+ sleep 5
+ if ! pkill -F $nvmf_dir/nvmf_tgt.pid; then
+ fail "failed to kill nvmf_tgt app"
+ fi
+ else
+ pkill --signal SIGTERM -F $nvmf_dir/nvmf_tgt.pid || true
+ for ((i = 0; i < 20; i++)); do
+ if ! pkill --signal 0 -F $nvmf_dir/nvmf_tgt.pid; then
+ break
+ fi
+ sleep 0.5
+ done
+
+ if pkill --signal 0 -F $nvmf_dir/nvmf_tgt.pid; then
+ error "nvmf_tgt failed to shutdown"
+ fi
+ fi
+
+ rm $nvmf_dir/nvmf_tgt.pid
+ unset -v nvmf_dir rpc_nvmf
+}
+
+function migration_tc2_cleanup_vhost_config() {
+ timing_enter migration_tc2_cleanup_vhost_config
+
+ trap 'migration_tc2_cleanup_nvmf_tgt SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+
+ notice "Shutting down all VMs"
+ vm_shutdown_all
+
+ notice "Removing vhost devices & controllers via RPC ..."
+ # Delete bdev first to remove all LUNs and SCSI targets
+ $rpc_0 bdev_nvme_detach_controller Nvme0
+ $rpc_0 vhost_delete_controller $incoming_vm_ctrlr
+
+	$rpc_1 bdev_nvme_detach_controller Nvme0
+ $rpc_1 vhost_delete_controller $target_vm_ctrlr
+
+ notice "killing vhost app"
+ vhost_kill 0
+ vhost_kill 1
+
+ unset -v incoming_vm target_vm incoming_vm_ctrlr target_vm_ctrlr
+ unset -v rpc_0 rpc_1
+
+ trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ migration_tc2_cleanup_nvmf_tgt
+
+ timing_exit migration_tc2_cleanup_vhost_config
+}
+
+function migration_tc2_configure_vhost() {
+ timing_enter migration_tc2_configure_vhost
+
+ # Those are global intentionally - they will be unset in cleanup handler
+ nvmf_dir="$VHOST_DIR/nvmf_tgt"
+
+ incoming_vm=1
+ target_vm=2
+ incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
+ target_vm_ctrlr=naa.VhostScsi0.$target_vm
+
+ rpc_nvmf="$rootdir/scripts/rpc.py -s $nvmf_dir/rpc.sock"
+ rpc_0="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+ rpc_1="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"
+
+	# Default cleanup/error handlers will not shut down the nvmf_tgt app, so set it up
+	# here and tear it down in the cleanup function
+ trap 'migration_tc2_error_cleanup; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+
+ # Run nvmf_tgt and two vhost instances:
+ # nvmf_tgt uses core id 2 (-m 0x4)
+ # First uses core id 0
+ # Second uses core id 1
+	# This forces the test to use VM 1 and VM 2.
+ timing_enter start_nvmf_tgt
+ notice "Running nvmf_tgt..."
+ mkdir -p $nvmf_dir
+ rm -f $nvmf_dir/*
+ $SPDK_BIN_DIR/nvmf_tgt -s 512 -m 0x4 -r $nvmf_dir/rpc.sock --wait-for-rpc &
+ local nvmf_tgt_pid=$!
+ echo $nvmf_tgt_pid > $nvmf_dir/nvmf_tgt.pid
+ waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/rpc.sock"
+ $rpc_nvmf framework_start_init
+ $rpc_nvmf nvmf_create_transport -t RDMA -u 8192
+ $rootdir/scripts/gen_nvme.sh --json | $rpc_nvmf load_subsystem_config
+ timing_exit start_nvmf_tgt
+
+ vhost_run 0 "-m 0x1 -s 512 -u"
+ vhost_run 1 "-m 0x2 -s 512 -u"
+
+ local rdma_ip_list
+ local nvmf_target_ip
+ rdma_ip_list=$(get_available_rdma_ips)
+ nvmf_target_ip=$(echo "$rdma_ip_list" | head -n 1)
+
+ if [[ -z "$nvmf_target_ip" ]]; then
+ fail "no NIC for nvmf target"
+ fi
+
+ notice "Configuring nvmf_tgt, vhost devices & controllers via RPC ..."
+
+ # Construct shared bdevs and controllers
+ $rpc_nvmf nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+ $rpc_nvmf nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1
+ $rpc_nvmf nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $nvmf_target_ip -s 4420
+
+ $rpc_0 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
+ $rpc_0 vhost_create_scsi_controller $incoming_vm_ctrlr
+ $rpc_0 vhost_scsi_controller_add_target $incoming_vm_ctrlr 0 Nvme0n1
+
+ $rpc_1 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
+ $rpc_1 vhost_create_scsi_controller $target_vm_ctrlr
+ $rpc_1 vhost_scsi_controller_add_target $target_vm_ctrlr 0 Nvme0n1
+
+ notice "Setting up VMs"
+ vm_setup --os="$os_image" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
+ --migrate-to=$target_vm --memory=1024 --vhost-name=0
+ vm_setup --force=$target_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 --incoming=$incoming_vm --memory=1024 \
+ --vhost-name=1
+
+ # Run everything
+ vm_run $incoming_vm $target_vm
+
+ # Wait only for incoming VM, as target is waiting for migration
+ vm_wait_for_boot 300 $incoming_vm
+
+ notice "Configuration done"
+
+ timing_exit migration_tc2_configure_vhost
+}
+
+function migration_tc2_error_cleanup() {
+ trap - SIGINT ERR EXIT
+ set -x
+
+ vm_kill_all
+ migration_tc2_cleanup_vhost_config
+ notice "Migration TC2 FAILED"
+}
+
+function migration_tc2() {
+ # Use 2 VMs:
+ # incoming VM - the one we want to migrate
+	# target VM - the one which will accept the migration
+ local job_file="$testdir/migration-tc2.job"
+ local log_file
+ log_file="/root/$(basename ${job_file%%.*}).log"
+
+ migration_tc2_configure_vhost
+
+ # Run fio before migration
+ notice "Starting FIO"
+ vm_check_scsi_location $incoming_vm
+ run_fio $fio_bin --job-file="$job_file" --no-wait-for-fio --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"
+
+	# Wait a while to give FIO time to issue some I/O
+ sleep 5
+
+ # Check if fio is still running before migration
+ if ! is_fio_running $incoming_vm; then
+ vm_exec $incoming_vm "cat $log_file"
+ error "FIO is not running before migration: process crashed or finished too early"
+ fi
+
+ vm_migrate $incoming_vm
+ sleep 3
+
+ # Check if fio is still running after migration
+ if ! is_fio_running $target_vm; then
+ vm_exec $target_vm "cat $log_file"
+ error "FIO is not running after migration: process crashed or finished too early"
+ fi
+
+ notice "Waiting for fio to finish"
+ local timeout=40
+ while is_fio_running $target_vm; do
+ sleep 1
+ echo -n "."
+ if ((timeout-- == 0)); then
+ error "timeout while waiting for FIO!"
+ fi
+ done
+
+ notice "Fio result is:"
+ vm_exec $target_vm "cat $log_file"
+
+ migration_tc2_cleanup_vhost_config
+ notice "Migration TC2 SUCCESS"
+}
diff --git a/src/spdk/test/vhost/migration/migration-tc3.job b/src/spdk/test/vhost/migration/migration-tc3.job
new file mode 100644
index 000000000..fe1929662
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc3.job
@@ -0,0 +1,20 @@
+[global]
+blocksize=4k-512k
+iodepth=128
+ioengine=libaio
+filename=
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_fatal=1
+verify_dump=1
+verify_backlog=8
+
+[randwrite]
+rw=randwrite
+runtime=15
+time_based
+stonewall
diff --git a/src/spdk/test/vhost/migration/migration-tc3a.sh b/src/spdk/test/vhost/migration/migration-tc3a.sh
new file mode 100644
index 000000000..b8f06a8d0
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc3a.sh
@@ -0,0 +1,218 @@
+source $rootdir/test/nvmf/common.sh
+source $testdir/autotest.config
+
+incoming_vm=1
+target_vm=2
+incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
+target_vm_ctrlr=naa.VhostScsi0.$target_vm
+share_dir=$TEST_DIR/share
+spdk_repo_share_dir=$TEST_DIR/share_spdk
+job_file=$testdir/migration-tc3.job
+log_file="/root/$(basename ${job_file%%.*}).log"
+
+if [ -z "$MGMT_TARGET_IP" ]; then
+ error "No IP address of target is given"
+fi
+
+if [ -z "$MGMT_INITIATOR_IP" ]; then
+ error "No IP address of initiator is given"
+fi
+
+if [ -z "$RDMA_TARGET_IP" ]; then
+	error "No IP address of target's RDMA-capable NIC is given"
+fi
+
+if [ -z "$RDMA_INITIATOR_IP" ]; then
+	error "No IP address of initiator's RDMA-capable NIC is given"
+fi
+
+function ssh_remote() {
+ local ssh_cmd="sshpass -p root ssh \
+ -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ -o ControlMaster=auto \
+ -o User=root \
+ $1"
+
+ shift
+ $ssh_cmd "$@"
+}
+
+function wait_for_remote() {
+ local timeout=40
+ set +x
+ while [[ ! -f $share_dir/DONE ]]; do
+ echo -n "."
+ if ((timeout-- == 0)); then
+ error "timeout while waiting for FIO!"
+ fi
+ sleep 1
+ done
+ set -x
+ rm -f $share_dir/DONE
+}
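+
+# Note (illustrative only): the remote side signals readiness/completion by
+# creating $share_dir/DONE (see migration-tc3b.sh). This helper just polls for
+# that flag file and removes it; e.g. host_2_start_vhost below starts the remote
+# vhost setup and then calls wait_for_remote to block until the flag appears.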
+
+function check_rdma_connection() {
+ local nic_name
+ nic_name=$(ip -4 -o addr show to $RDMA_TARGET_IP up | cut -d' ' -f2)
+ if [[ -z $nic_name ]]; then
+ error "There is no NIC with IP address $RDMA_TARGET_IP configured"
+ fi
+
+ if ! ls /sys/class/infiniband/*/device/net/$nic_name &> /dev/null; then
+		error "$nic_name with IP $RDMA_TARGET_IP is not an RDMA-capable NIC"
+ fi
+
+}
+
+function host1_cleanup_nvmf() {
+ notice "Shutting down nvmf_tgt on local server"
+ if [[ -n "$1" ]]; then
+ pkill --signal $1 -F $nvmf_dir/nvmf_tgt.pid
+ else
+ pkill -F $nvmf_dir/nvmf_tgt.pid
+ fi
+ rm -f $nvmf_dir/nvmf_tgt.pid
+}
+
+function host1_cleanup_vhost() {
+ trap 'host1_cleanup_nvmf SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ notice "Shutting down VM $incoming_vm"
+ vm_kill $incoming_vm
+
+ notice "Removing bdev & controller from vhost on local server"
+ $rpc_0 bdev_nvme_detach_controller Nvme0
+ $rpc_0 vhost_delete_controller $incoming_vm_ctrlr
+
+ notice "Shutting down vhost app"
+ vhost_kill 0
+
+ host1_cleanup_nvmf
+}
+
+function host1_start_nvmf() {
+ nvmf_dir="$TEST_DIR/nvmf_tgt"
+ rpc_nvmf="$rootdir/scripts/rpc.py -s $nvmf_dir/nvmf_rpc.sock"
+
+ notice "Starting nvmf_tgt instance on local server"
+ mkdir -p $nvmf_dir
+ rm -rf "${nvmf_dir:?}/"*
+
+ trap 'host1_cleanup_nvmf SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ $SPDK_BIN_DIR/nvmf_tgt -s 512 -m 0xF -r $nvmf_dir/nvmf_rpc.sock --wait-for-rpc &
+ nvmf_tgt_pid=$!
+ echo $nvmf_tgt_pid > $nvmf_dir/nvmf_tgt.pid
+ waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/nvmf_rpc.sock"
+ $rpc_nvmf framework_start_init
+ $rpc_nvmf nvmf_create_transport -t RDMA -u 8192
+ $rootdir/scripts/gen_nvme.sh --json | $rpc_nvmf load_subsystem_config
+
+ $rpc_nvmf nvmf_create_subsystem nqn.2018-02.io.spdk:cnode1 -a -s SPDK01
+ $rpc_nvmf nvmf_subsystem_add_ns nqn.2018-02.io.spdk:cnode1 Nvme0n1
+ $rpc_nvmf nvmf_subsystem_add_listener nqn.2018-02.io.spdk:cnode1 -t rdma -a $RDMA_TARGET_IP -s 4420
+}
+
+function host1_start_vhost() {
+ rpc_0="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+ notice "Starting vhost0 instance on local server"
+ trap 'host1_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ vhost_run 0 "-u"
+ $rpc_0 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
+ $rpc_0 vhost_create_scsi_controller $incoming_vm_ctrlr
+ $rpc_0 vhost_scsi_controller_add_target $incoming_vm_ctrlr 0 Nvme0n1
+
+ vm_setup --os="$share_dir/migration.qcow2" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
+ --migrate-to=$target_vm --memory=512 --queue_num=1
+
+ # TODO: Fix loop calculating cpu_num in common.sh
+	# We need -smp 1 and -queue_num 1 for this test to work, but this loop
+	# sometimes calculates the wrong cpu_num.
+ sed -i "s#smp 2#smp 1#g" $VM_BASE_DIR/$incoming_vm/run.sh
+ vm_run $incoming_vm
+ vm_wait_for_boot 300 $incoming_vm
+}
+
+function cleanup_share() {
+ set +e
+ notice "Cleaning up share directory on remote and local server"
+ ssh_remote $MGMT_INITIATOR_IP "umount $VM_BASE_DIR"
+	ssh_remote $MGMT_INITIATOR_IP "umount $share_dir; rm -f $share_dir/*; rm -rf $spdk_repo_share_dir"
+ rm -f $share_dir/migration.qcow2
+ rm -f $share_dir/spdk.tar.gz
+ set -e
+}
+
+function host_1_create_share() {
+ notice "Creating share directory on local server to re-use on remote"
+ mkdir -p $share_dir
+ mkdir -p $VM_BASE_DIR # This dir would've been created later but we need it now
+ rm -rf $share_dir/spdk.tar.gz $share_dir/spdk || true
+ cp $os_image $share_dir/migration.qcow2
+ tar --exclude="*.o" --exclude="*.d" --exclude="*.git" -C $rootdir -zcf $share_dir/spdk.tar.gz .
+}
+
+function host_2_create_share() {
+ # Copy & compile the sources for later use on remote server.
+ ssh_remote $MGMT_INITIATOR_IP "uname -a"
+ ssh_remote $MGMT_INITIATOR_IP "mkdir -p $share_dir"
+ ssh_remote $MGMT_INITIATOR_IP "mkdir -p $spdk_repo_share_dir"
+ ssh_remote $MGMT_INITIATOR_IP "mkdir -p $VM_BASE_DIR"
+ ssh_remote $MGMT_INITIATOR_IP "sshfs -o\
+ ssh_command=\"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto\
+ -i $SPDK_VHOST_SSH_KEY_FILE\" root@$MGMT_TARGET_IP:$VM_BASE_DIR $VM_BASE_DIR"
+ ssh_remote $MGMT_INITIATOR_IP "sshfs -o\
+ ssh_command=\"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto\
+ -i $SPDK_VHOST_SSH_KEY_FILE\" root@$MGMT_TARGET_IP:$share_dir $share_dir"
+ ssh_remote $MGMT_INITIATOR_IP "mkdir -p $spdk_repo_share_dir/spdk"
+ ssh_remote $MGMT_INITIATOR_IP "tar -zxf $share_dir/spdk.tar.gz -C $spdk_repo_share_dir/spdk --strip-components=1"
+ ssh_remote $MGMT_INITIATOR_IP "cd $spdk_repo_share_dir/spdk; make clean; ./configure --with-rdma --enable-debug; make -j40"
+}
+
+function host_2_start_vhost() {
+ ssh_remote $MGMT_INITIATOR_IP "nohup $spdk_repo_share_dir/spdk/test/vhost/migration/migration.sh\
+ --test-cases=3b --os=$share_dir/migration.qcow2\
+ --rdma-tgt-ip=$RDMA_TARGET_IP &>$share_dir/output.log &"
+ notice "Waiting for remote to be done with vhost & VM setup..."
+ wait_for_remote
+}
+
+function setup_share() {
+ trap 'cleanup_share; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ host_1_create_share
+ host_2_create_share
+}
+
+function migration_tc3() {
+ check_rdma_connection
+ setup_share
+ host1_start_nvmf
+ host1_start_vhost
+ host_2_start_vhost
+
+ # Do migration
+ notice "Starting fio on local VM"
+ vm_check_scsi_location $incoming_vm
+
+ run_fio $fio_bin --job-file="$job_file" --no-wait-for-fio --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"
+ sleep 5
+
+ if ! is_fio_running $incoming_vm; then
+ vm_exec $incoming_vm "cat $log_file"
+ error "Fio not running on local VM before starting migration!"
+ fi
+
+ vm_migrate $incoming_vm $RDMA_INITIATOR_IP
+ sleep 1
+
+ # Verify migration on remote host and clean up vhost
+ ssh_remote $MGMT_INITIATOR_IP "pkill -CONT -F $TEST_DIR/tc3b.pid"
+ notice "Waiting for remote to finish FIO on VM and clean up..."
+ wait_for_remote
+
+ # Clean up local stuff
+ host1_cleanup_vhost
+ cleanup_share
+}
+
+migration_tc3
diff --git a/src/spdk/test/vhost/migration/migration-tc3b.sh b/src/spdk/test/vhost/migration/migration-tc3b.sh
new file mode 100644
index 000000000..22d54df73
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration-tc3b.sh
@@ -0,0 +1,77 @@
+# The -m option is needed to be able to use the "suspend" command,
+# as we are using a non-interactive session to connect to the remote host.
+# Without -m it would not be possible to suspend the process.
+set -m
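+# Illustrative sketch of the handshake that job control enables here (both
+# commands appear later in tc3a/tc3b and are shown only to clarify why -m is needed):
+#   suspend -f                            # tc3b pauses itself after vhost/VM setup
+#   pkill -CONT -F $TEST_DIR/tc3b.pid     # tc3a resumes it once migration finishes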
+source $testdir/autotest.config
+
+incoming_vm=1
+target_vm=2
+target_vm_ctrl=naa.VhostScsi0.$target_vm
+rpc="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"
+share_dir=$VHOST_DIR/share
+
+function host_2_cleanup_vhost() {
+ notice "Shutting down VM $target_vm"
+ vm_kill $target_vm
+
+ notice "Removing bdev & controller from vhost 1 on remote server"
+ $rpc bdev_nvme_detach_controller Nvme0
+ $rpc vhost_delete_controller $target_vm_ctrl
+
+ notice "Shutting down vhost app"
+ vhost_kill 1
+ sleep 1
+}
+
+function host_2_start_vhost() {
+ echo "BASE DIR $VHOST_DIR"
+ vhost_work_dir=$VHOST_DIR/vhost1
+ mkdir -p $vhost_work_dir
+ rm -f $vhost_work_dir/*
+
+ notice "Starting vhost 1 instance on remote server"
+ trap 'host_2_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+ vhost_run 1 "-u"
+
+ $rpc bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
+ $rpc vhost_create_scsi_controller $target_vm_ctrl
+ $rpc vhost_scsi_controller_add_target $target_vm_ctrl 0 Nvme0n1
+
+ vm_setup --os="$os_image" --force=$target_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
+ --memory=512 --vhost-name=1 --incoming=$incoming_vm
+ vm_run $target_vm
+ sleep 1
+
+	# Use this file as a flag to notify the main script
+	# that setup on the remote server is done
+ echo "DONE" > $share_dir/DONE
+}
+
+echo $$ > $VHOST_DIR/tc3b.pid
+host_2_start_vhost
+suspend -f
+
+if ! vm_os_booted $target_vm; then
+ fail "VM$target_vm is not running!"
+fi
+
+if ! is_fio_running $target_vm; then
+ vm_exec $target_vm "cat /root/migration-tc3.log"
+ error "FIO is not running on remote server after migration!"
+fi
+
+notice "Waiting for FIO to finish on remote server VM"
+timeout=40
+while is_fio_running $target_vm; do
+ sleep 1
+ echo -n "."
+ if ((timeout-- == 0)); then
+ error "timeout while waiting for FIO!"
+ fi
+done
+
+notice "FIO result after migration:"
+vm_exec $target_vm "cat /root/migration-tc3.log"
+
+host_2_cleanup_vhost
+echo "DONE" > $share_dir/DONE
diff --git a/src/spdk/test/vhost/migration/migration.sh b/src/spdk/test/vhost/migration/migration.sh
new file mode 100755
index 000000000..8f461e6ca
--- /dev/null
+++ b/src/spdk/test/vhost/migration/migration.sh
@@ -0,0 +1,143 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $testdir/migration-tc1.sh
+source $testdir/migration-tc2.sh
+
+vms=()
+declare -A vms_os
+declare -A vms_raw_disks
+declare -A vms_ctrlrs
+declare -A vms_ctrlrs_disks
+
+# By default use Guest fio
+fio_bin=""
+MGMT_TARGET_IP=""
+MGMT_INITIATOR_IP=""
+RDMA_TARGET_IP=""
+RDMA_INITIATOR_IP=""
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated test of live migration."
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+	echo "    --os=PATH             OS image to use for the test VMs."
+ echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
+ echo " --mgmt-tgt-ip=IP IP address of target."
+ echo " --mgmt-init-ip=IP IP address of initiator."
+	echo "    --rdma-tgt-ip=IP      IP address of target's RDMA-capable NIC."
+	echo "    --rdma-init-ip=IP     IP address of initiator's RDMA-capable NIC."
+ echo "-x set -x for script debug"
+}
+
+for param in "$@"; do
+ case "$param" in
+ --help | -h)
+ usage $0
+ exit 0
+ ;;
+ --os=*) os_image="${param#*=}" ;;
+ --fio-bin=*) fio_bin="${param}" ;;
+ --mgmt-tgt-ip=*) MGMT_TARGET_IP="${param#*=}" ;;
+ --mgmt-init-ip=*) MGMT_INITIATOR_IP="${param#*=}" ;;
+ --rdma-tgt-ip=*) RDMA_TARGET_IP="${param#*=}" ;;
+ --rdma-init-ip=*) RDMA_INITIATOR_IP="${param#*=}" ;;
+ -x) set -x ;;
+ -v) SPDK_VHOST_VERBOSE=true ;;
+ *)
+ usage $0 "Invalid argument '$param'"
+ exit 1
+ ;;
+ esac
+done
+
+vhosttestinit
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
+
+function vm_monitor_send() {
+ local vm_num=$1
+ local cmd_result_file="$2"
+ local vm_dir="$VM_DIR/$1"
+ local vm_monitor_port
+ vm_monitor_port=$(cat $vm_dir/monitor_port)
+
+ [[ -n "$vm_monitor_port" ]] || fail "No monitor port!"
+
+ shift 2
+ nc 127.0.0.1 $vm_monitor_port "$@" > $cmd_result_file
+}
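+
+# Example (illustrative): the helper above can be used directly to query a VM's
+# QEMU monitor, which is exactly what vm_migrate below does:
+#   echo -e "info migrate\nquit" | vm_monitor_send 0 /tmp/monitor_result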
+
+# Migrate VM $1
+function vm_migrate() {
+ local from_vm_dir="$VM_DIR/$1"
+ local target_vm_dir
+ local target_vm
+ local target_vm_migration_port
+ target_vm_dir="$(readlink -e $from_vm_dir/vm_migrate_to)"
+ target_vm="$(basename $target_vm_dir)"
+ target_vm_migration_port="$(cat $target_vm_dir/migration_port)"
+ if [[ -n "$2" ]]; then
+ local target_ip=$2
+ else
+ local target_ip="127.0.0.1"
+ fi
+
+ # Sanity check if target VM (QEMU) is configured to accept source VM (QEMU) migration
+ if [[ "$(readlink -e ${target_vm_dir}/vm_incoming)" != "$(readlink -e ${from_vm_dir})" ]]; then
+ fail "source VM $1 or destination VM is not properly configured for live migration"
+ fi
+
+ timing_enter vm_migrate
+	notice "Migrating VM $1 to VM $(basename $target_vm_dir)"
+ echo -e \
+ "migrate_set_speed 1g\n" \
+ "migrate tcp:$target_ip:$target_vm_migration_port\n" \
+ "info migrate\n" \
+ "quit" | vm_monitor_send $1 "$from_vm_dir/migration_result"
+
+ # Post migration checks:
+ if ! grep "Migration status: completed" $from_vm_dir/migration_result -q; then
+ cat $from_vm_dir/migration_result
+ fail "Migration failed:\n"
+ fi
+
+ # Don't perform the following check if target VM is on remote server
+ # as we won't have access to it.
+ # If you need this check then perform it on your own.
+ if [[ "$target_ip" == "127.0.0.1" ]]; then
+ if ! vm_os_booted $target_vm; then
+			cat $target_vm_dir/cont_result
+			fail "VM$target_vm is not running"
+ fi
+ fi
+
+ notice "Migration complete"
+ timing_exit vm_migrate
+}
+
+function is_fio_running() {
+ xtrace_disable
+
+ if vm_exec $1 'kill -0 $(cat /root/fio.pid)'; then
+ local ret=0
+ else
+ local ret=1
+ fi
+
+ xtrace_restore
+ return $ret
+}
+
+run_test "vhost_migration_tc1" migration_tc1
+run_test "vhost_migration_tc2" migration_tc2
+
+trap - SIGINT ERR EXIT
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/other/invalid.config b/src/spdk/test/vhost/other/invalid.config
new file mode 100644
index 000000000..58b703068
--- /dev/null
+++ b/src/spdk/test/vhost/other/invalid.config
@@ -0,0 +1,18 @@
+# SPDK vhost configuration file
+#
+# Please write all parameters using ASCII.
+# The parameter must be quoted if it includes whitespace.
+
+# Configuration syntax:
+# Leading whitespace is ignored.
+# Lines starting with '#' are comments.
+# Lines ending with '\' are concatenated with the next line.
+# Bracketed ([]) names define sections
+
+[Global]
+ # Instance ID for multi-process support
+ # Default: 0
+ #InstanceID 0
+
+[Null]
+ Dev null0 512 513
diff --git a/src/spdk/test/vhost/other/negative.sh b/src/spdk/test/vhost/other/negative.sh
new file mode 100755
index 000000000..81461c26f
--- /dev/null
+++ b/src/spdk/test/vhost/other/negative.sh
@@ -0,0 +1,209 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+	echo "Shortcut script for running vhost negative (error path) tests."
+	echo "Usage: $(basename $1) [-x] [-h|--help]"
+ echo "-h, --help print help and exit"
+ echo "-x Set -x for script debug"
+
+ exit 0
+}
+
+run_in_background=false
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ conf-dir=*) CONF_DIR="${OPTARG#*=}" ;;
+				*) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x) set -x ;;
+ *) usage $0 "Invalid argument '$optchar'" ;;
+ esac
+done
+
+vhosttestinit
+
+trap error_exit ERR
+
+notice "Testing vhost command line arguments"
+# Printing help will force vhost to exit without error
+"${VHOST_APP[@]}" -c /path/to/non_existing_file/conf -S $testdir -e 0x0 -s 1024 -d -h --silence-noticelog
+
+# Test the vhost pid file option. Vhost will exit with an error because an invalid config path is given
+if "${VHOST_APP[@]}" -c /path/to/non_existing_file/conf -f "$VHOST_DIR/vhost/vhost.pid"; then
+ fail "vhost started when specifying invalid config file"
+fi
+rm -f $VHOST_DIR/vhost/vhost.pid
+
+# Test vhost start with an invalid config. Vhost will exit with an error because bdev module init fails
+if "${VHOST_APP[@]}" -c $testdir/invalid.config; then
+ fail "vhost started when specifying invalid config file"
+fi
+
+# Expecting vhost to fail if an incorrect argument is given
+if "${VHOST_APP[@]}" -x -h; then
+ fail "vhost started with invalid -x command line option"
+fi
+
+# Passing trace flags when SPDK is built without CONFIG_DEBUG=y makes vhost exit with an error
+if ! "${VHOST_APP[@]}" -t vhost_scsi -h; then
+	warning "vhost did not start with trace flags enabled, but ignoring this as it might not be a debug build"
+fi
+
+# Run with valid config and try some negative rpc calls
+notice "==============="
+notice ""
+notice "running SPDK"
+notice ""
+vhost_run 0
+notice ""
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+$rpc_py bdev_malloc_create -b Malloc0 128 4096
+$rpc_py bdev_malloc_create -b Malloc1 128 4096
+$rpc_py bdev_malloc_create -b Malloc2 128 4096
+$rpc_py bdev_split_create Malloc2 8
+
+# Try to get nonexistent vhost controller
+if $rpc_py vhost_get_controllers -n nonexistent; then
+ error "vhost returned controller that does not exist"
+fi
+
+notice "Set coalescing for nonexistent controller"
+if $rpc_py vhost_controller_set_coalescing nonexistent 1 100; then
+ error "Set coalescing for nonexistent controller should fail"
+fi
+
+# General commands
+notice "Trying to remove nonexistent controller"
+if $rpc_py vhost_delete_controller unk0 > /dev/null; then
+ error "Removing nonexistent controller succeeded, but it shouldn't"
+fi
+
+# SCSI
+notice "Trying to create scsi controller with incorrect cpumask"
+if $rpc_py vhost_create_scsi_controller vhost.invalid.cpumask --cpumask 0x2; then
+ error "Creating scsi controller with incorrect cpumask succeeded, but it shouldn't"
+fi
+
+notice "Trying to remove device from nonexistent scsi controller"
+if $rpc_py vhost_scsi_controller_remove_target vhost.nonexistent.name 0; then
+ error "Removing device from nonexistent scsi controller succeeded, but it shouldn't"
+fi
+
+notice "Trying to add device to nonexistent scsi controller"
+if $rpc_py vhost_scsi_controller_add_target vhost.nonexistent.name 0 Malloc0; then
+ error "Adding device to nonexistent scsi controller succeeded, but it shouldn't"
+fi
+
+notice "Trying to create scsi controller with incorrect name"
+if $rpc_py vhost_create_scsi_controller .; then
+ error "Creating scsi controller with incorrect name succeeded, but it shouldn't"
+fi
+
+notice "Creating controller naa.0"
+$rpc_py vhost_create_scsi_controller naa.0
+
+notice "Pass invalid parameter for vhost_controller_set_coalescing"
+if $rpc_py vhost_controller_set_coalescing naa.0 -1 100; then
+ error "Set coalescing with invalid parameter should fail"
+fi
+
+notice "Trying to add nonexistent device to scsi controller"
+if $rpc_py vhost_scsi_controller_add_target naa.0 0 nonexistent_bdev; then
+ error "Adding nonexistent device to scsi controller succeeded, but it shouldn't"
+fi
+
+notice "Adding device to naa.0 with slot number exceeding max"
+if $rpc_py vhost_scsi_controller_add_target naa.0 8 Malloc0; then
+ error "Adding device to naa.0 should fail but succeeded"
+fi
+
+for i in $(seq 0 7); do
+ $rpc_py vhost_scsi_controller_add_target naa.0 -1 Malloc2p$i
+done
+notice "All slots are occupied. Try to add one more device to naa.0"
+if $rpc_py vhost_scsi_controller_add_target naa.0 -1 Malloc0; then
+ error "Adding device to naa.0 should fail but succeeded"
+fi
+for i in $(seq 0 7); do
+ $rpc_py vhost_scsi_controller_remove_target naa.0 $i
+done
+
+notice "Adding initial device (0) to naa.0"
+$rpc_py vhost_scsi_controller_add_target naa.0 0 Malloc0
+
+notice "Adding device to naa.0 with slot number 0"
+if $rpc_py vhost_scsi_controller_add_target naa.0 0 Malloc1; then
+ error "Adding device to naa.0 occupied slot should fail but succeeded"
+fi
+
+notice "Trying to remove nonexistent device on existing controller"
+if $rpc_py vhost_scsi_controller_remove_target naa.0 1 > /dev/null; then
+ error "Removing nonexistent device (1) from controller naa.0 succeeded, but it shouldn't"
+fi
+
+notice "Trying to remove existing device from a controller"
+$rpc_py vhost_scsi_controller_remove_target naa.0 0
+
+notice "Trying to remove a just-deleted device from a controller again"
+if $rpc_py vhost_scsi_controller_remove_target naa.0 0 > /dev/null; then
+ error "Removing device 0 from controller naa.0 succeeded, but it shouldn't"
+fi
+
+notice "Trying to remove scsi target with invalid slot number"
+if $rpc_py vhost_scsi_controller_remove_target naa.0 8 > /dev/null; then
+ error "Removing device 8 from controller naa.0 succeeded, but it shouldn't"
+fi
+
+notice "Re-adding device 0 to naa.0"
+$rpc_py vhost_scsi_controller_add_target naa.0 0 Malloc0
+
+# BLK
+notice "Trying to create block controller with incorrect cpumask"
+if $rpc_py vhost_create_blk_controller vhost.invalid.cpumask Malloc0 --cpumask 0x2; then
+ error "Creating block controller with incorrect cpumask succeeded, but it shouldn't"
+fi
+
+notice "Trying to remove nonexistent block controller"
+if $rpc_py vhost_delete_controller vhost.nonexistent.name; then
+ error "Removing nonexistent block controller succeeded, but it shouldn't"
+fi
+
+notice "Trying to create block controller with incorrect name"
+if $rpc_py vhost_create_blk_controller . Malloc0; then
+ error "Creating block controller with incorrect name succeeded, but it shouldn't"
+fi
+
+notice "Trying to create block controller with nonexistent bdev"
+if $rpc_py vhost_create_blk_controller blk_ctrl Malloc3; then
+ error "Creating block controller with nonexistent bdev succeeded, but shouldn't"
+fi
+
+notice "Trying to create block controller with claimed bdev"
+$rpc_py bdev_lvol_create_lvstore Malloc0 lvs
+if $rpc_py vhost_create_blk_controller blk_ctrl Malloc0; then
+ error "Creating block controller with claimed bdev succeeded, but shouldn't"
+fi
+$rpc_py bdev_lvol_delete_lvstore -l lvs
+
+notice "Testing done -> shutting down"
+notice "killing vhost app"
+vhost_kill 0
+
+notice "EXIT DONE"
+notice "==============="
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/perf_bench/vhost_perf.sh b/src/spdk/test/vhost/perf_bench/vhost_perf.sh
new file mode 100755
index 000000000..98c6a8e3c
--- /dev/null
+++ b/src/spdk/test/vhost/perf_bench/vhost_perf.sh
@@ -0,0 +1,473 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+vhost_num="0"
+vm_memory=2048
+vm_sar_enable=false
+host_sar_enable=false
+sar_delay="0"
+sar_interval="1"
+sar_count="10"
+vm_throttle=""
+ctrl_type="spdk_vhost_scsi"
+use_split=false
+kernel_cpus=""
+run_precondition=false
+lvol_stores=()
+lvol_bdevs=()
+split_bdevs=()
+used_vms=""
+wwpn_prefix="naa.5001405bc6498"
+packed_ring=false
+
+fio_iterations=1
+fio_gtod=""
+precond_fio_bin=$CONFIG_FIO_SOURCE_DIR/fio
+disk_map=""
+
+disk_cfg_bdfs=()
+disk_cfg_spdk_names=()
+disk_cfg_splits=()
+disk_cfg_vms=()
+disk_cfg_kernel_names=()
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for doing automated test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help Print help and exit"
+	echo "    --fio-bin=PATH            Path to FIO binary on host."
+ echo " Binary will be copied to VM, static compilation"
+ echo " of binary is recommended."
+ echo " --fio-jobs=PATH Comma separated list of fio config files to use for test."
+ echo " --fio-iterations=INT Number of times to run specified workload."
+ echo " --fio-gtod-reduce Enable fio gtod_reduce option in test."
+ echo " --vm-memory=INT Amount of RAM memory (in MB) to pass to a single VM."
+ echo " Default: 2048 MB"
+ echo " --vm-image=PATH OS image to use for running the VMs."
+ echo " Default: \$HOME/vhost_vm_image.qcow2"
+ echo " --vm-sar-enable Measure CPU utilization in guest VMs using sar."
+ echo " --host-sar-enable Measure CPU utilization on host using sar."
+ echo " --sar-delay=INT Wait for X seconds before starting SAR measurement. Default: 0."
+ echo " --sar-interval=INT Interval (seconds) argument for SAR. Default: 1s."
+ echo " --sar-count=INT Count argument for SAR. Default: 10."
+ echo " --vm-throttle-iops=INT I/Os throttle rate in IOPS for each device on the VMs."
+ echo " --ctrl-type=TYPE Controller type to use for test:"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo " kernel_vhost - use kernel vhost scsi"
+ echo " Default: spdk_vhost_scsi"
+ echo " --packed-ring Use packed ring support. Requires Qemu 4.2.0 or greater. Default: disabled."
+ echo " --use-split Use split vbdevs instead of Logical Volumes"
+ echo " --limit-kernel-vhost=INT Limit kernel vhost to run only on a number of CPU cores."
+	echo "    --run-precondition        Precondition lvols after creating. Default: false."
+ echo " --precond-fio-bin FIO binary used for SPDK fio plugin precondition. Default: $CONFIG_FIO_SOURCE_DIR/fio."
+ echo " --custom-cpu-cfg=PATH Custom CPU config for test."
+ echo " Default: spdk/test/vhost/common/autotest.config"
+ echo " --disk-map Disk map for given test. Specify which disks to use, their SPDK name,"
+ echo " how many times to split them and which VMs should be attached to created bdevs."
+ echo " Example:"
+ echo " NVME PCI BDF,Spdk Bdev Name,Split Count,VM List"
+ echo " 0000:1a:00.0,Nvme0,2,0 1"
+ echo " 0000:1b:00.0,Nvme1,2,2 3"
+ echo "-x set -x for script debug"
+ exit 0
+}
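+
+# Example invocation (illustrative only; paths and the disk map file are assumptions):
+#   ./vhost_perf.sh --ctrl-type=spdk_vhost_scsi --disk-map=/path/to/disk_map.csv \
+#       --fio-jobs=/path/to/perf.job --fio-iterations=2 --vm-image=$HOME/vhost_vm_image.qcow2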
+
+function cleanup_lvol_cfg() {
+ notice "Removing lvol bdevs"
+ for lvol_bdev in "${lvol_bdevs[@]}"; do
+ $rpc_py bdev_lvol_delete $lvol_bdev
+ notice "lvol bdev $lvol_bdev removed"
+ done
+
+ notice "Removing lvol stores"
+ for lvol_store in "${lvol_stores[@]}"; do
+ $rpc_py bdev_lvol_delete_lvstore -u $lvol_store
+ notice "lvol store $lvol_store removed"
+ done
+}
+
+function cleanup_split_cfg() {
+ notice "Removing split vbdevs"
+ for disk in "${disk_cfg_spdk_names[@]}"; do
+ $rpc_py bdev_split_delete ${disk}n1
+ done
+}
+
+function cleanup_parted_config() {
+ notice "Removing parted disk configuration"
+ for disk in "${disk_cfg_kernel_names[@]}"; do
+ parted -s /dev/${disk}n1 rm 1
+ done
+}
+
+function cleanup_kernel_vhost() {
+	notice "Cleaning kernel vhost configuration"
+ targetcli clearconfig confirm=True
+ cleanup_parted_config
+}
+
+function create_vm() {
+ vm_num=$1
+ setup_cmd="vm_setup --disk-type=$ctrl_type --force=$vm_num --memory=$vm_memory --os=$VM_IMAGE"
+ if [[ "$ctrl_type" == "kernel_vhost" ]]; then
+ x=$(printf %03d $vm_num)
+ setup_cmd+=" --disks=${wwpn_prefix}${x}"
+ else
+ setup_cmd+=" --disks=0"
+ fi
+
+ if $packed_ring; then
+ setup_cmd+=" --packed"
+ fi
+
+ $setup_cmd
+ used_vms+=" $vm_num"
+ echo "Added to used vms"
+ echo $used_vms
+}
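+
+# Note: for kernel_vhost the VM disk name above is the WWPN prefix completed with
+# the zero-padded VM number, e.g. VM 2 gets ${wwpn_prefix}002 -> naa.5001405bc6498002.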
+
+function create_spdk_controller() {
+ vm_num=$1
+ bdev=$2
+
+ if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ $rpc_py vhost_create_scsi_controller naa.0.$vm_num
+ notice "Created vhost scsi controller naa.0.$vm_num"
+ $rpc_py vhost_scsi_controller_add_target naa.0.$vm_num 0 $bdev
+ notice "Added LUN 0/$bdev to controller naa.0.$vm_num"
+ elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ if $packed_ring; then
+ p_opt="-p"
+ fi
+
+ $rpc_py vhost_create_blk_controller naa.0.$vm_num $bdev $p_opt
+ notice "Created vhost blk controller naa.0.$vm_num $bdev"
+ fi
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
+ fio-jobs=*) fio_jobs="${OPTARG#*=}" ;;
+ fio-iterations=*) fio_iterations="${OPTARG#*=}" ;;
+ fio-gtod-reduce) fio_gtod="--gtod-reduce" ;;
+ vm-memory=*) vm_memory="${OPTARG#*=}" ;;
+ vm-image=*) VM_IMAGE="${OPTARG#*=}" ;;
+ vm-sar-enable) vm_sar_enable=true ;;
+ host-sar-enable) host_sar_enable=true ;;
+ sar-delay=*) sar_delay="${OPTARG#*=}" ;;
+ sar-interval=*) sar_interval="${OPTARG#*=}" ;;
+ sar-count=*) sar_count="${OPTARG#*=}" ;;
+ vm-throttle-iops=*) vm_throttle="${OPTARG#*=}" ;;
+ ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
+ packed-ring) packed_ring=true ;;
+ use-split) use_split=true ;;
+ run-precondition) run_precondition=true ;;
+ precond-fio-bin=*) precond_fio_bin="${OPTARG#*=}" ;;
+ limit-kernel-vhost=*) kernel_cpus="${OPTARG#*=}" ;;
+ custom-cpu-cfg=*) custom_cpu_cfg="${OPTARG#*=}" ;;
+ disk-map=*) disk_map="${OPTARG#*=}" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+if [[ -n $custom_cpu_cfg ]]; then
+ source $custom_cpu_cfg
+ vhost_reactor_mask="vhost_${vhost_num}_reactor_mask"
+ vhost_reactor_mask="${!vhost_reactor_mask}"
+ vhost_master_core="vhost_${vhost_num}_master_core"
+ vhost_master_core="${!vhost_master_core}"
+fi
+
+if [[ -z $fio_jobs ]]; then
+ error "No FIO job specified!"
+fi
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
+
+if [[ -z $disk_map ]]; then
+ fail "No disk map provided for test. Exiting."
+fi
+
+# ===== Precondition NVMes if specified =====
+if [[ $run_precondition == true ]]; then
+	# Using the same precondition routine is possible for lvols thanks
+	# to the --clear-method option. Lvols should not UNMAP on creation.
+ json_cfg=$rootdir/nvme.json
+ cat <<- JSON > "$json_cfg"
+ {"subsystems":[
+ $("$rootdir/scripts/gen_nvme.sh" --json)
+ ]}
+ JSON
+ mapfile -t nvmes < <(grep -oP "Nvme\d+" "$json_cfg")
+ fio_filename=$(printf ":%sn1" "${nvmes[@]}")
+ fio_filename=${fio_filename:1}
+ $precond_fio_bin --name="precondition" \
+ --ioengine="${rootdir}/build/fio/spdk_bdev" \
+ --rw="write" --spdk_json_conf="$json_cfg" --thread="1" \
+ --group_reporting --direct="1" --size="100%" --loops="2" --bs="256k" \
+ --iodepth=32 --filename="${fio_filename}" || true
+fi
+
+set +x
+readarray disk_cfg < $disk_map
+for line in "${disk_cfg[@]}"; do
+ echo $line
+ IFS=","
+ s=($line)
+ disk_cfg_bdfs+=(${s[0]})
+ disk_cfg_spdk_names+=(${s[1]})
+ disk_cfg_splits+=(${s[2]})
+ disk_cfg_vms+=("${s[3]}")
+
+ # Find kernel nvme names
+ if [[ "$ctrl_type" == "kernel_vhost" ]]; then
+ tmp=$(find /sys/devices/pci* -name ${s[0]} -print0 | xargs sh -c 'ls $0/nvme')
+ disk_cfg_kernel_names+=($tmp)
+ IFS=" "
+ fi
+done
+unset IFS
+set -x
+
+if [[ "$ctrl_type" == "kernel_vhost" ]]; then
+ notice "Configuring kernel vhost..."
+ trap 'vm_kill_all; sleep 1; cleanup_kernel_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
+
+ # Split disks using parted for kernel vhost
+ newline=$'\n'
+ backstores=()
+ for ((i = 0; i < ${#disk_cfg_kernel_names[@]}; i++)); do
+ nvme=${disk_cfg_kernel_names[$i]}
+ splits=${disk_cfg_splits[$i]}
+ notice " Creating extended partition on disk /dev/${nvme}n1"
+ parted -s /dev/${nvme}n1 mklabel msdos
+ parted -s /dev/${nvme}n1 mkpart extended 2048s 100%
+
+ part_size=$((100 / ${disk_cfg_splits[$i]})) # Split 100% of disk into roughly even parts
+ echo " Creating ${splits} partitions of relative disk size ${part_size}"
+ for p in $(seq 0 $((splits - 1))); do
+ p_start=$((p * part_size))
+ p_end=$((p_start + part_size))
+ parted -s /dev/${nvme}n1 mkpart logical ${p_start}% ${p_end}%
+ sleep 3
+ done
+
+ # Prepare kernel vhost configuration
+ # Below grep: match only NVMe partitions which are not "Extended" type.
+ # For example: will match nvme0n1p15 but not nvme0n1p1
+ partitions=$(find /dev -name "${nvme}n1*" | sort --version-sort | grep -P 'p(?!1$)\d+')
+ # Create block backstores for vhost kernel process
+ for p in $partitions; do
+ backstore_name=$(basename $p)
+ backstores+=("$backstore_name")
+ targetcli backstores/block create $backstore_name $p
+ done
+ partitions=($partitions)
+
+ # Create kernel vhost controllers and add LUNs
+ # Setup VM configurations
+ vms_to_run=(${disk_cfg_vms[i]})
+ for ((j = 0; j < ${#vms_to_run[@]}; j++)); do
+			# The WWPN prefix is missing its last 3 characters. Complete it
+			# using the zero-padded block backstore (VM) number
+ x=$(printf %03d ${vms_to_run[$j]})
+ wwpn="${wwpn_prefix}${x}"
+ targetcli vhost/ create $wwpn
+ targetcli vhost/$wwpn/tpg1/luns create /backstores/block/$(basename ${partitions[$j]})
+ create_vm ${vms_to_run[j]}
+ sleep 1
+ done
+ done
+ targetcli ls
+else
+ notice "Configuring SPDK vhost..."
+ vhost_run "${vhost_num}" "--no-gen-nvme" "-p ${vhost_master_core}" "-m ${vhost_reactor_mask}"
+ notice "..."
+
+ if [[ $use_split == true ]]; then
+ notice "Configuring split bdevs configuration..."
+ trap 'cleanup_split_cfg; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
+ for ((i = 0; i < ${#disk_cfg_bdfs[@]}; i++)); do
+ nvme_bdev=$($rpc_py bdev_nvme_attach_controller -b ${disk_cfg_spdk_names[$i]} -t pcie -a ${disk_cfg_bdfs[$i]})
+ notice "Created NVMe Bdev: $nvme_bdev with BDF ${disk_cfg_bdfs[$i]}"
+
+ splits=$($rpc_py bdev_split_create $nvme_bdev ${disk_cfg_splits[$i]})
+ splits=($splits)
+ notice "Created splits: ${splits[*]} on Bdev ${nvme_bdev}"
+ for s in "${splits[@]}"; do
+ split_bdevs+=($s)
+ done
+
+ vms_to_run=(${disk_cfg_vms[i]})
+ for ((j = 0; j < ${#vms_to_run[@]}; j++)); do
+ notice "Setting up VM ${vms_to_run[j]}"
+ create_spdk_controller "${vms_to_run[j]}" ${splits[j]}
+ create_vm ${vms_to_run[j]}
+ done
+ echo " "
+ done
+ bdevs=("${split_bdevs[@]}")
+ else
+ notice "Configuring LVOLs..."
+ trap 'cleanup_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
+ for ((i = 0; i < ${#disk_cfg_bdfs[@]}; i++)); do
+ nvme_bdev=$($rpc_py bdev_nvme_attach_controller -b ${disk_cfg_spdk_names[$i]} -t pcie -a ${disk_cfg_bdfs[$i]})
+ notice "Created NVMe Bdev: $nvme_bdev with BDF ${disk_cfg_bdfs[$i]}"
+
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore $nvme_bdev lvs_$i --clear-method none)
+ lvol_stores+=("$ls_guid")
+ notice "Created Lvol Store: $ls_guid on Bdev $nvme_bdev"
+
+ vms_to_run=(${disk_cfg_vms[i]})
+ for ((j = 0; j < ${disk_cfg_splits[$i]}; j++)); do
+ free_mb=$(get_lvs_free_mb "$ls_guid")
+ size=$((free_mb / ((${disk_cfg_splits[$i]} - j))))
+ lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_$j $size --clear-method none)
+ lvol_bdevs+=("$lb_name")
+ notice "Created LVOL Bdev $lb_name on Lvol Store $ls_guid on Bdev $nvme_bdev"
+
+ notice "Setting up VM ${vms_to_run[j]}"
+ create_spdk_controller "${vms_to_run[j]}" ${lb_name}
+ create_vm ${vms_to_run[j]}
+ done
+ echo " "
+ done
+ $rpc_py bdev_lvol_get_lvstores
+ fi
+ $rpc_py bdev_get_bdevs
+ $rpc_py vhost_get_controllers
+fi
+
+# Start VMs and wait for them to boot
+vm_run $used_vms
+vm_wait_for_boot 300 $used_vms
+
+if [[ -n "$kernel_cpus" ]]; then
+ mkdir -p /sys/fs/cgroup/cpuset/spdk
+ kernel_mask=$vhost_0_reactor_mask
+ kernel_mask=${kernel_mask#"["}
+ kernel_mask=${kernel_mask%"]"}
+
+ echo "$kernel_mask" >> /sys/fs/cgroup/cpuset/spdk/cpuset.cpus
+ echo "0-1" >> /sys/fs/cgroup/cpuset/spdk/cpuset.mems
+
+ kernel_vhost_pids=$(pgrep "vhost" -U root)
+ for kpid in $kernel_vhost_pids; do
+ echo "Limiting kernel vhost pid ${kpid}"
+ echo "${kpid}" >> /sys/fs/cgroup/cpuset/spdk/tasks
+ done
+fi
+
+# Run FIO
+fio_disks=""
+for vm_num in $used_vms; do
+ host_name="VM-$vm_num"
+ vm_exec $vm_num "hostname $host_name"
+ vm_start_fio_server $fio_bin $vm_num
+
+ if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ vm_check_scsi_location $vm_num
+ elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ vm_check_blk_location $vm_num
+ elif [[ "$ctrl_type" == "kernel_vhost" ]]; then
+ vm_check_scsi_location $vm_num
+ fi
+
+ if [[ -n "$vm_throttle" ]]; then
+ block=$(printf '%s' $SCSI_DISK)
+ major_minor=$(vm_exec "$vm_num" "cat /sys/block/$block/dev")
+ vm_exec "$vm_num" "echo \"$major_minor $vm_throttle\" > /sys/fs/cgroup/blkio/blkio.throttle.read_iops_device"
+ vm_exec "$vm_num" "echo \"$major_minor $vm_throttle\" > /sys/fs/cgroup/blkio/blkio.throttle.write_iops_device"
+ fi
+
+ fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
+done
+
+# Run FIO traffic
+for fio_job in ${fio_jobs//,/ }; do
+ fio_job_fname=$(basename $fio_job)
+ fio_log_fname="${fio_job_fname%%.*}.log"
+ for i in $(seq 1 $fio_iterations); do
+ echo "Running FIO iteration $i for $fio_job_fname"
+ run_fio $fio_bin --hide-results --job-file="$fio_job" --out="$VHOST_DIR/fio_results" --json $fio_disks $fio_gtod &
+ fio_pid=$!
+
+ if $host_sar_enable || $vm_sar_enable; then
+ pids=""
+ mkdir -p $VHOST_DIR/fio_results/sar_stats
+ sleep $sar_delay
+ fi
+
+ if $host_sar_enable; then
+ sar -P ALL $sar_interval $sar_count > "$VHOST_DIR/fio_results/sar_stats/sar_stats_host.txt" &
+ pids+=" $!"
+ fi
+
+ if $vm_sar_enable; then
+ for vm_num in $used_vms; do
+ vm_exec "$vm_num" "mkdir -p /root/sar; sar -P ALL $sar_interval $sar_count >> /root/sar/sar_stats_VM${vm_num}_run${i}.txt" &
+ pids+=" $!"
+ done
+ fi
+
+ for j in $pids; do
+ wait $j
+ done
+
+ if $vm_sar_enable; then
+ for vm_num in $used_vms; do
+ vm_scp "$vm_num" "root@127.0.0.1:/root/sar/sar_stats_VM${vm_num}_run${i}.txt" "$VHOST_DIR/fio_results/sar_stats"
+ done
+ fi
+
+ wait $fio_pid
+ mv $VHOST_DIR/fio_results/$fio_log_fname $VHOST_DIR/fio_results/$fio_log_fname.$i
+ sleep 1
+ done
+
+ parse_fio_results "$VHOST_DIR/fio_results" "$fio_log_fname"
+done
+
+notice "Shutting down virtual machines..."
+vm_shutdown_all
+
+if [[ "$ctrl_type" == "kernel_vhost" ]]; then
+ cleanup_kernel_vhost || true
+else
+ notice "Shutting down SPDK vhost app..."
+ if [[ $use_split == true ]]; then
+ cleanup_split_cfg
+ else
+ cleanup_lvol_cfg
+ fi
+ vhost_kill "${vhost_num}"
+fi
+
+if [[ -n "$kernel_cpus" ]]; then
+ rmdir /sys/fs/cgroup/cpuset/spdk
+fi
diff --git a/src/spdk/test/vhost/readonly/delete_partition_vm.sh b/src/spdk/test/vhost/readonly/delete_partition_vm.sh
new file mode 100755
index 000000000..efba257f0
--- /dev/null
+++ b/src/spdk/test/vhost/readonly/delete_partition_vm.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -xe
+
+testdir=$(readlink -f $(dirname $0))
+
+disk_name="vda"
+test_folder_name="readonly_test"
+test_file_name="some_test_file"
+
+function error() {
+ echo "==========="
+ echo -e "ERROR: $*"
+ echo "==========="
+ trap - ERR
+ set +e
+ umount "$test_folder_name"
+ rm -rf "${testdir:?}/${test_folder_name:?}"
+ exit 1
+}
+
+trap 'error "In delete_partition_vm.sh, line:" "${LINENO}"' ERR
+
+if [[ ! -d "/sys/block/$disk_name" ]]; then
+ error "No vhost-blk disk found!"
+fi
+
+if (($(lsblk -r -n -o RO -d "/dev/$disk_name") == 1)); then
+ error "Vhost-blk disk is set as readonly!"
+fi
+
+mkdir -p $test_folder_name
+
+echo "INFO: Mounting disk"
+mount /dev/$disk_name"1" $test_folder_name
+
+echo "INFO: Removing folder and unmounting $test_folder_name"
+umount "$test_folder_name"
+rm -rf "${testdir:?}/${test_folder_name:?}"
+
+echo "INFO: Deleting partition"
+echo -e "d\n1\nw" | fdisk /dev/$disk_name
diff --git a/src/spdk/test/vhost/readonly/disabled_readonly_vm.sh b/src/spdk/test/vhost/readonly/disabled_readonly_vm.sh
new file mode 100755
index 000000000..2aec5b80a
--- /dev/null
+++ b/src/spdk/test/vhost/readonly/disabled_readonly_vm.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+set -xe
+
+testdir=$(readlink -f $(dirname $0))
+
+disk_name="vda"
+test_folder_name="readonly_test"
+test_file_name="some_test_file"
+
+function error() {
+ echo "==========="
+ echo -e "ERROR: $*"
+ echo "==========="
+ trap - ERR
+ set +e
+ umount "$test_folder_name"
+ rm -rf "${testdir:?}/${test_folder_name:?}"
+ exit 1
+}
+
+trap 'error "In disabled_readonly_vm.sh, line:" "${LINENO}"' ERR
+
+if [[ ! -d "/sys/block/$disk_name" ]]; then
+ error "No vhost-blk disk found!"
+fi
+
+if (($(lsblk -r -n -o RO -d "/dev/$disk_name") == 1)); then
+ error "Vhost-blk disk is set as readonly!"
+fi
+
+parted -s /dev/$disk_name mklabel gpt
+parted -s /dev/$disk_name mkpart primary 2048s 100%
+partprobe
+sleep 0.1
+
+echo "INFO: Creating file system"
+mkfs.ext4 -F /dev/$disk_name"1"
+
+echo "INFO: Mounting disk"
+mkdir -p $test_folder_name
+mount /dev/$disk_name"1" $test_folder_name
+
+echo "INFO: Creating a test file $test_file_name"
+truncate -s "200M" $test_folder_name/$test_file_name
+umount "$test_folder_name"
+rm -rf "${testdir:?}/${test_folder_name:?}"
diff --git a/src/spdk/test/vhost/readonly/enabled_readonly_vm.sh b/src/spdk/test/vhost/readonly/enabled_readonly_vm.sh
new file mode 100755
index 000000000..939af6f08
--- /dev/null
+++ b/src/spdk/test/vhost/readonly/enabled_readonly_vm.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+
+set -x
+
+testdir=$(readlink -f $(dirname $0))
+
+disk_name="vda"
+test_folder_name="readonly_test"
+test_file_name="some_test_file"
+
+function error() {
+ echo "==========="
+ echo -e "ERROR: $*"
+ echo "==========="
+ umount "$test_folder_name"
+ rm -rf "${testdir:?}/${test_folder_name:?}"
+ exit 1
+}
+
+if [[ ! -d "/sys/block/$disk_name" ]]; then
+ error "No vhost-blk disk found!"
+fi
+
+if (($(lsblk -r -n -o RO -d "/dev/$disk_name") == 0)); then
+ error "Vhost-blk disk is not set as readonly!"
+fi
+
+echo "INFO: Found vhost-blk disk with readonly flag"
+if [[ ! -b "/dev/${disk_name}1" ]]; then
+ error "Partition not found!"
+fi
+
+if ! mkdir $testdir/$test_folder_name; then
+ error "Failed to create test folder $test_folder_name"
+fi
+
+echo "INFO: Mounting partition"
+if ! mount /dev/$disk_name"1" $testdir/$test_folder_name; then
+ error "Failed to mount partition $disk_name""1"
+fi
+
+echo "INFO: Trying to create file on readonly disk"
+if truncate -s "200M" $test_folder_name/$test_file_name"_on_readonly"; then
+ error "Created a file on a readonly disk!"
+fi
+
+if [[ -f $test_folder_name/$test_file_name ]]; then
+ echo "INFO: Trying to delete previously created file"
+ if rm $test_folder_name/$test_file_name; then
+ error "Deleted a file from a readonly disk!"
+ fi
+else
+ error "Previously created file not found!"
+fi
+
+echo "INFO: Copying file from readonly disk"
+cp $test_folder_name/$test_file_name $testdir
+if ! rm $testdir/$test_file_name; then
+ error "Copied file from a readonly disk was not found!"
+fi
+
+umount "$test_folder_name"
+rm -rf "${testdir:?}/${test_folder_name:?}"
+echo "INFO: Trying to create file system on a readonly disk"
+if mkfs.ext4 -F /dev/$disk_name"1"; then
+ error "Created file system on a readonly disk!"
+fi
+
+echo "INFO: Trying to delete partition from readonly disk"
+if echo -e "d\n1\nw" | fdisk /dev/$disk_name; then
+ error "Deleted partition from readonly disk!"
+fi
diff --git a/src/spdk/test/vhost/readonly/readonly.sh b/src/spdk/test/vhost/readonly/readonly.sh
new file mode 100755
index 000000000..ad66f72e0
--- /dev/null
+++ b/src/spdk/test/vhost/readonly/readonly.sh
@@ -0,0 +1,136 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+rpc_py="$testdir/../../../scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+vm_img=""
+disk="Nvme0n1"
+x=""
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Shortcut script for automated readonly test for vhost-block"
+ echo "For test details check test_plan.md"
+ echo
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo
+ echo "-h, --help Print help and exit"
+ echo " --vm_image= Path to VM image"
+	echo "    --disk=              Disk name. If disk=malloc, a 512M malloc disk is created"
+	echo "                         (e.g. --disk=malloc). Default: Nvme0n1"
+ echo "-x set -x for script debug"
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 && exit 0 ;;
+ vm_image=*) vm_img="${OPTARG#*=}" ;;
+ disk=*) disk="${OPTARG#*=}" ;;
+ *) usage $0 "Invalid argument '$OPTARG'" && exit 1 ;;
+ esac
+ ;;
+ h) usage $0 && exit 0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" && exit 1 ;;
+ esac
+done
+
+vhosttestinit
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
+
+if [[ $EUID -ne 0 ]]; then
+	fail "This test must be run as root"
+fi
+
+function print_tc_name() {
+ notice ""
+ notice "==============================================================="
+ notice "Now running: $1"
+ notice "==============================================================="
+}
+
+function blk_ro_tc1() {
+ print_tc_name ${FUNCNAME[0]}
+ local vm_no="0"
+ local disk_name=$disk
+ local vhost_blk_name=""
+ local vm_dir="$VHOST_DIR/vms/$vm_no"
+
+ if [[ $disk =~ .*malloc.* ]]; then
+ if ! disk_name=$($rpc_py bdev_malloc_create 512 4096); then
+ fail "Failed to create malloc bdev"
+ fi
+
+ disk=$disk_name
+ else
+ disk_name=${disk%%_*}
+ if ! $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qi $disk_name$; then
+ fail "$disk_name bdev not found!"
+ fi
+ fi
+
+ #Create controller and create file on disk for later test
+ notice "Creating vhost_blk controller"
+ vhost_blk_name="naa.$disk_name.$vm_no"
+ $rpc_py vhost_create_blk_controller $vhost_blk_name $disk_name
+ vm_setup --disk-type=spdk_vhost_blk --force=$vm_no --os=$vm_img --disks=$disk --read-only=true
+
+ vm_run $vm_no
+ vm_wait_for_boot 300 $vm_no
+ notice "Preparing partition and file on guest VM"
+ vm_exec $vm_no "bash -s" < $testdir/disabled_readonly_vm.sh
+ sleep 1
+
+ vm_shutdown_all
+ #Create readonly controller and test readonly feature
+ notice "Removing controller and creating new one with readonly flag"
+ $rpc_py vhost_delete_controller $vhost_blk_name
+ $rpc_py vhost_create_blk_controller -r $vhost_blk_name $disk_name
+
+ vm_run $vm_no
+ vm_wait_for_boot 300 $vm_no
+ notice "Testing readonly feature on guest VM"
+ vm_exec $vm_no "bash -s" < $testdir/enabled_readonly_vm.sh
+ sleep 3
+
+ vm_shutdown_all
+ #Delete file from disk and delete partition
+ echo "INFO: Removing controller and creating new one"
+ $rpc_py vhost_delete_controller $vhost_blk_name
+ $rpc_py vhost_create_blk_controller $vhost_blk_name $disk_name
+
+ vm_run $vm_no
+ vm_wait_for_boot 300 $vm_no
+ notice "Removing partition and file from test disk on guest VM"
+ vm_exec $vm_no "bash -s" < $testdir/delete_partition_vm.sh
+ sleep 1
+
+ vm_shutdown_all
+}
+
+vhost_run 0
+if [[ -z $x ]]; then
+ set +x
+fi
+
+blk_ro_tc1
+
+$rpc_py bdev_nvme_detach_controller Nvme0
+
+vhost_kill 0
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/shared/bdev.json b/src/spdk/test/vhost/shared/bdev.json
new file mode 100644
index 000000000..ad28314a5
--- /dev/null
+++ b/src/spdk/test/vhost/shared/bdev.json
@@ -0,0 +1,20 @@
+{
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "method": "bdev_virtio_attach_controller",
+ "params": {
+ "vq_count": 2,
+ "traddr": "Malloc.0",
+ "dev_type": "blk",
+ "vq_size": 512,
+ "name": "VirtioBlk0",
+ "trtype": "user"
+ }
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/spdk/test/vhost/shared/shared.sh b/src/spdk/test/vhost/shared/shared.sh
new file mode 100755
index 000000000..bbf0fd858
--- /dev/null
+++ b/src/spdk/test/vhost/shared/shared.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+function run_spdk_fio() {
+ fio_bdev --ioengine=spdk_bdev \
+ "$rootdir/test/vhost/common/fio_jobs/default_initiator.job" --runtime=10 --rw=randrw \
+ --spdk_mem=1024 --spdk_single_seg=1 --spdk_json_conf=$testdir/bdev.json "$@"
+}
+
+vhosttestinit
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR SIGTERM SIGABRT
+
+vhost_run 0
+
+$rpc_py bdev_malloc_create -b Malloc 124 4096
+$rpc_py vhost_create_blk_controller Malloc.0 Malloc
+
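+# Run two fio instances concurrently against the same virtio-blk device, each on its own half,
+# to exercise shared access to a single vhost controller.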
+run_spdk_fio --size=50% --offset=0 --filename=VirtioBlk0 &
+run_fio_pid=$!
+sleep 1
+run_spdk_fio --size=50% --offset=50% --filename=VirtioBlk0
+wait $run_fio_pid
+vhost_kill 0
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/vhost.sh b/src/spdk/test/vhost/vhost.sh
new file mode 100755
index 000000000..5b050fe40
--- /dev/null
+++ b/src/spdk/test/vhost/vhost.sh
@@ -0,0 +1,107 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+echo "Running SPDK vhost fio autotest..."
+if [[ $(uname -s) != Linux ]]; then
+ echo ""
+ echo "INFO: Vhost tests are only for Linux machine."
+ echo ""
+ exit 0
+fi
+
+CENTOS_VM_IMAGE="/home/sys_sgsw/spdk_vhost_CentOS_vm_image.qcow2"
+DEFAULT_FIO_BIN="/home/sys_sgsw/fio_ubuntu"
+CENTOS_FIO_BIN="/home/sys_sgsw/fio_ubuntu_bak"
+
+: ${FIO_BIN="$DEFAULT_FIO_BIN"}
+
+if [[ ! -r "${VM_IMAGE}" ]]; then
+ echo ""
+ echo "ERROR: VM image '${VM_IMAGE}' does not exist."
+ echo ""
+ exit 1
+fi
+
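+# Count the NVMe devices present (PCI class code 0108); the nightly lvol suites below need at least two disks.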
+DISKS_NUMBER=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"$1}' | wc -l)
+
+WORKDIR=$(readlink -f $(dirname $0))
+
+run_test "vhost_negative" $WORKDIR/other/negative.sh
+
+run_test "vhost_boot" $WORKDIR/vhost_boot/vhost_boot.sh --vm_image=$VM_IMAGE
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ echo 'Running blk integrity suite...'
+ run_test "vhost_blk_integrity" $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
+ --vm=0,$VM_IMAGE,Nvme0n1p0:RaidBdev0:RaidBdev1:RaidBdev2 \
+ --test-type=spdk_vhost_blk \
+ --fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
+
+ echo 'Running SCSI integrity suite...'
+ run_test "vhost_scsi_integrity" $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
+ --vm=0,$VM_IMAGE,Nvme0n1p0:RaidBdev0:RaidBdev1:RaidBdev2 \
+ --test-type=spdk_vhost_scsi \
+ --fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
+
+ echo 'Running filesystem integrity suite with SCSI...'
+ run_test "vhost_scsi_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_scsi --fs="xfs ntfs btrfs ext4"
+
+ echo 'Running filesystem integrity suite with BLK...'
+ run_test "vhost_blk_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4"
+
+ if [[ $DISKS_NUMBER -ge 2 ]]; then
+ echo 'Running lvol integrity nightly suite with two cores and two controllers'
+ run_test "vhost_scsi_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_scsi --max-disks=2 --distribute-cores --vm-count=2
+
+ echo 'Running lvol integrity nightly suite with one core and two controllers'
+ run_test "vhost_scsi_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_scsi --max-disks=2 --vm-count=2
+ fi
+ if [[ -e $CENTOS_VM_IMAGE ]]; then
+ echo 'Running lvol integrity nightly suite with different os types'
+ run_test "vhost_scsi_nightly" $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
+ --ctrl-type=spdk_vhost_scsi --vm-count=2 --multi-os
+ fi
+ echo 'Running lvol integrity nightly suite with one core and one controller'
+ run_test "vhost_scsi_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_scsi --max-disks=1
+
+ if [[ $DISKS_NUMBER -ge 2 ]]; then
+ echo 'Running lvol integrity nightly suite with two cores and two controllers'
+ run_test "vhost_blk_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_blk --max-disks=2 --distribute-cores --vm-count=2
+
+ echo 'Running lvol integrity nightly suite with one core and two controllers'
+ run_test "vhost_blk_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_blk --max-disks=2 --vm-count=2
+ fi
+ if [[ -e $CENTOS_VM_IMAGE ]]; then
+ echo 'Running lvol integrity nightly suite with different os types'
+ run_test "vhost_blk_nightly" $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
+ --ctrl-type=spdk_vhost_blk --vm-count=2 --multi-os
+ fi
+ echo 'Running lvol integrity nightly suite with one core and one controller'
+ run_test "vhost_lvol_integrity_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_blk --max-disks=1
+
+ echo 'Running readonly tests suite...'
+ run_test "vhost_readonly" $WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x
+
+ echo 'Running migration suite...'
+ run_test "vhost_migration" $WORKDIR/migration/migration.sh -x \
+ --fio-bin=$FIO_BIN --os=$VM_IMAGE
+fi
+
+echo 'Running lvol integrity suite...'
+run_test "vhost_scsi_lvol_integrity" $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_scsi --thin-provisioning
+
+echo 'Running lvol integrity suite...'
+run_test "vhost_blk_lvol_integrity" $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
+ --ctrl-type=spdk_vhost_blk
+
+run_test "spdkcli_vhost" ./test/spdkcli/vhost.sh
diff --git a/src/spdk/test/vhost/vhost_boot/vhost_boot.sh b/src/spdk/test/vhost/vhost_boot/vhost_boot.sh
new file mode 100755
index 000000000..9df2bd970
--- /dev/null
+++ b/src/spdk/test/vhost/vhost_boot/vhost_boot.sh
@@ -0,0 +1,126 @@
+#!/usr/bin/env bash
+set -xe
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+source $rootdir/test/bdev/nbd_common.sh
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+vm_no="0"
+
+function err_clean() {
+ trap - ERR
+ print_backtrace
+ set +e
+ error "Error on $1 $2"
+ vm_kill_all
+ $rpc_py vhost_scsi_controller_remove_target naa.vhost_vm.$vm_no 0
+ $rpc_py vhost_delete_controller naa.vhost_vm.$vm_no
+ $rpc_py bdev_lvol_delete $lvb_u
+ $rpc_py bdev_lvol_delete_lvstore -u $lvs_u
+ vhost_kill 0
+ exit 1
+}
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Usage: $(basename $1) vm_image=PATH [-h|--help]"
+ echo "-h, --help Print help and exit"
+ echo " --vm_image=PATH Path to VM image used in these tests"
+}
+
+while getopts 'h-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ vm_image=*) os_image="${OPTARG#*=}" ;;
+			*) usage $0 "Invalid argument '$OPTARG'" && exit 1 ;;
+ esac
+ ;;
+ h) usage $0 && exit 0 ;;
+ *) usage $0 "Invalid argument '$optchar'" && exit 1 ;;
+ esac
+done
+
+if [[ $EUID -ne 0 ]]; then
+ echo "INFO: Go away user come back as root"
+ exit 1
+fi
+
+if [[ -z $os_image ]]; then
+ echo "No path to os image is given"
+ exit 1
+fi
+
+vhosttestinit
+
+trap 'err_clean "${FUNCNAME}" "${LINENO}"' ERR
+timing_enter start_vhost
+vhost_run 0
+timing_exit start_vhost
+
+timing_enter create_lvol
+
+nvme_bdev=$($rpc_py bdev_get_bdevs -b Nvme0n1)
+nvme_bdev_bs=$(jq ".[] .block_size" <<< "$nvme_bdev")
+nvme_bdev_name=$(jq ".[] .name" <<< "$nvme_bdev")
+if [[ $nvme_bdev_bs != 512 ]]; then
+ echo "ERROR: Your device $nvme_bdev_name block size is $nvme_bdev_bs, but should be 512 bytes."
+ false
+fi
+
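+# Create a logical volume store on Nvme0n1 and a 20000 MiB lvol that will hold the converted boot image.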
+lvs_u=$($rpc_py bdev_lvol_create_lvstore Nvme0n1 lvs0)
+lvb_u=$($rpc_py bdev_lvol_create -u $lvs_u lvb0 20000)
+timing_exit create_lvol
+
+timing_enter convert_vm_image
+modprobe nbd
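+# Expose the lvol as /dev/nbd0 so qemu-img can convert the qcow2 OS image straight onto it as raw data.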
+trap 'nbd_stop_disks $(get_vhost_dir 0)/rpc.sock /dev/nbd0; err_clean "${FUNCNAME}" "${LINENO}"' ERR
+nbd_start_disks "$(get_vhost_dir 0)/rpc.sock" $lvb_u /dev/nbd0
+qemu-img convert $os_image -O raw /dev/nbd0
+sync
+nbd_stop_disks $(get_vhost_dir 0)/rpc.sock /dev/nbd0
+sleep 1
+timing_exit convert_vm_image
+
+trap 'err_clean "${FUNCNAME}" "${LINENO}"' ERR
+timing_enter create_vhost_controller
+$rpc_py vhost_create_scsi_controller naa.vhost_vm.$vm_no
+$rpc_py vhost_scsi_controller_add_target naa.vhost_vm.$vm_no 0 $lvb_u
+timing_exit create_vhost_controller
+
+timing_enter setup_vm
+vm_setup --disk-type=spdk_vhost_scsi --force=$vm_no --disks="vhost_vm" --spdk-boot="vhost_vm"
+vm_run $vm_no
+vm_wait_for_boot 300 $vm_no
+timing_exit setup_vm
+
+timing_enter run_vm_cmd
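+# Partition the space beyond 10GB on the boot disk, create an ext4 filesystem there
+# and run an fio verify workload against a file on it.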
+vm_exec $vm_no "parted -s /dev/sda mkpart primary 10GB 100%; partprobe; sleep 0.1;"
+vm_exec $vm_no "mkfs.ext4 -F /dev/sda2; mkdir -p /mnt/sda2test; mount /dev/sda2 /mnt/sda2test;"
+vm_exec $vm_no "fio --name=integrity --bsrange=4k-512k --iodepth=128 --numjobs=1 --direct=1 \
+ --thread=1 --group_reporting=1 --rw=randrw --rwmixread=70 --filename=/mnt/sda2test/test_file \
+ --verify=md5 --do_verify=1 --verify_backlog=1024 --fsync_on_close=1 --runtime=20 \
+ --time_based=1 --size=1024m"
+vm_exec $vm_no "umount /mnt/sda2test; rm -rf /mnt/sda2test"
+alignment_offset=$(vm_exec $vm_no "cat /sys/block/sda/sda1/alignment_offset")
+echo "alignment_offset: $alignment_offset"
+timing_exit run_vm_cmd
+
+vm_shutdown_all
+
+timing_enter clean_vhost
+$rpc_py vhost_scsi_controller_remove_target naa.vhost_vm.$vm_no 0
+$rpc_py vhost_delete_controller naa.vhost_vm.$vm_no
+$rpc_py bdev_lvol_delete $lvb_u
+$rpc_py bdev_lvol_delete_lvstore -u $lvs_u
+timing_exit clean_vhost
+
+vhost_kill 0
+
+vhosttestfini
diff --git a/src/spdk/test/vhost/windows/windows.sh b/src/spdk/test/vhost/windows/windows.sh
new file mode 100755
index 000000000..6bf8573f7
--- /dev/null
+++ b/src/spdk/test/vhost/windows/windows.sh
@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+ctrl_type="spdk_vhost_scsi"
+ssh_pass=""
+vm_num="0"
+vm_image="/home/sys_sgsw/windows_server.qcow2"
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Windows Server automated test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo "--vm-ssh-pass=PASSWORD Text password for the VM"
+ echo "--vm-image=PATH Path to qcow2 image of Windows VM"
+ echo "--ctrl-type=TYPE Controller type to use for test:"
+ echo " spdk_vhost_scsi - use spdk vhost scsi"
+ echo " spdk_vhost_blk - use spdk vhost block"
+ echo "-x set -x for script debug"
+ echo "-h, --help Print help and exit"
+
+ exit 0
+}
+
+while getopts 'xh-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ vm-ssh-pass=*) ssh_pass="${OPTARG#*=}" ;;
+ vm-image=*) vm_image="${OPTARG#*=}" ;;
+ ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ x)
+ set -x
+ x="-x"
+ ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+# For some reason there is a problem between using SSH key authentication
+# and Windows UAC. Some of the PowerShell commands fail due to lack of
+# permissions, despite the script running in elevated mode.
+# There are some clues about this setup that suggest this might not work properly:
+# https://superuser.com/questions/181581/how-can-i-run-something-as-administrator-via-cygwins-ssh
+# https://cygwin.com/ml/cygwin/2004-09/msg00087.html
+# But they apply to rather old Windows distributions.
+# Potentially using Windows Server 2016 and newer may solve the issue
+# due to OpenSSH being available directly from Windows Store.
+function vm_sshpass() {
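+	# Password-based variant of vm_exec: wraps ssh in sshpass and connects to the VM's forwarded SSH port.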
+ vm_num_is_valid $1 || return 1
+
+ local ssh_cmd
+ ssh_cmd="sshpass -p $2 ssh \
+ -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ -o User=root \
+ -p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"
+
+ shift 2
+ $ssh_cmd "$@"
+}
+
+if [[ -z "$ssh_pass" ]]; then
+ error "Please specify --vm-ssh-pass parameter"
+fi
+
+trap 'error_exit "${FUNCNAME}" "${LINENO}"; rm -f $aio_file' SIGTERM SIGABRT ERR
+
+vm_kill_all
+
+# Run vhost without debug!
+# Windows Virtio drivers use indirect descriptors without negotiating
+# their feature flag, which is explicitly forbidden by the Virtio 1.0 spec.
+# "(2.4.5.3.1 Driver Requirements: Indirect Descriptors)
+# The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT flag unless the
+# VIRTIO_F_INDIRECT_DESC feature was negotiated.".
+# Violating this rule doesn't cause any issues for SPDK vhost,
+# but triggers an assert, so we can only run Windows VMs with non-debug SPDK builds.
+notice "running SPDK vhost"
+vhost_run 0
+notice "..."
+
+# Prepare bdevs for later vhost controllers use
+# Nvme bdev is automatically constructed during vhost_run
+# by using scripts/gen_nvme.sh. No need to add it manually.
+# Use different bdev sizes so the disks can be told apart in the guest
+# when virtio-blk exposes no bdev name
+# TODO: use a param for blocksize for AIO and Malloc bdevs
+aio_file="$SPDK_TEST_STORAGE/aio_disk"
+dd if=/dev/zero of=$aio_file bs=1M count=512
+$rpc_py bdev_aio_create $aio_file Aio0 512
+$rpc_py bdev_malloc_create -b Malloc0 256 512
+$rpc_py bdev_get_bdevs
+
+# Create vhost controllers
+# Prepare VM setup command
+setup_cmd="vm_setup --force=0 --memory=8192"
+setup_cmd+=" --os=$vm_image"
+
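+# vhost-scsi can expose all three bdevs as targets of a single controller; vhost-blk needs one controller per bdev.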
+if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
+ $rpc_py vhost_create_scsi_controller naa.0.0
+ $rpc_py vhost_scsi_controller_add_target naa.0.0 0 Nvme0n1
+ $rpc_py vhost_scsi_controller_add_target naa.0.0 1 Malloc0
+ $rpc_py vhost_scsi_controller_add_target naa.0.0 2 Aio0
+ setup_cmd+=" --disk-type=spdk_vhost_scsi --disks=0"
+elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
+ $rpc_py vhost_create_blk_controller naa.0.0 Nvme0n1
+ $rpc_py vhost_create_blk_controller naa.1.0 Malloc0
+ $rpc_py vhost_create_blk_controller naa.2.0 Aio0
+ setup_cmd+=" --disk-type=spdk_vhost_blk --disks=0:1:2"
+fi
+$rpc_py vhost_get_controllers
+$setup_cmd
+
+# Spin up VM
+vm_run "$vm_num"
+vm_wait_for_boot "300" "$vm_num"
+
+vm_sshpass "$vm_num" "$ssh_pass" "mkdir /cygdrive/c/fs_test"
+vm_scp "$vm_num" "$testdir/windows_fs_test.ps1" "127.0.0.1:/cygdrive/c/fs_test"
+vm_sshpass "$vm_num" "$ssh_pass" "cd /cygdrive/c/fs_test; powershell.exe -file windows_fs_test.ps1"
+
+notice "Shutting down Windows VM..."
+# Killing, actually. #TODO: implement vm_windows_shutdown() function
+vm_kill $vm_num
+
+notice "Shutting down SPDK vhost app..."
+vhost_kill 0
+
+rm -f $aio_file
diff --git a/src/spdk/test/vhost/windows/windows_fs_test.ps1 b/src/spdk/test/vhost/windows/windows_fs_test.ps1
new file mode 100644
index 000000000..cda1b53f2
--- /dev/null
+++ b/src/spdk/test/vhost/windows/windows_fs_test.ps1
@@ -0,0 +1,78 @@
+# Get the ID and security principal of the current user account
+$myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
+$myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+# Get the security principal for the Administrator role
+$adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
+
+# Check to see if we are currently running "as Administrator"
+if ($myWindowsPrincipal.IsInRole($adminRole)) {
+ # We are running "as Administrator" - so change the title and background color to indicate this
+ $Host.UI.RawUI.WindowTitle = $myInvocation.MyCommand.Definition + "(Elevated)"
+ $Host.UI.RawUI.BackgroundColor = "DarkBlue"
+ clear-host
+} else {
+ # We are not running "as Administrator" - so relaunch as administrator
+
+ # Create a new process object that starts PowerShell
+ $newProcess = new-object System.Diagnostics.ProcessStartInfo "PowerShell";
+
+ # Specify the current script path and name as a parameter
+ $newProcess.Arguments = $myInvocation.MyCommand.Definition;
+
+ # Indicate that the process should be elevated
+ $newProcess.Verb = "runas";
+
+ # Start the new process
+ [System.Diagnostics.Process]::Start($newProcess);
+
+ # Exit from the current, unelevated, process
+ exit
+}
+
+# Set bash -e equivalent
+$ErrorActionPreference = "Stop"
+
+$filesystems=@("NTFS", "FAT32", "FAT")
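+# Test only disks whose FriendlyName does not match "QEMU" (typically the emulated boot disk),
+# labeling each by number, model, serial and size.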
+$disks = get-disk | Where-Object FriendlyName -NotMatch "QEMU"
+Start-Sleep 2
+foreach($disk in $disks)
+{
+ $size = $disk.Size
+ $number = $disk.Number
+ $serial = $disk.SerialNumber
+ $model = $disk.model.Trim()
+ $size = $size -replace " ", "_"
+ $model = $model -replace " ", "_"
+
+ $label = "${number}_${model}_${serial}_${size}"
+ echo "Running tests for disk $label"
+ start-sleep 2
+
+ Try {
+ Initialize-Disk -Number $disk.Number -PartitionStyle MBR
+ } Catch {
+ Clear-Disk -Number $disk.Number -RemoveData -Confirm:$false
+ Initialize-Disk -Number $disk.Number -PartitionStyle MBR
+ }
+ echo "`tDisk initialized"
+ start-sleep 2
+
+ $part = New-Partition -DiskNumber $disk.Number -UseMaximumSize -AssignDriveLetter
+ echo "`tCreated partition $($part.DriveLetter)"
+ start-sleep 2
+
+ foreach($fs in $filesystems) {
+ echo "`tTrying to format $($part.DriveLetter) with $fs"
+ Try {
+ $vol = Format-Volume -DriveLetter $part.DriveLetter -FileSystem $fs -Confirm:$false
+ } Catch [Exception] {
+ echo $_.Exception.GetType().FullName, $_.Exception.Message
+ echo $_.Exception | format-list -force
+ exit 1
+ }
+ echo "`tPartition $($part.DriveLetter) formatted with $fs filesystem"
+ start-sleep 2
+ }
+}
diff --git a/src/spdk/test/vhost/windows/windows_scsi_compliance.ps1 b/src/spdk/test/vhost/windows/windows_scsi_compliance.ps1
new file mode 100644
index 000000000..80d86e805
--- /dev/null
+++ b/src/spdk/test/vhost/windows/windows_scsi_compliance.ps1
@@ -0,0 +1,73 @@
+# Get the ID and security principal of the current user account
+$myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
+$myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+# Get the security principal for the Administrator role
+$adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
+
+# Check to see if we are currently running "as Administrator"
+if ($myWindowsPrincipal.IsInRole($adminRole))
+ {
+ # We are running "as Administrator" - so change the title and background color to indicate this
+ $Host.UI.RawUI.WindowTitle = $myInvocation.MyCommand.Definition + "(Elevated)"
+ $Host.UI.RawUI.BackgroundColor = "DarkBlue"
+ clear-host
+ }
+else
+ {
+ # We are not running "as Administrator" - so relaunch as administrator
+
+ # Create a new process object that starts PowerShell
+ $newProcess = new-object System.Diagnostics.ProcessStartInfo "PowerShell";
+
+ # Specify the current script path and name as a parameter
+ $newProcess.Arguments = $myInvocation.MyCommand.Definition;
+
+ # Indicate that the process should be elevated
+ $newProcess.Verb = "runas";
+
+ # Start the new process
+ [System.Diagnostics.Process]::Start($newProcess);
+
+ # Exit from the current, unelevated, process
+ exit
+ }
+# From this point the script runs elevated: reinitialize all non-QEMU disks before the compliance run
+get-disk | Where-Object FriendlyName -NotMatch "QEMU" | Initialize-Disk -PartitionStyle MBR
+Start-Sleep 2
+get-disk | Where-Object FriendlyName -NotMatch "QEMU" | Clear-Disk -RemoveData -Confirm:$false
+Start-Sleep 2
+get-disk | Where-Object FriendlyName -NotMatch "QEMU" | Initialize-Disk -PartitionStyle MBR
+Start-Sleep 2
+
+$disks = get-disk | Where-Object FriendlyName -NotMatch "QEMU"
+Start-Sleep 2
+foreach($disk in $disks)
+{
+
+ $phy_bs = $disk.PhysicalSectorSize
+ $model = $disk.model
+ $serial = $disk.SerialNumber
+
+ $label = ""
+ $label += $model.Trim() + "_" + $serial + "_" + $phy_bs
+ $label = $label -replace " ", "_"
+ echo $label
+ start-sleep 2
+
+ $part = New-Partition -DiskNumber $disk.Number -UseMaximumSize -AssignDriveLetter
+ echo $part.DriveLetter
+ start-sleep 2
+
+ $vol = Format-Volume -DriveLetter $part.DriveLetter -FileSystem NTFS -Confirm:$false
+ echo $vol
+ start-sleep 2
+
+ cd C:\SCSI
+ .\scsicompliancetest.exe \\.\$($vol.DriveLetter): -full | tee "C:\SCSI\WIN_SCSI_1_$label.log"
+ start-sleep 2
+ mv .\scsicompliance.log.wtl ".\WIN_SCSI_1_$label.wtl"
+ .\scsicompliance.exe /Device \\.\$($vol.DriveLetter): /Operation Test /Scenario Common | tee "C:\SCSI\WIN_SCSI_2_$label.log"
+ start-sleep 2
+ mv .\scsicompliance.wtl ".\WIN_SCSI_2_$label.wtl"
+}
diff --git a/src/spdk/test/vhost/windows/windows_scsi_compliance.py b/src/spdk/test/vhost/windows/windows_scsi_compliance.py
new file mode 100755
index 000000000..a0f4ea63c
--- /dev/null
+++ b/src/spdk/test/vhost/windows/windows_scsi_compliance.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+import os
+import sys
+import re
+import pprint
+import collections
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "results"))
+
+scsi_logs = filter(lambda x: x.endswith(".log"), os.listdir("./"))
+scsi_1_pattern = re.compile(r"(ASSERTION\s[1-9][\d+]?\.\d+\s)(.+\s)([\w\W]+?)(Result:\s)(\w+)", re.I | re.M)
+scsi_2_pattern = re.compile(r"(?:Start:\s)(ASSERTION:\s)?(.+)(?:,.+=\s)([\w\W]+?)(End:\s)(\w+)(,.*)", re.I | re.M)
+fails = []
+warns = []
+not_expected_fails = []
+not_expected_warns = []
+
+expected_warns = [
+ "MODE_SELECT_6_MODE_SENSE_6_Checking_Parameters_Savable_PS_bit",
+ "MODE_SELECT_10_MODE_SENSE_10_Checking_Parameters_Savable_PS_bit",
+ "MODE_SELECT_10_Changing_WCE",
+ "MODE_SELECT_10_MODE_SENSE_10_Checking_that_WCE_has_been_cleared",
+ "MODE_SELECT_10_MODE_SENSE_10_Checking_that_Saved_Values_have_changed",
+ "MODE_SELECT_10_setting_WCE",
+ "MODE_SELECT_10_MODE_SENSE_10_Checking_that_WCE_has_been_set",
+ "MODE_SELECT_10_Attempting_to_restore_original_values",
+ "MODE_SELECT_10_MODE_SENSE_10_Verifying_values_were_restored",
+ "ASSERTION_VERIFY_16_Support_Test",
+]
+
+expected_fails = [
+ "ASSERTION_READ_6_Read-With-Disk-Cache-Cleared_Test",
+ "ASSERTION_READ_10_Read-With-Disk-Cache-Cleared_Test",
+ "ASSERTION_READ_16_Read-With-Disk-Cache-Cleared_Test",
+ "ASSERTION_INQUIRY_Checking_Identification_Descriptors_in_VPD_page_0x83",
+ "ASSERTION_VERIFY_10_Support_Test",
+]
+
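+# results[suite_version][test_name][disk_label] -> verdict; used below to build the HTML summary table.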
+results = {"1": collections.OrderedDict(),
+ "2": collections.OrderedDict()}
+
+for log in scsi_logs:
+ # Choose regex pattern depending on tests version
+ pattern = scsi_1_pattern if "WIN_SCSI_1" in log else scsi_2_pattern
+
+ # Read log file contents
+ try:
+        with open(log, 'r') as fh:
+            log_text = fh.read()
+ # Dir name for saving split result files of currently processed log file
+ d_name = log.split(".")[0]
+ try:
+ os.mkdir(d_name)
+ except OSError:
+ pass
+ except IOError as e:
+ print("ERROR: While opening log file: {log_file}".format(log_file=log))
+ exit(1)
+
+ # Parse log file contents
+ matches_found = re.findall(pattern, log_text)
+ if len(matches_found) < 1:
+ print("ERROR: No results found in file {log_file}!".format(log_file=log))
+ exit(1)
+
+ # Go through output for each test from log file; parse and save to dict
+ for m in matches_found:
+ test_name = re.sub(r"\s+", "_", (m[0] + m[1]).strip())
+ test_name = re.sub(r"[():]", "", test_name)
+ test_name = test_name[0:-1] if "." in test_name[-1] else test_name
+ tc_result = m[4].upper()
+
+ if "FAIL" in tc_result.upper():
+ fails.append([log, test_name, tc_result])
+ elif "WARN" in tc_result.upper():
+ warns.append([log, test_name, tc_result])
+
+ # Save output to separate file
+ with open(os.path.join("./", d_name, test_name), 'w') as fh:
+ for line in m:
+ fh.write(line)
+
+ # Also save in dictionary for later use in generating HTML results summary
+ ver = "1" if "WIN_SCSI_1" in log else "2"
+ try:
+ results[ver][test_name][d_name] = tc_result
+ except KeyError:
+ results[ver][test_name] = collections.OrderedDict()
+ results[ver][test_name][d_name] = tc_result
+
+
+# Generate HTML file with results table
+with open(os.path.join("./", "results.html"), 'a') as fh:
+ html = "<html>"
+ for suite_ver in results.keys():
+ html += """"<h2> WIN_SCSI_{ver} </h2>
+ <table bgcolor=\"#ffffff\" border=\"1px solid black;>\"""".format(ver=suite_ver)
+
+ # Print header
+ html += "<tr><th>Test name</th>"
+ disks_header = set()
+
+ for _ in results[suite_ver].keys():
+ for disk in results[suite_ver][_].keys():
+ disks_header.add(disk)
+
+ for disk in disks_header:
+ html += "<th>{disk}</th>".format(disk=disk)
+ html += "</tr>"
+
+ # Print results
+ for test in results[suite_ver].keys():
+ html += "<tr><td>{f_name}</td>".format(f_name=test)
+ for disk in disks_header:
+ try:
+ result = results[suite_ver][test][disk]
+
+ html += "<td"
+ if "PASS" in result:
+ html += " bgcolor=\"#99ff33\">"
+ else:
+ html += " bgcolor=\"#ff5050\">"
+
+ html += "<a href={file}>{result}</a>".format(result=result, file=os.path.join("./", disk, test))
+ html += "</td>"
+
+ except KeyError:
+ html += "<td bgcolor=\"#ffff99\"></br></td>"
+ html += "</tr>"
+ html += "</table></br>"
+ html += "</html>"
+ fh.write(html)
+
+if warns:
+ not_expected_warns = [w for w in warns if w[1] not in expected_warns and "WIN_SCSI_2" in w[0]]
+ print("INFO: Windows SCSI compliance warnings:")
+ pprint.pprint(warns, width=150)
+
+if fails:
+ not_expected_fails = [f for f in fails if f[1] not in expected_fails and "WIN_SCSI_2" in f[0]]
+ print("INFO: Windows SCSI compliance fails:")
+ pprint.pprint(fails, width=150)
+
+if not_expected_warns or not_expected_fails:
+ print("Not expected fails / warnings:")
+ pprint.pprint(not_expected_warns, width=150)
+ pprint.pprint(not_expected_fails, width=150)
+ exit(1)
diff --git a/src/spdk/test/vhost/windows/windows_scsi_compliance.sh b/src/spdk/test/vhost/windows/windows_scsi_compliance.sh
new file mode 100755
index 000000000..d7c854592
--- /dev/null
+++ b/src/spdk/test/vhost/windows/windows_scsi_compliance.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/vhost/common.sh
+
+# Tested with a Windows VM running Microsoft Windows Server 2012 R2 Datacenter
+# (OS Version: 6.3.9600 N/A Build 9600).
+# The Windows virtio-scsi driver must be installed in the VM
+# in order to run this test.
+WINDOWS_IMG="/home/sys_sgsw/windows_scsi_compliance/windows_vm_image.qcow2"
+aio_file="$SPDK_TEST_STORAGE/aio_disk"
+ssh_pass=""
+vm_num=1
+keep_results_dir=false
+rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
+
+function usage() {
+ [[ -n $2 ]] && (
+ echo "$2"
+ echo ""
+ )
+ echo "Windows Server scsi compliance test"
+ echo "Usage: $(basename $1) [OPTIONS]"
+ echo " --vm-ssh-pass=PASSWORD Text password for the VM"
+ echo " --vm-image-path Path of windows image"
+ echo " --keep_results Do not delete dir with results"
+
+ exit 0
+}
+
+while getopts 'h-:' optchar; do
+ case "$optchar" in
+ -)
+ case "$OPTARG" in
+ help) usage $0 ;;
+ vm-ssh-pass=*) ssh_pass="${OPTARG#*=}" ;;
+ vm-image-path=*) WINDOWS_IMG="${OPTARG#*=}" ;;
+ keep_results*) keep_results_dir=true ;;
+ esac
+ ;;
+ h) usage $0 ;;
+ *) usage $0 "Invalid argument '$OPTARG'" ;;
+ esac
+done
+
+trap 'rm -f $aio_file; rm -rf $testdir/results; error_exit' SIGINT SIGTERM ERR
+
+VM_PASSWORD="$ssh_pass"
+mkdir -p $testdir/results
+dd if=/dev/zero of=$aio_file bs=1M count=512
+
+timing_enter vhost_run
+vhost_run 0
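+# Enable NVMe hotplug monitoring and create the malloc and AIO bdevs that back the vhost-scsi targets.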
+$rpc_py bdev_nvme_set_hotplug -e
+$rpc_py bdev_malloc_create 256 4096 -b Malloc0
+$rpc_py bdev_aio_create $aio_file Aio0 512
+$rpc_py bdev_get_bdevs
+$rpc_py vhost_create_scsi_controller naa.vhost.1
+$rpc_py vhost_scsi_controller_add_target naa.vhost.1 0 Nvme0n1
+$rpc_py vhost_scsi_controller_add_target naa.vhost.1 1 Malloc0
+# TODO: Currently there is bug for aio device. Disable this test
+# $rpc_py vhost_scsi_controller_add_target naa.vhost.1 2 Aio0
+timing_exit vhost_run
+
+timing_enter start_vm
+vm_setup --force=1 --disk-type=spdk_vhost_scsi --os=$WINDOWS_IMG --disks=vhost --memory=4096
+vm_run "1"
+# Wait until VM goes up
+vm_wait_for_boot "300" "$vm_num"
+timing_exit start_vm
+
+vm_scp "$vm_num" $testdir/windows_scsi_compliance.ps1 127.0.0.1:/cygdrive/c/SCSI/
+vm_sshpass "$vm_num" "$ssh_pass" "cd /cygdrive/c/SCSI; powershell.exe -file windows_scsi_compliance.ps1"
+vm_scp "$vm_num" 127.0.0.1:/cygdrive/c/SCSI/WIN_SCSI_* $testdir/results/
+dos2unix $testdir/results/WIN_SCSI_*.log
+
+notice "Kill vm 1"
+vm_kill "$vm_num"
+notice "Kill spdk"
+vhost_kill 0
+notice "Remove $aio_file"
+rm -f $aio_file
+
+python3 $testdir/windows_scsi_compliance.py
+if ! $keep_results_dir; then
+ rm -rf $testdir/results
+fi