Diffstat
-rw-r--r--  src/spdk/test/nvmf/README.md                    |   5
-rw-r--r--  src/spdk/test/nvmf/common.sh                    | 292
-rwxr-xr-x  src/spdk/test/nvmf/host/aer.sh                  |  50
-rwxr-xr-x  src/spdk/test/nvmf/host/bdevperf.sh             |  50
-rwxr-xr-x  src/spdk/test/nvmf/host/fio.sh                  |  82
-rwxr-xr-x  src/spdk/test/nvmf/host/identify.sh             |  54
-rwxr-xr-x  src/spdk/test/nvmf/host/identify_kernel_nvmf.sh |  71
-rwxr-xr-x  src/spdk/test/nvmf/host/perf.sh                 |  93
-rwxr-xr-x  src/spdk/test/nvmf/host/target_disconnect.sh    |  89
-rwxr-xr-x  src/spdk/test/nvmf/nvmf.sh                      |  60
-rwxr-xr-x  src/spdk/test/nvmf/target/abort.sh              |  35
-rwxr-xr-x  src/spdk/test/nvmf/target/bdev_io_wait.sh       |  45
-rwxr-xr-x  src/spdk/test/nvmf/target/bdevio.sh             |  29
-rwxr-xr-x  src/spdk/test/nvmf/target/connect_disconnect.sh |  43
-rwxr-xr-x  src/spdk/test/nvmf/target/create_transport.sh   |  52
-rwxr-xr-x  src/spdk/test/nvmf/target/discovery.sh          |  49
-rwxr-xr-x  src/spdk/test/nvmf/target/filesystem.sh         |  92
-rwxr-xr-x  src/spdk/test/nvmf/target/fio.sh                |  77
-rwxr-xr-x  src/spdk/test/nvmf/target/fuzz.sh               |  43
-rwxr-xr-x  src/spdk/test/nvmf/target/identify_passthru.sh  |  76
-rwxr-xr-x  src/spdk/test/nvmf/target/initiator_timeout.sh  |  71
-rwxr-xr-x  src/spdk/test/nvmf/target/invalid.sh            |  63
-rwxr-xr-x  src/spdk/test/nvmf/target/multiconnection.sh    |  53
-rwxr-xr-x  src/spdk/test/nvmf/target/multitarget.sh        |  37
-rwxr-xr-x  src/spdk/test/nvmf/target/multitarget_rpc.py    |  84
-rwxr-xr-x  src/spdk/test/nvmf/target/nmic.sh               |  56
-rwxr-xr-x  src/spdk/test/nvmf/target/nvme_cli.sh           |  75
-rwxr-xr-x  src/spdk/test/nvmf/target/nvmf_example.sh       |  59
-rwxr-xr-x  src/spdk/test/nvmf/target/nvmf_lvol.sh          |  60
-rwxr-xr-x  src/spdk/test/nvmf/target/nvmf_vhost.sh         |  69
-rw-r--r--  src/spdk/test/nvmf/target/nvmf_vhost_fio.job    |  19
-rwxr-xr-x  src/spdk/test/nvmf/target/rpc.sh                | 124
-rwxr-xr-x  src/spdk/test/nvmf/target/shutdown.sh           | 155
-rwxr-xr-x  src/spdk/test/nvmf/target/srq_overwhelm.sh      |  50
34 files changed, 2362 insertions, 0 deletions
diff --git a/src/spdk/test/nvmf/README.md b/src/spdk/test/nvmf/README.md
new file mode 100644
index 000000000..19d6954c2
--- /dev/null
+++ b/src/spdk/test/nvmf/README.md
@@ -0,0 +1,5 @@
+# NVMe-oF test scripts
+
+The test scripts in this directory hierarchy can be run in isolation by passing
+the --iso flag when running the test script. This will set up the RDMA NIC for
+testing and then tear it back down again when the test is completed.
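For example, a hypothetical standalone invocation (the exact flag parsing lives in the shared test harness; --transport feeds the TEST_TRANSPORT check in nvmftestinit, defined in common.sh below):

```bash
# Run one target-side test in isolation over RDMA; --iso makes the harness
# set up the RDMA NIC before the test and tear it back down afterwards.
sudo ./target/fio.sh --iso --transport=rdma
```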
diff --git a/src/spdk/test/nvmf/common.sh b/src/spdk/test/nvmf/common.sh
new file mode 100644
index 000000000..5f52ef127
--- /dev/null
+++ b/src/spdk/test/nvmf/common.sh
@@ -0,0 +1,292 @@
+NVMF_PORT=4420
+NVMF_IP_PREFIX="192.168.100"
+NVMF_IP_LEAST_ADDR=8
+NVMF_TCP_IP_ADDRESS="127.0.0.1"
+NVMF_TRANSPORT_OPTS=""
+NVMF_SERIAL=SPDK00000000000001
+
+function build_nvmf_app_args() {
+ if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
+ NVMF_APP=(sudo -u "$USER" "${NVMF_APP[@]}")
+ NVMF_APP+=(-i "$NVMF_APP_SHM_ID" -e 0xFFFF)
+ else
+ NVMF_APP+=(-i "$NVMF_APP_SHM_ID" -e 0xFFFF)
+ fi
+}
+
+: ${NVMF_APP_SHM_ID="0"}
+export NVMF_APP_SHM_ID
+build_nvmf_app_args
+
+have_pci_nics=0
+
+function rxe_cfg() {
+ "$rootdir/scripts/rxe_cfg_small.sh" "$@"
+}
+
+function load_ib_rdma_modules() {
+ if [ $(uname) != Linux ]; then
+ return 0
+ fi
+
+ modprobe ib_cm
+ modprobe ib_core
+ # Newer kernels do not have the ib_ucm module
+ modprobe ib_ucm || true
+ modprobe ib_umad
+ modprobe ib_uverbs
+ modprobe iw_cm
+ modprobe rdma_cm
+ modprobe rdma_ucm
+}
+
+function detect_soft_roce_nics() {
+ rxe_cfg start
+}
+
+# Args 1 and 2 are the grep filters for finding our NICs.
+# subsequent args are all drivers that should be loaded if we find these NICs.
+# Those drivers should be supplied in the correct order.
+function detect_nics_and_probe_drivers() {
+ NIC_VENDOR="$1"
+ NIC_CLASS="$2"
+
+ nvmf_nic_bdfs=$(lspci | grep Ethernet | grep "$NIC_VENDOR" | grep "$NIC_CLASS" | awk -F ' ' '{print "0000:"$1}')
+
+ if [ -z "$nvmf_nic_bdfs" ]; then
+ return 0
+ fi
+
+ have_pci_nics=1
+ if [ $# -ge 2 ]; then
+ # shift out the first two positional arguments.
+ shift 2
+ # Iterate through the remaining arguments.
+ for i; do
+ modprobe "$i"
+ done
+ fi
+}
+
+function detect_pci_nics() {
+
+ if ! hash lspci; then
+ return 0
+ fi
+
+ detect_nics_and_probe_drivers "Mellanox" "ConnectX-4" "mlx4_core" "mlx4_ib" "mlx4_en"
+ detect_nics_and_probe_drivers "Mellanox" "ConnectX-5" "mlx5_core" "mlx5_ib"
+ detect_nics_and_probe_drivers "Intel" "X722" "i40e" "i40iw"
+ detect_nics_and_probe_drivers "Chelsio" "Unified Wire" "cxgb4" "iw_cxgb4"
+
+ if [ "$have_pci_nics" -eq "0" ]; then
+ return 0
+ fi
+
+ # Provide time for drivers to properly load.
+ sleep 5
+}
+
+function detect_rdma_nics() {
+ detect_pci_nics
+ if [ "$have_pci_nics" -eq "0" ]; then
+ detect_soft_roce_nics
+ fi
+}
+
+function allocate_nic_ips() {
+ ((count = NVMF_IP_LEAST_ADDR))
+ for nic_name in $(get_rdma_if_list); do
+ ip="$(get_ip_address $nic_name)"
+ if [ -z $ip ]; then
+ ip addr add $NVMF_IP_PREFIX.$count/24 dev $nic_name
+ ip link set $nic_name up
+ ((count = count + 1))
+ fi
+ # dump configuration for debug log
+ ip addr show $nic_name
+ done
+}
+
+function get_available_rdma_ips() {
+ for nic_name in $(get_rdma_if_list); do
+ get_ip_address $nic_name
+ done
+}
+
+function get_rdma_if_list() {
+ for nic_type in /sys/class/infiniband/*; do
+ [[ -e "$nic_type" ]] || break
+ for nic_name in /sys/class/infiniband/"$(basename ${nic_type})"/device/net/*; do
+ [[ -e "$nic_name" ]] || break
+ basename "$nic_name"
+ done
+ done
+}
+
+function get_ip_address() {
+ interface=$1
+ ip -o -4 addr show $interface | awk '{print $4}' | cut -d"/" -f1
+}
+
+function nvmfcleanup() {
+ sync
+ set +e
+ for i in {1..20}; do
+ modprobe -v -r nvme-$TEST_TRANSPORT
+ if modprobe -v -r nvme-fabrics; then
+ set -e
+ return 0
+ fi
+ sleep 1
+ done
+ set -e
+
+ # So far unable to remove the kernel modules. Try
+ # one more time and let it fail.
+ # Allow the transport module to fail for now. See Jim's comment
+ # about the nvme-tcp module below.
+ modprobe -v -r nvme-$TEST_TRANSPORT || true
+ modprobe -v -r nvme-fabrics
+}
+
+function nvmftestinit() {
+ if [ -z $TEST_TRANSPORT ]; then
+ echo "transport not specified - use --transport= to specify"
+ return 1
+ fi
+ if [ "$TEST_MODE" == "iso" ]; then
+ $rootdir/scripts/setup.sh
+ if [ "$TEST_TRANSPORT" == "rdma" ]; then
+ rdma_device_init
+ fi
+ fi
+
+ NVMF_TRANSPORT_OPTS="-t $TEST_TRANSPORT"
+ if [ "$TEST_TRANSPORT" == "rdma" ]; then
+ RDMA_IP_LIST=$(get_available_rdma_ips)
+ NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+ NVMF_SECOND_TARGET_IP=$(echo "$RDMA_IP_LIST" | tail -n +2 | head -n 1)
+ if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+ fi
+ elif [ "$TEST_TRANSPORT" == "tcp" ]; then
+ NVMF_FIRST_TARGET_IP=127.0.0.1
+ NVMF_TRANSPORT_OPTS="$NVMF_TRANSPORT_OPTS -o"
+ fi
+
+ # currently we run the host/perf test for TCP even on systems without kernel nvme-tcp
+ # support; that's fine since the host/perf test uses the SPDK initiator
+ # maybe later we will enforce modprobe to succeed once we have systems in the test pool
+ # with nvme-tcp kernel support - but until then let this pass so we can still run the
+ # host/perf test with the tcp transport
+ modprobe nvme-$TEST_TRANSPORT || true
+}
+
+function nvmfappstart() {
+ timing_enter start_nvmf_tgt
+ "${NVMF_APP[@]}" "$@" &
+ nvmfpid=$!
+ trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $nvmfpid
+ timing_exit start_nvmf_tgt
+}
+
+function nvmftestfini() {
+ nvmfcleanup || :
+ if [ -n "$nvmfpid" ]; then
+ killprocess $nvmfpid
+ fi
+ if [ "$TEST_MODE" == "iso" ]; then
+ $rootdir/scripts/setup.sh reset
+ if [ "$TEST_TRANSPORT" == "rdma" ]; then
+ rdma_device_init
+ fi
+ fi
+}
+
+function rdma_device_init() {
+ load_ib_rdma_modules
+ detect_rdma_nics
+ allocate_nic_ips
+}
+
+function revert_soft_roce() {
+ rxe_cfg stop
+}
+
+function check_ip_is_soft_roce() {
+ rxe_cfg status rxe | grep -wq "$1"
+}
+
+function nvme_connect() {
+ local init_count
+ init_count=$(nvme list | wc -l)
+
+ if ! nvme connect "$@"; then return $?; fi
+
+ for i in $(seq 1 10); do
+ if [ $(nvme list | wc -l) -gt $init_count ]; then
+ return 0
+ else
+ sleep 1s
+ fi
+ done
+ return 1
+}
+
+function get_nvme_devs() {
+ local dev rest
+
+ nvmes=()
+ while read -r dev rest; do
+ if [[ $dev == /dev/nvme* ]]; then
+ nvmes+=("$dev")
+ fi
+ if [[ $1 == print ]]; then
+ echo "$dev $rest"
+ fi
+ done < <(nvme list)
+ ((${#nvmes[@]})) || return 1
+ echo "${#nvmes[@]}" >&2
+}
+
+function gen_nvmf_target_json() {
+ local subsystem config=()
+
+ for subsystem in "${@:-1}"; do
+ config+=(
+ "$(
+ cat <<- EOF
+ {
+ "params": {
+ "name": "Nvme$subsystem",
+ "trtype": "$TEST_TRANSPORT",
+ "traddr": "$NVMF_FIRST_TARGET_IP",
+ "adrfam": "ipv4",
+ "trsvcid": "$NVMF_PORT",
+ "subnqn": "nqn.2016-06.io.spdk:cnode$subsystem"
+ },
+ "method": "bdev_nvme_attach_controller"
+ }
+ EOF
+ )"
+ )
+ done
+ jq . <<- JSON
+ {
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ $(
+ IFS=","
+ printf '%s\n' "${config[*]}"
+ )
+ ]
+ }
+ ]
+ }
+ JSON
+}
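As a concrete illustration, gen_nvmf_target_json above emits one bdev_nvme_attach_controller entry per subsystem argument. With the default argument ("1"), and assuming TEST_TRANSPORT=tcp, NVMF_FIRST_TARGET_IP=127.0.0.1 and NVMF_PORT=4420, its output would be:

```json
{
  "subsystems": [
    {
      "subsystem": "bdev",
      "config": [
        {
          "params": {
            "name": "Nvme1",
            "trtype": "tcp",
            "traddr": "127.0.0.1",
            "adrfam": "ipv4",
            "trsvcid": "4420",
            "subnqn": "nqn.2016-06.io.spdk:cnode1"
          },
          "method": "bdev_nvme_attach_controller"
        }
      ]
    }
  ]
}
```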
diff --git a/src/spdk/test/nvmf/host/aer.sh b/src/spdk/test/nvmf/host/aer.sh
new file mode 100755
index 000000000..1c438c686
--- /dev/null
+++ b/src/spdk/test/nvmf/host/aer.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+$rpc_py bdev_malloc_create 64 512 --name Malloc0
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 -m 2
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+$rpc_py nvmf_get_subsystems
+
+AER_TOUCH_FILE=/tmp/aer_touch_file
+rm -f $AER_TOUCH_FILE
+
+# Namespace Attribute Notice Tests
+$rootdir/test/nvme/aer/aer -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2016-06.io.spdk:cnode1" -n 2 -t $AER_TOUCH_FILE &
+aerpid=$!
+
+# Wait for the aer app to signal that it has started
+waitforfile $AER_TOUCH_FILE
+
+# Add a new namespace
+$rpc_py bdev_malloc_create 64 4096 --name Malloc1
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 2
+$rpc_py nvmf_get_subsystems
+
+wait $aerpid
+
+$rpc_py bdev_malloc_delete Malloc0
+$rpc_py bdev_malloc_delete Malloc1
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
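The test above synchronizes with the aer app through a touch file; a minimal, self-contained sketch of that handshake pattern (paths and timing are hypothetical):

```bash
#!/usr/bin/env bash
# The background job touches a marker once it is ready (standing in for
# "aer ... -t $AER_TOUCH_FILE"); the foreground polls for it, which is
# essentially what waitforfile does, then proceeds to trigger the event.
marker=/tmp/ready_marker
rm -f "$marker"
(sleep 1 && touch "$marker") &
while [ ! -e "$marker" ]; do sleep 0.1; done
echo "peer ready - safe to add the second namespace"
wait
```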
diff --git a/src/spdk/test/nvmf/host/bdevperf.sh b/src/spdk/test/nvmf/host/bdevperf.sh
new file mode 100755
index 000000000..776550c4d
--- /dev/null
+++ b/src/spdk/test/nvmf/host/bdevperf.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+function tgt_init() {
+ nvmfappstart -m 0xF
+
+ $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+ $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+}
+
+nvmftestinit
+# There is an intermittent error relating to this test and Soft-RoCE. For now, just
+# skip this test if we are using rxe. TODO: get to the bottom of GitHub issue #1165
+if [ $TEST_TRANSPORT == "rdma" ] && check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
+ echo "Using software RDMA, skipping the host bdevperf tests."
+ exit 0
+fi
+
+tgt_init
+
+"$rootdir/test/bdev/bdevperf/bdevperf" --json <(gen_nvmf_target_json) -q 128 -o 4096 -w verify -t 1
+
+"$rootdir/test/bdev/bdevperf/bdevperf" --json <(gen_nvmf_target_json) -q 128 -o 4096 -w verify -t 15 -f &
+bdevperfpid=$!
+
+sleep 3
+kill -9 $nvmfpid
+
+sleep 3
+tgt_init
+
+wait $bdevperfpid
+sync
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/host/fio.sh b/src/spdk/test/nvmf/host/fio.sh
new file mode 100755
index 000000000..85f9a00f1
--- /dev/null
+++ b/src/spdk/test/nvmf/host/fio.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/scripts/common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+if [[ $CONFIG_FIO_PLUGIN != y ]]; then
+ echo "FIO not available"
+ exit 1
+fi
+
+timing_enter start_nvmf_tgt
+
+"${NVMF_APP[@]}" -m 0xF &
+nvmfpid=$!
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+timing_exit start_nvmf_tgt
+
+$rpc_py bdev_malloc_create 64 512 -b Malloc1
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
+
+# Test fio_plugin as host with malloc backend
+fio_nvme $PLUGIN_DIR/example_config.fio --filename="trtype=$TEST_TRANSPORT adrfam=IPv4 \
+traddr=$NVMF_FIRST_TARGET_IP trsvcid=$NVMF_PORT ns=1"
+
+# second test mocking multiple SGL elements
+fio_nvme $PLUGIN_DIR/mock_sgl_config.fio --filename="trtype=$TEST_TRANSPORT adrfam=IPv4 \
+traddr=$NVMF_FIRST_TARGET_IP trsvcid=$NVMF_PORT ns=1"
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ # Test fio_plugin as host with nvme lvol backend
+ bdfs=$(get_nvme_bdfs)
+ $rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a $(echo $bdfs | awk '{ print $1 }') -i $NVMF_FIRST_TARGET_IP
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore -c 1073741824 Nvme0n1 lvs_0)
+ get_lvs_free_mb $ls_guid
+ $rpc_py bdev_lvol_create -l lvs_0 lbd_0 $free_mb
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode2 -a -s SPDK00000000000001
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode2 lvs_0/lbd_0
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode2 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ fio_nvme $PLUGIN_DIR/example_config.fio --filename="trtype=$TEST_TRANSPORT adrfam=IPv4 \
+ traddr=$NVMF_FIRST_TARGET_IP trsvcid=$NVMF_PORT ns=1"
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode2
+
+ # Test fio_plugin as host with nvme lvol nested backend
+ ls_nested_guid=$($rpc_py bdev_lvol_create_lvstore --clear-method none lvs_0/lbd_0 lvs_n_0)
+ get_lvs_free_mb $ls_nested_guid
+ $rpc_py bdev_lvol_create -l lvs_n_0 lbd_nest_0 $free_mb
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode3 -a -s SPDK00000000000001
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode3 lvs_n_0/lbd_nest_0
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode3 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ fio_nvme $PLUGIN_DIR/example_config.fio --filename="trtype=$TEST_TRANSPORT adrfam=IPv4 \
+ traddr=$NVMF_FIRST_TARGET_IP trsvcid=$NVMF_PORT ns=1"
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode3
+
+ sync
+ # Delete lvol_bdev and destroy lvol_store.
+ $rpc_py bdev_lvol_delete lvs_n_0/lbd_nest_0
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_n_0
+ $rpc_py bdev_lvol_delete lvs_0/lbd_0
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_0
+ $rpc_py bdev_nvme_detach_controller Nvme0
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+rm -f ./local-test-0-verify.state
+nvmftestfini
diff --git a/src/spdk/test/nvmf/host/identify.sh b/src/spdk/test/nvmf/host/identify.sh
new file mode 100755
index 000000000..412626388
--- /dev/null
+++ b/src/spdk/test/nvmf/host/identify.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+timing_enter start_nvmf_tgt
+
+"${NVMF_APP[@]}" -m 0xF &
+nvmfpid=$!
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+timing_exit start_nvmf_tgt
+
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+# NOTE: This will assign the same NGUID and EUI64 to all bdevs,
+# but currently we only have one (see above), so this is OK.
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0 \
+ --nguid "ABCDEF0123456789ABCDEF0123456789" \
+ --eui64 "ABCDEF0123456789"
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+$rpc_py nvmf_get_subsystems
+
+$SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2014-08.org.nvmexpress.discovery" -L all
+$SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2016-06.io.spdk:cnode1" -L all
+sync
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/host/identify_kernel_nvmf.sh b/src/spdk/test/nvmf/host/identify_kernel_nvmf.sh
new file mode 100755
index 000000000..d6dd2916b
--- /dev/null
+++ b/src/spdk/test/nvmf/host/identify_kernel_nvmf.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+nvmftestinit
+
+subsystemname=nqn.2016-06.io.spdk:testnqn
+
+modprobe null_blk nr_devices=1
+modprobe nvmet
+modprobe nvmet-rdma
+modprobe nvmet-fc
+modprobe lpfc
+
+if [ ! -d /sys/kernel/config/nvmet/subsystems/$subsystemname ]; then
+ mkdir /sys/kernel/config/nvmet/subsystems/$subsystemname
+fi
+echo 1 > /sys/kernel/config/nvmet/subsystems/$subsystemname/attr_allow_any_host
+
+if [ ! -d /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1 ]; then
+ mkdir /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1
+fi
+
+echo -n /dev/nullb0 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/device_path
+echo 1 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/enable
+
+if [ ! -d /sys/kernel/config/nvmet/ports/1 ]; then
+ mkdir /sys/kernel/config/nvmet/ports/1
+fi
+
+echo -n rdma > /sys/kernel/config/nvmet/ports/1/addr_trtype
+echo -n ipv4 > /sys/kernel/config/nvmet/ports/1/addr_adrfam
+echo -n $NVMF_FIRST_TARGET_IP > /sys/kernel/config/nvmet/ports/1/addr_traddr
+echo -n $NVMF_PORT > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
+
+ln -s /sys/kernel/config/nvmet/subsystems/$subsystemname /sys/kernel/config/nvmet/ports/1/subsystems/$subsystemname
+
+sleep 4
+
+$SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2014-08.org.nvmexpress.discovery" -t all
+$SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:$subsystemname"
+
+rm -rf /sys/kernel/config/nvmet/ports/1/subsystems/$subsystemname
+
+echo 0 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/enable
+echo -n 0 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/device_path
+
+rmdir --ignore-fail-on-non-empty /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1
+rmdir --ignore-fail-on-non-empty /sys/kernel/config/nvmet/subsystems/$subsystemname
+rmdir --ignore-fail-on-non-empty /sys/kernel/config/nvmet/ports/1
+
+rmmod lpfc
+rmmod nvmet_fc
+rmmod nvmet-rdma
+rmmod null_blk
+rmmod nvmet
+
+nvmftestfini
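Assuming nvme-cli is available, the kernel nvmet port configured above can also be sanity-checked from the initiator side before running identify, e.g.:

```bash
# Query the discovery service of the kernel target set up through configfs.
nvme discover -t rdma -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
```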
diff --git a/src/spdk/test/nvmf/host/perf.sh b/src/spdk/test/nvmf/host/perf.sh
new file mode 100755
index 000000000..69fa28f0b
--- /dev/null
+++ b/src/spdk/test/nvmf/host/perf.sh
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
+
+local_nvme_trid="trtype:PCIe traddr:"$($rpc_py framework_get_config bdev | jq -r '.[].params | select(.name=="Nvme0").traddr')
+bdevs="$bdevs $($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+if [ -n "$local_nvme_trid" ]; then
+ bdevs="$bdevs Nvme0n1"
+fi
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Test multi-process access to local NVMe device
+if [ -n "$local_nvme_trid" ]; then
+ if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
+ perf_app="sudo -u $(logname) $SPDK_EXAMPLE_DIR/perf"
+ else
+ perf_app="$SPDK_EXAMPLE_DIR/perf"
+ fi
+ $perf_app -i $NVMF_APP_SHM_ID -q 32 -o 4096 -w randrw -M 50 -t 1 -r "$local_nvme_trid"
+fi
+
+$SPDK_EXAMPLE_DIR/perf -q 1 -o 4096 -w randrw -M 50 -t 1 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+$SPDK_EXAMPLE_DIR/perf -q 32 -o 4096 -w randrw -M 50 -t 1 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+$SPDK_EXAMPLE_DIR/perf -q 128 -o 262144 -w randrw -M 50 -t 2 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+sync
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ # Configure nvme devices with nvmf lvol_bdev backend
+ if [ -n "$local_nvme_trid" ]; then
+ ls_guid=$($rpc_py bdev_lvol_create_lvstore Nvme0n1 lvs_0)
+ get_lvs_free_mb $ls_guid
+ # We don't need to create an lvol larger than 20G for this test.
+ # Decreasing the size of the nested lvol lets us spend less time setting up
+ # before running I/O.
+ if [ $free_mb -gt 20480 ]; then
+ free_mb=20480
+ fi
+ lb_guid=$($rpc_py bdev_lvol_create -u $ls_guid lbd_0 $free_mb)
+
+ # Create lvol bdev for nested lvol stores
+ ls_nested_guid=$($rpc_py bdev_lvol_create_lvstore $lb_guid lvs_n_0)
+ get_lvs_free_mb $ls_nested_guid
+ if [ $free_mb -gt 20480 ]; then
+ free_mb=20480
+ fi
+ lb_nested_guid=$($rpc_py bdev_lvol_create -u $ls_nested_guid lbd_nest_0 $free_mb)
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+ for bdev in $lb_nested_guid; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+ done
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ # Test perf as host with different io_size and qd_depth in nightly
+ qd_depth=("1" "32" "128")
+ io_size=("512" "131072")
+ for qd in "${qd_depth[@]}"; do
+ for o in "${io_size[@]}"; do
+ $SPDK_EXAMPLE_DIR/perf -q $qd -o $o -w randrw -M 50 -t 10 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+ done
+ done
+
+ # Delete subsystems, lvol_bdev and destroy lvol_store.
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+ $rpc_py bdev_lvol_delete "$lb_nested_guid"
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_n_0
+ $rpc_py bdev_lvol_delete "$lb_guid"
+ $rpc_py bdev_lvol_delete_lvstore -l lvs_0
+ fi
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/host/target_disconnect.sh b/src/spdk/test/nvmf/host/target_disconnect.sh
new file mode 100755
index 000000000..82521196b
--- /dev/null
+++ b/src/spdk/test/nvmf/host/target_disconnect.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+function disconnect_init() {
+ nvmfappstart -m 0xF0
+
+ $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+
+ $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $1 -s $NVMF_PORT
+}
+
+# Test to make sure we don't segfault or access null pointers when we try to connect to
+# a discovery controller that doesn't exist yet.
+function nvmf_target_disconnect_tc1() {
+ set +e
+ $SPDK_EXAMPLE_DIR/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
+ -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+ # If the program crashes, $? will be 128 plus the signal number, i.e. a value in the hundreds.
+ # But if the reconnect code detects errors and exits normally it will return 1.
+ if [ $? != 1 ]; then
+ set -e
+ exit 1
+ fi
+ set -e
+}
+
+function nvmf_target_disconnect_tc2() {
+ disconnect_init $NVMF_FIRST_TARGET_IP
+
+ # If perf doesn't shut down, this test will time out.
+ $SPDK_EXAMPLE_DIR/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
+ -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" &
+ reconnectpid=$!
+
+ sleep 2
+ kill -9 $nvmfpid
+
+ sleep 2
+ disconnect_init $NVMF_FIRST_TARGET_IP
+
+ wait $reconnectpid
+ sync
+}
+
+function nvmf_target_disconnect_tc3() {
+ $SPDK_EXAMPLE_DIR/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
+ -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT alt_traddr:$NVMF_SECOND_TARGET_IP" &
+ reconnectpid=$!
+
+ sleep 2
+ kill -9 $nvmfpid
+
+ sleep 2
+ disconnect_init $NVMF_SECOND_TARGET_IP
+
+ wait $reconnectpid
+ sync
+}
+
+nvmftestinit
+# There is an intermittent error relating to this test and Soft-RoCE. For now, just
+# skip this test if we are using rxe. TODO: get to the bottom of GitHub issue #1043
+if [ $TEST_TRANSPORT == "rdma" ] && check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
+ echo "Using software RDMA, skipping the target disconnect tests."
+else
+ run_test "nvmf_target_disconnect_tc1" nvmf_target_disconnect_tc1
+ run_test "nvmf_target_disconnect_tc2" nvmf_target_disconnect_tc2
+ if [ -n "$NVMF_SECOND_TARGET_IP" ]; then
+ run_test "nvmf_target_disconnect_tc3" nvmf_target_disconnect_tc3
+ fi
+fi
+
+trap - SIGINT SIGTERM EXIT
+nvmftestfini
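The tc1 check above leans on the shell's exit-status convention; a quick sketch of why a crash and a clean error exit are distinguishable:

```bash
# A process killed by a signal exits with 128 + signo, while a normal error
# exit returns the program's own code - so reconnect returning exactly 1
# means "detected errors and exited cleanly" rather than "crashed".
bash -c 'kill -SEGV $$'
echo $? # -> 139 (128 + 11)
bash -c 'exit 1'
echo $? # -> 1
```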
diff --git a/src/spdk/test/nvmf/nvmf.sh b/src/spdk/test/nvmf/nvmf.sh
new file mode 100755
index 000000000..b33d55cff
--- /dev/null
+++ b/src/spdk/test/nvmf/nvmf.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+if [ ! $(uname -s) = Linux ]; then
+ exit 0
+fi
+
+source $rootdir/test/nvmf/common.sh
+
+trap "exit 1" SIGINT SIGTERM EXIT
+
+TEST_ARGS=("$@")
+
+run_test "nvmf_example" test/nvmf/target/nvmf_example.sh "${TEST_ARGS[@]}"
+run_test "nvmf_filesystem" test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}"
+run_test "nvmf_discovery" test/nvmf/target/discovery.sh "${TEST_ARGS[@]}"
+run_test "nvmf_connect_disconnect" test/nvmf/target/connect_disconnect.sh "${TEST_ARGS[@]}"
+if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then
+ run_test "nvmf_nvme_cli" test/nvmf/target/nvme_cli.sh "${TEST_ARGS[@]}"
+fi
+run_test "nvmf_lvol" test/nvmf/target/nvmf_lvol.sh "${TEST_ARGS[@]}"
+#TODO: disabled due to intermittent failures. Need to triage.
+# run_test "nvmf_srq_overwhelm" test/nvmf/target/srq_overwhelm.sh $TEST_ARGS
+run_test "nvmf_vhost" test/nvmf/target/nvmf_vhost.sh "${TEST_ARGS[@]}"
+run_test "nvmf_bdev_io_wait" test/nvmf/target/bdev_io_wait.sh "${TEST_ARGS[@]}"
+run_test "nvmf_create_transport" test/nvmf/target/create_transport.sh "${TEST_ARGS[@]}"
+run_test "nvmf_multitarget" test/nvmf/target/multitarget.sh "${TEST_ARGS[@]}"
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ run_test "nvmf_fuzz" test/nvmf/target/fuzz.sh "${TEST_ARGS[@]}"
+ run_test "nvmf_multiconnection" test/nvmf/target/multiconnection.sh "${TEST_ARGS[@]}"
+ run_test "nvmf_initiator_timeout" test/nvmf/target/initiator_timeout.sh "${TEST_ARGS[@]}"
+fi
+
+run_test "nvmf_nmic" test/nvmf/target/nmic.sh "${TEST_ARGS[@]}"
+run_test "nvmf_rpc" test/nvmf/target/rpc.sh "${TEST_ARGS[@]}"
+run_test "nvmf_fio" test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
+run_test "nvmf_shutdown" test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}"
+run_test "nvmf_bdevio" test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"
+run_test "nvmf_invalid" test/nvmf/target/invalid.sh "${TEST_ARGS[@]}"
+run_test "nvmf_abort" test/nvmf/target/abort.sh "${TEST_ARGS[@]}"
+
+timing_enter host
+
+run_test "nvmf_bdevperf" test/nvmf/host/bdevperf.sh "${TEST_ARGS[@]}"
+run_test "nvmf_identify" test/nvmf/host/identify.sh "${TEST_ARGS[@]}"
+run_test "nvmf_perf" test/nvmf/host/perf.sh "${TEST_ARGS[@]}"
+
+# TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
+#run_test test/nvmf/host/identify_kernel_nvmf.sh $TEST_ARGS
+run_test "nvmf_aer" test/nvmf/host/aer.sh "${TEST_ARGS[@]}"
+run_test "nvmf_fio" test/nvmf/host/fio.sh "${TEST_ARGS[@]}"
+run_test "nvmf_target_disconnect" test/nvmf/host/target_disconnect.sh "${TEST_ARGS[@]}"
+
+timing_exit host
+
+trap - SIGINT SIGTERM EXIT
+revert_soft_roce
diff --git a/src/spdk/test/nvmf/target/abort.sh b/src/spdk/test/nvmf/target/abort.sh
new file mode 100755
index 000000000..913c17e19
--- /dev/null
+++ b/src/spdk/test/nvmf/target/abort.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=4096
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xE
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+# Construct a delay bdev on top of a malloc bdev that adds a constant delay to every read and write I/O (the latency arguments below are in microseconds)
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py bdev_delay_create -b Malloc0 -d Delay0 -r 1000000 -t 1000000 -w 1000000 -n 1000000
+
+# Create an NVMe-oF subsystem and add the delay bdev as a namespace
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -a -s SPDK0
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 Delay0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Run abort application
+$SPDK_EXAMPLE_DIR/abort -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" -c 0x1
+
+# Clean up
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode0
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
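Note that bdev_delay_create takes its -r/-t/-w/-n latencies in microseconds; a small hypothetical helper for thinking in milliseconds:

```bash
# Convert milliseconds to the microsecond values bdev_delay_create expects.
ms_to_us() { echo $(($1 * 1000)); }
ms_to_us 10 # -> 10000; the test above passes 1000000, i.e. a one-second delay
```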
diff --git a/src/spdk/test/nvmf/target/bdev_io_wait.sh b/src/spdk/test/nvmf/target/bdev_io_wait.sh
new file mode 100755
index 000000000..e57ffc36d
--- /dev/null
+++ b/src/spdk/test/nvmf/target/bdev_io_wait.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF --wait-for-rpc
+
+# Use a minimal bdev_io pool size (5) and cache size (1)
+$rpc_py bdev_set_options -p 5 -c 1
+$rpc_py framework_start_init
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x10 -i 1 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w write -t 1 &
+WRITE_PID=$!
+"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x20 -i 2 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w read -t 1 &
+READ_PID=$!
+"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x40 -i 3 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w flush -t 1 &
+FLUSH_PID=$!
+"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x80 -i 4 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w unmap -t 1 &
+UNMAP_PID=$!
+sync
+
+wait $WRITE_PID
+wait $READ_PID
+wait $FLUSH_PID
+wait $UNMAP_PID
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/bdevio.sh b/src/spdk/test/nvmf/target/bdevio.sh
new file mode 100755
index 000000000..f4d7eb1b5
--- /dev/null
+++ b/src/spdk/test/nvmf/target/bdevio.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+# Don't use cores 0 - 2 to avoid overlap with bdevio.
+nvmfappstart -m 0x78
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+$rootdir/test/bdev/bdevio/bdevio --json <(gen_nvmf_target_json)
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/connect_disconnect.sh b/src/spdk/test/nvmf/target/connect_disconnect.sh
new file mode 100755
index 000000000..b74394123
--- /dev/null
+++ b/src/spdk/test/nvmf/target/connect_disconnect.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+# connect disconnect is geared towards ensuring that we are properly freeing resources after disconnecting qpairs.
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -c 0
+
+bdev="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ num_iterations=200
+ IO_QUEUES="-i 8"
+else
+ num_iterations=10
+fi
+
+set +x
+for i in $(seq 1 $num_iterations); do
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" $IO_QUEUES
+ waitforserial "$NVMF_SERIAL"
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
+ waitforserial_disconnect "$NVMF_SERIAL"
+done
+set -x
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
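waitforserial and waitforserial_disconnect come from the shared harness; an assumed equivalent of the connect-side wait, for illustration only:

```bash
# Poll lsblk until a block device with the given serial shows up (roughly
# what waitforserial does); give up after 15 seconds.
wait_serial() {
	local serial=$1 i
	for ((i = 0; i < 15; i++)); do
		lsblk -l -o NAME,SERIAL | grep -qw "$serial" && return 0
		sleep 1
	done
	return 1
}
wait_serial "$NVMF_SERIAL"
```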
diff --git a/src/spdk/test/nvmf/target/create_transport.sh b/src/spdk/test/nvmf/target/create_transport.sh
new file mode 100755
index 000000000..e2766467b
--- /dev/null
+++ b/src/spdk/test/nvmf/target/create_transport.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+NULL_BDEV_SIZE=102400
+NULL_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+if ! hash nvme; then
+ echo "nvme command not found; skipping create transport test"
+ exit 0
+fi
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+# Use nvmf_create_transport call to create transport
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+null_bdevs="$($rpc_py bdev_null_create Null0 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE) "
+null_bdevs+="$($rpc_py bdev_null_create Null1 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE)"
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for null_bdev in $null_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $null_bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+echo "Perform nvmf subsystem discovery via RPC"
+$rpc_py nvmf_get_subsystems
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+for null_bdev in $null_bdevs; do
+ $rpc_py bdev_null_delete $null_bdev
+done
+
+check_bdevs=$($rpc_py bdev_get_bdevs | jq -r '.[].name')
+if [ -n "$check_bdevs" ]; then
+ echo $check_bdevs
+ exit 1
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/discovery.sh b/src/spdk/test/nvmf/target/discovery.sh
new file mode 100755
index 000000000..ad5a6ce96
--- /dev/null
+++ b/src/spdk/test/nvmf/target/discovery.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+NULL_BDEV_SIZE=102400
+NULL_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+if ! hash nvme; then
+ echo "nvme command not found; skipping discovery test"
+ exit 0
+fi
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+# Use at least 4 subsystems so they spill over to a second discovery log page
+for i in $(seq 1 4); do
+ $rpc_py bdev_null_create Null$i $NULL_BDEV_SIZE $NULL_BLOCK_SIZE
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK0000000000000$i
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Null$i
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+done
+
+nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+echo "Perform nvmf subsystem discovery via RPC"
+$rpc_py nvmf_get_subsystems
+
+for i in $(seq 1 4); do
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode$i
+ $rpc_py bdev_null_delete Null$i
+done
+
+check_bdevs=$($rpc_py bdev_get_bdevs | jq -r '.[].name')
+if [ -n "$check_bdevs" ]; then
+ echo $check_bdevs
+ exit 1
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/filesystem.sh b/src/spdk/test/nvmf/target/filesystem.sh
new file mode 100755
index 000000000..ff819fdb6
--- /dev/null
+++ b/src/spdk/test/nvmf/target/filesystem.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+function nvmf_filesystem_create() {
+ fstype=$1
+ nvme_name=$2
+
+ make_filesystem ${fstype} /dev/${nvme_name}p1
+
+ mount /dev/${nvme_name}p1 /mnt/device
+ touch /mnt/device/aaa
+ sync
+ rm /mnt/device/aaa
+ sync
+
+ i=0
+ while ! umount /mnt/device; do
+ [ $i -lt 15 ] || break
+ i=$((i + 1))
+ sleep 1
+ done
+
+ # Make sure the target did not crash
+ kill -0 $nvmfpid
+
+ # Make sure the device is still present
+ lsblk -l -o NAME | grep -q -w "${nvme_name}"
+
+ # Make sure the partition is still present
+ lsblk -l -o NAME | grep -q -w "${nvme_name}p1"
+}
+
+function nvmf_filesystem_part() {
+ incapsule=$1
+
+ nvmfappstart -m 0xF
+
+ $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -c $incapsule
+ $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+ waitforserial "$NVMF_SERIAL"
+ nvme_name=$(lsblk -l -o NAME,SERIAL | grep -oP "([\w]*)(?=\s+${NVMF_SERIAL})")
+
+ mkdir -p /mnt/device
+
+ parted -s /dev/${nvme_name} mklabel msdos mkpart primary '0%' '100%'
+ partprobe
+ sleep 1
+
+ if [ $incapsule -eq 0 ]; then
+ run_test "filesystem_ext4" nvmf_filesystem_create "ext4" ${nvme_name}
+ run_test "filesystem_btrfs" nvmf_filesystem_create "btrfs" ${nvme_name}
+ run_test "filesystem_xfs" nvmf_filesystem_create "xfs" ${nvme_name}
+ else
+ run_test "filesystem_incapsule_ext4" nvmf_filesystem_create "ext4" ${nvme_name}
+ run_test "filesystem_incapsule_btrfs" nvmf_filesystem_create "btrfs" ${nvme_name}
+ run_test "filesystem_incapsule_xfs" nvmf_filesystem_create "xfs" ${nvme_name}
+ fi
+
+ parted -s /dev/${nvme_name} rm 1
+
+ sync
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+ trap - SIGINT SIGTERM EXIT
+
+ killprocess $nvmfpid
+ nvmfpid=
+}
+
+run_test "nvmf_filesystem_no_incapsule" nvmf_filesystem_part 0
+run_test "nvmf_filesystem_incapsule" nvmf_filesystem_part 4096
+
+nvmftestfini
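The grep -oP lookahead above extracts the NAME column of the lsblk row whose SERIAL matches the subsystem's serial; an equivalent, arguably more readable awk formulation (shown only as an assumed alternative):

```bash
# Print the kernel block-device name whose serial matches the subsystem's.
nvme_name=$(lsblk -l -o NAME,SERIAL | awk -v s="$NVMF_SERIAL" '$2 == s {print $1}')
```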
diff --git a/src/spdk/test/nvmf/target/fio.sh b/src/spdk/test/nvmf/target/fio.sh
new file mode 100755
index 000000000..4e98d7083
--- /dev/null
+++ b/src/spdk/test/nvmf/target/fio.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+# Create a RAID-0 bdev from two malloc bdevs
+raid_malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+raid_malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+$rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$raid_malloc_bdevs"
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Append the raid0 bdev into subsystem
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 raid0
+
+nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+waitforserial $NVMF_SERIAL 3
+
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 1 -v
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t randwrite -r 1 -v
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t write -r 1 -v
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t randwrite -r 1 -v
+
+sync
+
+# Start the hotplug test case
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t read -r 10 &
+fio_pid=$!
+
+sleep 3
+
+$rpc_py bdev_raid_delete "raid0"
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py bdev_malloc_delete "$malloc_bdev"
+done
+
+fio_status=0
+wait $fio_pid || fio_status=$?
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+if [ $fio_status -eq 0 ]; then
+ echo "nvmf hotplug test: fio successful - expected failure"
+ nvmftestfini
+ exit 1
+else
+ echo "nvmf hotplug test: fio failed as expected"
+fi
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+rm -f ./local-job0-0-verify.state
+rm -f ./local-job1-1-verify.state
+rm -f ./local-job2-2-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/fuzz.sh b/src/spdk/test/nvmf/target/fuzz.sh
new file mode 100755
index 000000000..5a18be856
--- /dev/null
+++ b/src/spdk/test/nvmf/target/fuzz.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+"${NVMF_APP[@]}" -m 0xF > $output_dir/nvmf_fuzz_tgt_output.txt 2>&1 &
+nvmfpid=$!
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; rm -f $testdir/nvmf_fuzz.conf; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+$rpc_py bdev_malloc_create -b Malloc0 64 512
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+echo "[Nvme]" > $testdir/nvmf_fuzz.conf
+echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT\" Nvme0" >> $testdir/nvmf_fuzz.conf
+
+# Note that we choose a fixed seed so that this test is reproducible across nightly builds.
+$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t 30 -S 123456 -C $testdir/nvmf_fuzz.conf -N -a 2> $output_dir/nvmf_fuzz_logs1.txt
+# We don't specify a seed for this test. Instead we run a static list of commands from example.json.
+$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -C $testdir/nvmf_fuzz.conf -j $rootdir/test/app/fuzz/nvme_fuzz/example.json -a 2> $output_dir/nvmf_fuzz_logs2.txt
+
+rm -f $testdir/nvmf_fuzz.conf
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $nvmfpid
+nvmfpid=
+
+nvmftestfini
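The fixed -S 123456 seed is what makes the first fuzz pass above reproducible; the same idea in plain bash, for illustration (exact sequences are an implementation detail of the shell version):

```bash
# Seeding the RNG identically yields an identical pseudo-random stream.
RANDOM=123456
echo "$RANDOM $RANDOM $RANDOM" # same three numbers on every invocation
```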
diff --git a/src/spdk/test/nvmf/target/identify_passthru.sh b/src/spdk/test/nvmf/target/identify_passthru.sh
new file mode 100755
index 000000000..2ce52fe38
--- /dev/null
+++ b/src/spdk/test/nvmf/target/identify_passthru.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+source $rootdir/scripts/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+timing_enter nvme_identify
+
+bdf=$(get_first_nvme_bdf)
+if [ -z "${bdf}" ]; then
+ echo "No NVMe drive found but test requires it. Failing the test."
+ exit 1
+fi
+
+# Expected values
+nvme_serial_number=$($SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:${bdf}" -i 0 | grep "Serial Number:" | awk '{print $3}')
+nvme_model_number=$($SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:${bdf}" -i 0 | grep "Model Number:" | awk '{print $3}')
+
+timing_exit nvme_identify
+
+timing_enter start_nvmf_tgt
+
+"${NVMF_APP[@]}" -m 0xF --wait-for-rpc &
+nvmfpid=$!
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py -v nvmf_set_config --passthru-identify-ctrlr
+$rpc_py -v framework_start_init
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+timing_exit start_nvmf_tgt
+
+$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a ${bdf}
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+$rpc_py nvmf_get_subsystems
+
+# Discovered values
+nvmf_serial_number=$($SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2016-06.io.spdk:cnode1" | grep "Serial Number:" | awk '{print $3}')
+
+nvmf_model_number=$($SPDK_EXAMPLE_DIR/identify -r "\
+ trtype:$TEST_TRANSPORT \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2016-06.io.spdk:cnode1" | grep "Model Number:" | awk '{print $3}')
+
+if [ ${nvme_serial_number} != ${nvmf_serial_number} ]; then
+ echo "Serial number doesn't match"
+ exit 1
+fi
+
+if [ ${nvme_model_number} != ${nvmf_model_number} ]; then
+ echo "Model number doesn't match"
+ exit 1
+fi
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/initiator_timeout.sh b/src/spdk/test/nvmf/target/initiator_timeout.sh
new file mode 100755
index 000000000..199983be5
--- /dev/null
+++ b/src/spdk/test/nvmf/target/initiator_timeout.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
+
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+
+# We cannot configure the bdev with an incredibly high latency up front because connect will not work properly.
+$rpc_py bdev_delay_create -b Malloc0 -d Delay0 -r 30 -t 30 -w 30 -n 30
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Delay0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+waitforserial "$NVMF_SERIAL"
+
+# Once our timed-out I/O completes, we will still have 10 sec of I/O left to run.
+$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 60 -v &
+fio_pid=$!
+
+sleep 3
+
+# The kernel initiator has a default timeout of 30 seconds. Delay for 31 seconds to trigger an initiator reconnect.
+$rpc_py bdev_delay_update_latency Delay0 avg_read 31000000
+$rpc_py bdev_delay_update_latency Delay0 avg_write 31000000
+$rpc_py bdev_delay_update_latency Delay0 p99_read 31000000
+$rpc_py bdev_delay_update_latency Delay0 p99_write 31000000
+
+sleep 3
+
+# Reset these values so that subsequent I/O will complete in a timely manner.
+$rpc_py bdev_delay_update_latency Delay0 avg_read 30
+$rpc_py bdev_delay_update_latency Delay0 avg_write 30
+$rpc_py bdev_delay_update_latency Delay0 p99_read 30
+$rpc_py bdev_delay_update_latency Delay0 p99_write 30
+
+fio_status=0
+wait $fio_pid || fio_status=$?
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+if [ $fio_status -eq 0 ]; then
+ echo "nvmf hotplug test: fio successful as expected"
+else
+ echo "nvmf hotplug test: fio failed, expected success"
+ nvmftestfini
+ exit 1
+fi
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+rm -f ./local-job0-0-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/invalid.sh b/src/spdk/test/nvmf/target/invalid.sh
new file mode 100755
index 000000000..98246efeb
--- /dev/null
+++ b/src/spdk/test/nvmf/target/invalid.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f "$(dirname "$0")")
+rootdir=$(readlink -f "$testdir/../../..")
+source "$rootdir/test/common/autotest_common.sh"
+source "$rootdir/test/nvmf/common.sh"
+
+multi_target_rpc=$rootdir/test/nvmf/target/multitarget_rpc.py
+rpc=$rootdir/scripts/rpc.py
+nqn=nqn.2016-06.io.spdk:cnode
+target=foobar
+# pre-seed the rng to generate predictable values across different test runs
+RANDOM=0
+
+gen_random_s() {
+ local length=$1 ll
+ # generate ascii table which nvme supports
+ # generate characters from the printable ASCII range (0x20-0x7e) that NVMe supports
+ local chars=({32..126})
+
+ for ((ll = 0; ll < length; ll++)); do
+ string+="$(echo -e "\x$(printf '%x' "${chars[RANDOM % ${#chars[@]}]}")")"
+ done
+ # Be nice to rpc.py's arg parser and escape `-` in case it's a first character
+ if [[ ${string::1} == "-" ]]; then
+ string=${string/-/\\-}
+ fi
+ echo "$string"
+}
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
+
+# Attempt to create subsystem with non-existing target
+out=$("$rpc" nvmf_create_subsystem -t "$target" "$nqn$RANDOM" 2>&1) && false
+[[ $out == *"Unable to find target"* ]]
+
+# Attempt to create subsystem with invalid serial number - inject ASCII char that's
+# not in the range (0x20-0x7e) of those supported by the nvme spec.
+out=$("$rpc" nvmf_create_subsystem -s "$NVMF_SERIAL$(echo -e "\x1f")" "$nqn$RANDOM" 2>&1) && false
+[[ $out == *"Invalid SN"* ]]
+
+# Attempt to create subsystem with invalid model - inject ASCII char that's not in the
+# range (0x20-0x7e) of those supported by the nvme spec.
+out=$("$rpc" nvmf_create_subsystem -d "SPDK_Controller$(echo -e "\x1f")" "$nqn$RANDOM" 2>&1) && false
+[[ $out == *"Invalid MN"* ]]
+
+# Attempt to create subsystem with invalid serial number - exceed SPDK_NVME_CTRLR_SN_LEN (20)
+out=$("$rpc" nvmf_create_subsystem -s "$(gen_random_s 21)" "$nqn$RANDOM" 2>&1) && false
+[[ $out == *"Invalid SN"* ]]
+
+# Attempt to create subsystem with invalid model - exceed SPDK_NVME_CTRLR_MN_LEN (40)
+out=$("$rpc" nvmf_create_subsystem -d "$(gen_random_s 41)" "$nqn$RANDOM" 2>&1) && false
+[[ $out == *"Invalid MN"* ]]
+
+# Attempt to delete non-existing target
+out=$("$multi_target_rpc" nvmf_delete_target --name "$target" 2>&1) && false
+[[ $out == *"The specified target doesn't exist, cannot delete it."* ]]
+
+trap - SIGINT SIGTERM EXIT
+nvmftestfini
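For reference, a hypothetical direct use of the gen_random_s helper defined above, producing a serial that just exceeds SPDK_NVME_CTRLR_SN_LEN (20):

```bash
s=$(gen_random_s 21)
echo "${#s}" # -> 21, one character over the limit, so the RPC must reject it
```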
diff --git a/src/spdk/test/nvmf/target/multiconnection.sh b/src/spdk/test/nvmf/target/multiconnection.sh
new file mode 100755
index 000000000..d7e490861
--- /dev/null
+++ b/src/spdk/test/nvmf/target/multiconnection.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+NVMF_SUBSYS=11
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+# Soft-RoCE does not have enough queues available for
+# multiconnection tests. Detect if we're using software RDMA.
+# If so - lower the number of subsystems for test.
+if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
+ echo "Using software RDMA, lowering number of NVMeOF subsystems."
+ NVMF_SUBSYS=1
+fi
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+for i in $(seq 1 $NVMF_SUBSYS); do
+ $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+done
+
+for i in $(seq 1 $NVMF_SUBSYS); do
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+ waitforserial SPDK$i
+done
+
+$rootdir/scripts/fio.py -p nvmf -i 262144 -d 64 -t read -r 10
+$rootdir/scripts/fio.py -p nvmf -i 262144 -d 64 -t randwrite -r 10
+
+sync
+for i in $(seq 1 $NVMF_SUBSYS); do
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode${i}
+done
+
+rm -f ./local-job0-0-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/multitarget.sh b/src/spdk/test/nvmf/target/multitarget.sh
new file mode 100755
index 000000000..4c3ece7c0
--- /dev/null
+++ b/src/spdk/test/nvmf/target/multitarget.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+# For the time being this script is just meant to confirm the basic functionality of the
+# multitarget RPCs as the in-tree applications don't support multi-target functionality.
+rpc_py="$rootdir/test/nvmf/target/multitarget_rpc.py"
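+
+# The wrapper exposes three RPCs; a typical hand-run session (the target name is
+# illustrative) looks like:
+#   $rpc_py nvmf_create_target -n my_tgt -s 32
+#   $rpc_py nvmf_get_targets
+#   $rpc_py nvmf_delete_target -n my_tgt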
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+# Target application should start with a single target.
+if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then
+ echo "SPDK application did not start with the proper number of targets." && false
+fi
+
+$rpc_py nvmf_create_target -n nvmf_tgt_1 -s 32
+$rpc_py nvmf_create_target -n nvmf_tgt_2 -s 32
+
+if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "3" ]; then
+ echo "nvmf_create_target RPC didn't properly create targets." && false
+fi
+
+$rpc_py nvmf_delete_target -n nvmf_tgt_1
+$rpc_py nvmf_delete_target -n nvmf_tgt_2
+
+if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then
+ echo "nvmf_delete_target RPC didn't properly destroy targets." && false
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/multitarget_rpc.py b/src/spdk/test/nvmf/target/multitarget_rpc.py
new file mode 100755
index 000000000..c5ccbcece
--- /dev/null
+++ b/src/spdk/test/nvmf/target/multitarget_rpc.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+
+# Not for use in production. Please see the changelog for v19.10.
+
+from rpc.client import print_dict, JSONRPCException
+
+import logging
+import argparse
+import rpc
+import sys
+import shlex
+
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
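+# Example invocations (sketch; the target name is illustrative):
+#   ./multitarget_rpc.py nvmf_create_target -n nvmf_tgt_1 -s 32
+#   ./multitarget_rpc.py nvmf_get_targets
+#   echo nvmf_get_targets | ./multitarget_rpc.py   # script mode via stdin
+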
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description='NVMe-oF RPC command line interface. NOTE: spdk/scripts/ is expected in PYTHONPATH')
+ parser.add_argument('-s', dest='server_addr',
+ help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock')
+ parser.add_argument('-p', dest='port',
+ help='RPC port number (if server_addr is IP address)',
+ default=5260, type=int)
+ parser.add_argument('-t', dest='timeout',
+ help='Timeout in seconds (float) to wait for a response. Default: 60.0',
+ default=60.0, type=float)
+ parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
+ help='Set verbose mode to INFO', default="ERROR")
+ parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
+ help="""Set verbose level. """)
+ subparsers = parser.add_subparsers(help='RPC methods')
+
+ def nvmf_create_target(args):
+ print_dict(rpc.nvmf.nvmf_create_target(args.client,
+ name=args.name,
+ max_subsystems=args.max_subsystems))
+
+ p = subparsers.add_parser('nvmf_create_target', help='Create a new NVMe-oF target')
+ p.add_argument('-n', '--name', help='Target name (unique to application)', type=str, required=True)
+ p.add_argument('-s', '--max-subsystems', help='Max number of NVMe-oF subsystems; defaults to SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS',
+ type=int, required=False)
+ p.set_defaults(func=nvmf_create_target)
+
+ def nvmf_delete_target(args):
+ print_dict(rpc.nvmf.nvmf_delete_target(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser('nvmf_delete_target', help='Destroy the given NVMe-oF Target')
+ p.add_argument('-n', '--name', help='Target name (unique to application)', type=str, required=True)
+ p.set_defaults(func=nvmf_delete_target)
+
+ def nvmf_get_targets(args):
+ print_dict(rpc.nvmf.nvmf_get_targets(args.client))
+
+ p = subparsers.add_parser('nvmf_get_targets', help='Get the list of NVMe-oF Targets')
+ p.set_defaults(func=nvmf_get_targets)
+
+ def call_rpc_func(args):
+ try:
+ args.func(args)
+ except JSONRPCException as ex:
+ print(ex.message)
+ exit(1)
+
+ def execute_script(parser, client, fd):
+ for rpc_call in map(str.rstrip, fd):
+ if not rpc_call.strip():
+ continue
+ args = parser.parse_args(shlex.split(rpc_call))
+ args.client = client
+ call_rpc_func(args)
+
+ args = parser.parse_args()
+ args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper()))
+ if hasattr(args, 'func'):
+ call_rpc_func(args)
+ elif sys.stdin.isatty():
+ # No arguments and no data piped through stdin
+ parser.print_help()
+ exit(1)
+ else:
+ execute_script(parser, args.client, sys.stdin)
diff --git a/src/spdk/test/nvmf/target/nmic.sh b/src/spdk/test/nvmf/target/nmic.sh
new file mode 100755
index 000000000..6a967dc08
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nmic.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
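+# Grab a second target IP, if available, for the multipath test (case 2) below.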
+NVMF_SECOND_TARGET_IP=$(echo "$RDMA_IP_LIST" | sed -n 2p)
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+# Create subsystems
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
+
+echo "test case1: single bdev can't be used in multiple subsystems"
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode2 -a -s SPDK2
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode2 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
+nmic_status=0
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode2 Malloc0 || nmic_status=$?
+
+if [ $nmic_status -eq 0 ]; then
+ echo " Adding namespace passed - failure expected."
+ nvmftestfini
+ exit 1
+else
+ echo " Adding namespace failed - expected result."
+fi
+
+echo "test case2: host connect to nvmf target in multiple paths"
+if [ -n "$NVMF_SECOND_TARGET_IP" ]; then
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_SECOND_TARGET_IP -s $NVMF_PORT
+
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_SECOND_TARGET_IP" -s "$NVMF_PORT"
+
+ waitforserial "$NVMF_SERIAL"
+
+ $rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 1 -v
+fi
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/nvme_cli.sh b/src/spdk/test/nvmf/target/nvme_cli.sh
new file mode 100755
index 000000000..29359689b
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nvme_cli.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+if [ -z "${DEPENDENCY_DIR}" ]; then
+ echo DEPENDENCY_DIR not defined!
+ exit 1
+fi
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL -d SPDK_Controller1
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+waitforserial $NVMF_SERIAL 2
+if ! get_nvme_devs print 2> /dev/null; then
+ echo "Could not find any nvme devices to work with, aborting the test" >&2
+ exit 1
+fi
+
+for ctrl in "${nvmes[@]}"; do
+ nvme id-ctrl $ctrl
+ nvme smart-log $ctrl
+ nvme_model=$(nvme id-ctrl $ctrl | grep -w mn | sed 's/^.*: //' | sed 's/ *$//')
+ if [ "$nvme_model" != "SPDK_Controller1" ]; then
+ echo "Wrong model number for controller" $nvme_model
+ exit 1
+ fi
+done
+
+for ns in "${nvmes[@]}"; do
+ nvme id-ns $ns
+done
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
+
+# Test spdk/nvme-cli NVMe-oF commands: discover, connect and disconnect
+nvme_cli_build
+pushd "${DEPENDENCY_DIR}/nvme-cli"
+
+sed -i 's/shm_id=.*/shm_id=-1/g' spdk.conf
+./nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
+nvme_num_before_connection=$(get_nvme_devs 2>&1 || echo 0)
+./nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+sleep 1
+nvme_num=$(get_nvme_devs 2>&1)
+./nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
+if [ $nvme_num -le $nvme_num_before_connection ]; then
+ echo "spdk/nvme-cli connect target devices failed"
+ exit 1
+fi
+popd
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/nvmf_example.sh b/src/spdk/test/nvmf/target/nvmf_example.sh
new file mode 100755
index 000000000..28045bc49
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nvmf_example.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+function build_nvmf_example_args() {
+ if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
+ echo "sudo -u $(logname) $SPDK_EXAMPLE_DIR/nvmf -i $NVMF_APP_SHM_ID" -g 10000
+ else
+ echo "$SPDK_EXAMPLE_DIR/nvmf -i $NVMF_APP_SHM_ID" -g 10000
+ fi
+}
+
+NVMF_EXAMPLE="$(build_nvmf_example_args)"
+
+function nvmfexamplestart() {
+ timing_enter start_nvmf_example
+ $NVMF_EXAMPLE $1 &
+ nvmfpid=$!
+ trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $nvmfpid
+ timing_exit start_nvmf_example
+}
+
+timing_enter nvmf_example_test
+nvmftestinit
+nvmfexamplestart "-m 0xF"
+
+# Create transport
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+# Create malloc bdev
+malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+# Create subsystem
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+
+# Add namespaces to the subsystem
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
+done
+
+# Add a listener to the subsystem
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+perf="$SPDK_EXAMPLE_DIR/perf"
+
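+# perf flags: -q queue depth, -o I/O size in bytes, -w workload type, -M read
+# percentage for mixed workloads, -t runtime in seconds.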
+$perf -q 64 -o 4096 -w randrw -M 30 -t 10 \
+ -r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
+subnqn:nqn.2016-06.io.spdk:cnode1"
+
+trap - SIGINT SIGTERM EXIT
+nvmftestfini
+timing_exit nvmf_example_test
diff --git a/src/spdk/test/nvmf/target/nvmf_lvol.sh b/src/spdk/test/nvmf/target/nvmf_lvol.sh
new file mode 100755
index 000000000..d44bc9332
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nvmf_lvol.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+LVOL_BDEV_INIT_SIZE=20
+LVOL_BDEV_FINAL_SIZE=30
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0x7
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+# Construct a RAID volume for the logical volume store
+base_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+base_bdevs+=$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)
+$rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$base_bdevs"
+
+# Create the logical volume store on the RAID volume
+lvs=$($rpc_py bdev_lvol_create_lvstore raid0 lvs)
+
+# Create a logical volume on the logical volume store
+lvol=$($rpc_py bdev_lvol_create -u $lvs lvol $LVOL_BDEV_INIT_SIZE)
+
+# Create an NVMe-oF subsystem and add the logical volume as a namespace
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -a -s SPDK0
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 $lvol
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Start random writes in the background
+$SPDK_EXAMPLE_DIR/perf -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" -o 4096 -q 128 -s 512 -w randwrite -t 10 -c 0x18 &
+perf_pid=$!
+
+sleep 1
+
+# Perform some operations on the logical volume
+snapshot=$($rpc_py bdev_lvol_snapshot $lvol "MY_SNAPSHOT")
+$rpc_py bdev_lvol_resize $lvol $LVOL_BDEV_FINAL_SIZE
+clone=$($rpc_py bdev_lvol_clone $snapshot "MY_CLONE")
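+# Inflating the clone allocates all of its clusters, decoupling it from the snapshot.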
+$rpc_py bdev_lvol_inflate $clone
+
+# Wait for I/O to complete
+wait $perf_pid
+
+# Clean up
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode0
+$rpc_py bdev_lvol_delete $lvol
+$rpc_py bdev_lvol_delete_lvstore -u $lvs
+
+rm -f ./local-job*
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/nvmf_vhost.sh b/src/spdk/test/nvmf/target/nvmf_vhost.sh
new file mode 100755
index 000000000..48e78d6d2
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nvmf_vhost.sh
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$testdir/../../..
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+if [ ! -d "/usr/local/qemu/spdk-3.0.0" ]; then
+ echo "Qemu not installed on this machine. It may be a VM. Skipping nvmf_vhost test."
+ exit 0
+fi
+
+source $rootdir/test/vhost/common.sh
+
+MALLOC_BDEV_SIZE=128
+MALLOC_BLOCK_SIZE=512
+NVMF_SOCK="/tmp/nvmf_rpc.sock"
+NVMF_RPC="$rootdir/scripts/rpc.py -s $NVMF_SOCK"
+
+VHOST_SOCK="/tmp/vhost_rpc.sock"
+VHOST_APP+=(-p 0 -r "$VHOST_SOCK" -u)
+VHOST_RPC="$rootdir/scripts/rpc.py -s $VHOST_SOCK"
+
+nvmftestinit
+
+# Start Apps
+"${NVMF_APP[@]}" -r $NVMF_SOCK &
+nvmfpid=$!
+waitforlisten $nvmfpid $NVMF_SOCK
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+mkdir -p "$(get_vhost_dir 3)"
+
+"${VHOST_APP[@]}" -S "$(get_vhost_dir 3)" &
+vhostpid=$!
+waitforlisten $vhostpid $VHOST_SOCK
+
+trap 'process_shm --id $NVMF_APP_SHM_ID; killprocess $vhostpid; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+# Configure NVMF tgt on host machine
+malloc_bdev="$($NVMF_RPC bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+$NVMF_RPC nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -p 4
+$NVMF_RPC nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$NVMF_RPC nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
+$NVMF_RPC nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Configure VHost on host machine
+$VHOST_RPC bdev_nvme_attach_controller -b Nvme0 -t $TEST_TRANSPORT -f ipv4 -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
+$VHOST_RPC vhost_create_scsi_controller naa.VhostScsi0.3
+$VHOST_RPC vhost_scsi_controller_add_target naa.VhostScsi0.3 0 "Nvme0n1"
+
+# Start the QEMU-based VM.
+vm_setup --os="$VM_IMAGE" --disk-type=spdk_vhost_scsi --disks="VhostScsi0" --force=3 --vhost-name=3
+
+vm_run 3
+
+vm_wait_for_boot 300 3
+
+# Run the fio workload remotely
+vm_scp 3 $testdir/nvmf_vhost_fio.job 127.0.0.1:/root/nvmf_vhost_fio.job
+vm_exec 3 "fio /root/nvmf_vhost_fio.job"
+vm_shutdown_all
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $vhostpid
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/nvmf_vhost_fio.job b/src/spdk/test/nvmf/target/nvmf_vhost_fio.job
new file mode 100644
index 000000000..350aa895e
--- /dev/null
+++ b/src/spdk/test/nvmf/target/nvmf_vhost_fio.job
@@ -0,0 +1,19 @@
+[global]
+blocksize=4k-512k
+iodepth=128
+ioengine=libaio
+filename=/dev/sdb
+group_reporting
+thread
+numjobs=1
+direct=1
+do_verify=1
+verify=md5
+verify_fatal=1
+verify_dump=1
+verify_backlog=8
+
+[randwrite]
+rw=randwrite
+runtime=15
+time_based
diff --git a/src/spdk/test/nvmf/target/rpc.sh b/src/spdk/test/nvmf/target/rpc.sh
new file mode 100755
index 000000000..d715e4b4f
--- /dev/null
+++ b/src/spdk/test/nvmf/target/rpc.sh
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+function jcount() {
+ local filter=$1
+ jq "$filter" | wc -l
+}
+
+function jsum() {
+ local filter=$1
+ jq "$filter" | awk '{s+=$1}END{print s}'
+}
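+
+# Example usage (mirrors the checks below):
+#   jcount '.poll_groups[].name' <<< "$stats"
+#   jsum '.poll_groups[].io_qpairs' <<< "$stats"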
+
+nvmftestinit
+nvmfappstart -m 0xF
+
+stats=$($rpc_py nvmf_get_stats)
+# Expect 4 poll groups (one per core in the 0xF CPU mask) and no transports yet
+[ "4" -eq $(jcount .poll_groups[].name <<< "$stats") ]
+[ "null" == $(jq .poll_groups[0].transports[0] <<< "$stats") ]
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+stats=$($rpc_py nvmf_get_stats)
+# Expect no QPs
+[ "0" -eq $(jsum .poll_groups[].admin_qpairs <<< "$stats") ]
+[ "0" -eq $(jsum .poll_groups[].io_qpairs <<< "$stats") ]
+# Transport statistics are currently implemented for RDMA only
+if [ 'rdma' == $TEST_TRANSPORT ]; then
+ # Expect RDMA transport and some devices
+ [ "1" -eq $(jcount .poll_groups[0].transports[].trtype <<< "$stats") ]
+ transport_type=$(jq -r .poll_groups[0].transports[0].trtype <<< "$stats")
+ [ "${transport_type,,}" == "${TEST_TRANSPORT,,}" ]
+ [ "0" -lt $(jcount .poll_groups[0].transports[0].devices[].name <<< "$stats") ]
+fi
+
+# Set the iteration count for the subsystem construct/delete loops below
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ times=50
+else
+ times=3
+fi
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
+
+# Disallow host NQN and make sure connect fails
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
+$rpc_py nvmf_subsystem_allow_any_host -d nqn.2016-06.io.spdk:cnode1
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# This connect should fail - the host NQN is not allowed
+! nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+# Add the host NQN and verify that the connect succeeds
+$rpc_py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1
+nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+waitforserial "$NVMF_SERIAL"
+nvme disconnect -n nqn.2016-06.io.spdk:cnode1
+
+# Remove the host and verify that the connect fails
+$rpc_py nvmf_subsystem_remove_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1
+! nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+# Allow any host and verify that the connect succeeds
+$rpc_py nvmf_subsystem_allow_any_host -e nqn.2016-06.io.spdk:cnode1
+nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+waitforserial "$NVMF_SERIAL"
+nvme disconnect -n nqn.2016-06.io.spdk:cnode1
+
+$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+# Do frequent add/delete of namespaces, using an explicit NSID (5).
+for i in $(seq 1 $times); do
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 5
+ $rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1
+ nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+ waitforserial "$NVMF_SERIAL"
+
+ nvme disconnect -n nqn.2016-06.io.spdk:cnode1
+
+ $rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 5
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+
+done
+
+# Do frequent add/delete of subsystems, without connecting an initiator.
+for i in $(seq 1 $times); do
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
+ $rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1
+
+ $rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 1
+
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
+done
+
+stats=$($rpc_py nvmf_get_stats)
+# Expect some admin and IO qpairs
+[ "0" -lt $(jsum .poll_groups[].admin_qpairs <<< "$stats") ]
+[ "0" -lt $(jsum .poll_groups[].io_qpairs <<< "$stats") ]
+# Transport statistics are currently implemented for RDMA only
+if [ 'rdma' == $TEST_TRANSPORT ]; then
+ # Expect non-zero completions and request latencies accumulated
+ [ "0" -lt $(jsum .poll_groups[].transports[].devices[].completions <<< "$stats") ]
+ [ "0" -lt $(jsum .poll_groups[].transports[].devices[].request_latency <<< "$stats") ]
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
diff --git a/src/spdk/test/nvmf/target/shutdown.sh b/src/spdk/test/nvmf/target/shutdown.sh
new file mode 100755
index 000000000..8ad73bd6f
--- /dev/null
+++ b/src/spdk/test/nvmf/target/shutdown.sh
@@ -0,0 +1,155 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+function starttarget() {
+ # Start the target
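+ # Core mask 0x1E pins the target to cores 1-4, leaving core 0 free for the
+ # initiator apps (bdev_svc below runs with -m 0x1).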
+ nvmfappstart -m 0x1E
+
+ $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+ num_subsystems=({1..10})
+ # Soft-RoCE does not have enough queues available for
+ # this test. Detect if we're using software RDMA.
+ # If so, only use two subsystems.
+ if check_ip_is_soft_roce "$NVMF_FIRST_TARGET_IP"; then
+ num_subsystems=({1..2})
+ fi
+
+ timing_enter create_subsystems
+ # Create subsystems
+ rm -rf $testdir/rpcs.txt
+ for i in "${num_subsystems[@]}"; do
+ cat <<- EOL >> $testdir/rpcs.txt
+ bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i
+ nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
+ nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i
+ nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ EOL
+ done
+ $rpc_py < $testdir/rpcs.txt
+ timing_exit create_subsystems
+}
+
+function stoptarget() {
+ rm -f ./local-job0-0-verify.state
+ rm -rf $testdir/bdevperf.conf
+ rm -rf $testdir/rpcs.txt
+
+ nvmftestfini
+}
+
+function waitforio() {
+ # $1 = RPC socket
+ if [ -z "$1" ]; then
+ exit 1
+ fi
+ # $2 = bdev name
+ if [ -z "$2" ]; then
+ exit 1
+ fi
+ local ret=1
+ local i
+ for ((i = 10; i != 0; i--)); do
+ read_io_count=$($rpc_py -s $1 bdev_get_iostat -b $2 | jq -r '.bdevs[0].num_read_ops')
+ # A few I/Os happen during the initial examine, so wait until at least 100 I/Os
+ # have completed to know that bdevperf is really generating the I/O.
+ if [ $read_io_count -ge 100 ]; then
+ ret=0
+ break
+ fi
+ sleep 0.25
+ done
+ return $ret
+}
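+# Typical call (as used in the tests below): waitforio /var/tmp/bdevperf.sock Nvme1n1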
+
+# Test 1: Kill the initiator unexpectedly with no I/O outstanding
+function nvmf_shutdown_tc1() {
+ starttarget
+
+ # Run bdev_svc, which connects but does not issue I/O
+ $rootdir/test/app/bdev_svc/bdev_svc -m 0x1 -i 1 -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") &
+ perfpid=$!
+ waitforlisten $perfpid /var/tmp/bdevperf.sock
+ $rpc_py -s /var/tmp/bdevperf.sock framework_wait_init
+
+ # Kill bdev_svc
+ kill -9 $perfpid || true
+ rm -f /var/run/spdk_bdev1
+
+ # Verify the target stays up
+ sleep 1
+ kill -0 $nvmfpid
+
+ # Connect with bdevperf and confirm it works
+ $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 1
+
+ stoptarget
+}
+
+# Test 2: Kill initiator unexpectedly with I/O outstanding
+function nvmf_shutdown_tc2() {
+ starttarget
+
+ # Run bdevperf
+ $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 10 &
+ perfpid=$!
+ waitforlisten $perfpid /var/tmp/bdevperf.sock
+ $rpc_py -s /var/tmp/bdevperf.sock framework_wait_init
+
+ waitforio /var/tmp/bdevperf.sock Nvme1n1
+
+ # Kill bdevperf half way through
+ killprocess $perfpid
+
+ # Verify the target stays up
+ sleep 1
+ kill -0 $nvmfpid
+
+ stoptarget
+}
+
+# Test 3: Kill the target unexpectedly with I/O outstanding
+function nvmf_shutdown_tc3() {
+ starttarget
+
+ # Run bdevperf
+ $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 10 &
+ perfpid=$!
+ waitforlisten $perfpid /var/tmp/bdevperf.sock
+ $rpc_py -s /var/tmp/bdevperf.sock framework_wait_init
+
+ # Expand the trap to clean up bdevperf if something goes wrong
+ trap 'process_shm --id $NVMF_APP_SHM_ID; kill -9 $perfpid || true; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
+
+ waitforio /var/tmp/bdevperf.sock Nvme1n1
+
+ # Kill the target half way through
+ killprocess $nvmfpid
+ nvmfpid=
+
+ # Verify bdevperf exits successfully
+ sleep 1
+ # TODO: Right now the NVMe-oF initiator will not correctly detect broken connections
+ # and so it will never shut down. Just kill it.
+ kill -9 $perfpid || true
+
+ stoptarget
+}
+
+nvmftestinit
+
+run_test "nvmf_shutdown_tc1" nvmf_shutdown_tc1
+run_test "nvmf_shutdown_tc2" nvmf_shutdown_tc2
+run_test "nvmf_shutdown_tc3" nvmf_shutdown_tc3
+
+trap - SIGINT SIGTERM EXIT
diff --git a/src/spdk/test/nvmf/target/srq_overwhelm.sh b/src/spdk/test/nvmf/target/srq_overwhelm.sh
new file mode 100755
index 000000000..fe4dd7d29
--- /dev/null
+++ b/src/spdk/test/nvmf/target/srq_overwhelm.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+
+if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
+ echo "Using software RDMA, Likely not enough memory to run this test. aborting."
+ exit 0
+fi
+
+nvmfappstart -m 0xF
+
+# Create the RDMA transport with an intentionally small SRQ depth (-s 1024)
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -s 1024
+
+for i in $(seq 0 5); do
+ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK00000000000001
+ $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
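+ # -i 16: request 16 I/O queues per controller, so the six controllers together
+ # far oversubscribe the intentionally small SRQ created above.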
+ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" -i 16
+ waitforblk "nvme${i}n1"
+done
+
+# By running 6 different fio jobs, each with 13 subjobs, we end up with 78 fio threads trying to issue
+# I/O to our target at once. This completely overwhelms the target SRQ, but allows us to verify that
+# rnr_retry is working even at very high queue depths, because the RDMA qpair doesn't fail.
+# It is normal to see the initiator time out and reconnect while waiting for completions from an
+# overwhelmed target, but the connection should come up and fio should complete without errors.
+$rootdir/scripts/fio.py -p nvmf -i 1048576 -d 128 -t read -r 10 -n 13
+
+sync
+
+for i in $(seq 0 5); do
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}"
+ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode$i
+done
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini