author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit    483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree      e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/test/nvmf
parent    Initial commit. (diff)
Adding upstream version 14.2.21. (refs: upstream/14.2.21, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test/nvmf')
-rwxr-xr-x  src/spdk/test/nvmf/bdev_io_wait/bdev_io_wait.sh          |  66
-rwxr-xr-x  src/spdk/test/nvmf/common.sh                             | 200
-rwxr-xr-x  src/spdk/test/nvmf/create_transport/create_transport.sh  |  77
-rwxr-xr-x  src/spdk/test/nvmf/discovery/discovery.sh                |  76
-rwxr-xr-x  src/spdk/test/nvmf/filesystem/filesystem.sh              |  98
-rwxr-xr-x  src/spdk/test/nvmf/fio/fio.sh                            | 108
-rwxr-xr-x  src/spdk/test/nvmf/fio/nvmf_fio.py                       | 133
-rwxr-xr-x  src/spdk/test/nvmf/host/aer.sh                           |  78
-rwxr-xr-x  src/spdk/test/nvmf/host/bdevperf.sh                      |  52
-rwxr-xr-x  src/spdk/test/nvmf/host/fio.sh                           |  92
-rwxr-xr-x  src/spdk/test/nvmf/host/identify.sh                      |  65
-rwxr-xr-x  src/spdk/test/nvmf/host/identify_kernel_nvmf.sh          |  80
-rwxr-xr-x  src/spdk/test/nvmf/host/perf.sh                          |  95
-rwxr-xr-x  src/spdk/test/nvmf/lvol/nvmf_lvol.sh                     | 118
-rwxr-xr-x  src/spdk/test/nvmf/multiconnection/multiconnection.sh    |  82
-rwxr-xr-x  src/spdk/test/nvmf/nmic/nmic.sh                          |  87
-rwxr-xr-x  src/spdk/test/nvmf/nvme_cli/nvme_cli.sh                  |  96
-rwxr-xr-x  src/spdk/test/nvmf/nvmf.sh                               |  64
-rwxr-xr-x  src/spdk/test/nvmf/nvmfjson/json_config.sh               |  40
-rwxr-xr-x  src/spdk/test/nvmf/rpc/rpc.sh                            | 145
-rwxr-xr-x  src/spdk/test/nvmf/shutdown/shutdown.sh                  |  88
-rw-r--r--  src/spdk/test/nvmf/test_plan.md                          |  95
22 files changed, 2035 insertions(+), 0 deletions(-)
diff --git a/src/spdk/test/nvmf/bdev_io_wait/bdev_io_wait.sh b/src/spdk/test/nvmf/bdev_io_wait/bdev_io_wait.sh
new file mode 100755
index 00000000..29106bde
--- /dev/null
+++ b/src/spdk/test/nvmf/bdev_io_wait/bdev_io_wait.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./bdev_io_wait.sh iso
+nvmftestinit $1
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter bdev_io_wait
+timing_enter start_nvmf_tgt
+
+$NVMF_APP -m 0xF --wait-for-rpc &
+nvmfpid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+# Use a minimal bdev I/O pool size (5) and cache size (1)
+$rpc_py set_bdev_options -p 5 -c 1
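+# With so few bdev_io structures available, the bdevperf runs below will
+# exhaust the pool and fall back on the bdev io_wait (queued retry) path,
+# which is exactly what this test is meant to exercise.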
+$rpc_py start_subsystem_init
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+modprobe -v nvme-rdma
+
+bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+
+echo "[Nvme]" > $testdir/bdevperf.conf
+echo " TransportID \"trtype:RDMA adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420\" Nvme0" >> $testdir/bdevperf.conf
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdevperf.conf -q 128 -o 4096 -w write -t 1
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdevperf.conf -q 128 -o 4096 -w read -t 1
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdevperf.conf -q 128 -o 4096 -w flush -t 1
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdevperf.conf -q 128 -o 4096 -w unmap -t 1
+sync
+rm -rf $testdir/bdevperf.conf
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $nvmfpid
+nvmftestfini $1
+timing_exit bdev_io_wait
diff --git a/src/spdk/test/nvmf/common.sh b/src/spdk/test/nvmf/common.sh
new file mode 100755
index 00000000..af79e3f7
--- /dev/null
+++ b/src/spdk/test/nvmf/common.sh
@@ -0,0 +1,200 @@
+#!/usr/bin/env bash
+
+NVMF_PORT=4420
+NVMF_IP_PREFIX="192.168.100"
+NVMF_IP_LEAST_ADDR=8
+
+if [ -z "$NVMF_APP" ]; then
+ NVMF_APP=./app/nvmf_tgt/nvmf_tgt
+fi
+
+if [ -z "$NVMF_TEST_CORE_MASK" ]; then
+ NVMF_TEST_CORE_MASK=0xFF
+fi
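+# Both NVMF_APP and NVMF_TEST_CORE_MASK above are only defaults; export either
+# variable before sourcing this file to override it (nvmf.sh, for example,
+# exports its own NVMF_APP).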
+
+function load_ib_rdma_modules()
+{
+ if [ `uname` != Linux ]; then
+ return 0
+ fi
+
+ modprobe ib_cm
+ modprobe ib_core
+ # Newer kernels do not have the ib_ucm module
+ modprobe ib_ucm || true
+ modprobe ib_umad
+ modprobe ib_uverbs
+ modprobe iw_cm
+ modprobe rdma_cm
+ modprobe rdma_ucm
+}
+
+
+function detect_soft_roce_nics()
+{
+ if hash rxe_cfg; then
+ rxe_cfg start
+ rdma_nics=$(get_rdma_if_list)
+ all_nics=$(ip -o link | awk '{print $2}' | cut -d":" -f1)
+ non_rdma_nics=$(echo -e "$rdma_nics\n$all_nics" | sort | uniq -u)
+ for nic in $non_rdma_nics; do
+ if [[ -d /sys/class/net/${nic}/bridge ]]; then
+ continue
+ fi
+ rxe_cfg add $nic || true
+ done
+ fi
+}
+
+function detect_mellanox_nics()
+{
+ if ! hash lspci; then
+ echo "No NICs"
+ return 0
+ fi
+
+ nvmf_nic_bdfs=`lspci | grep Ethernet | grep Mellanox | awk -F ' ' '{print "0000:"$1}'`
+ mlx_core_driver="mlx4_core"
+ mlx_ib_driver="mlx4_ib"
+ mlx_en_driver="mlx4_en"
+
+ if [ -z "$nvmf_nic_bdfs" ]; then
+ echo "No NICs"
+ return 0
+ fi
+
+ # For the NVMe-oF target loopback test, assume only one type of card is installed.
+ for nvmf_nic_bdf in $nvmf_nic_bdfs
+ do
+ result=`lspci -vvv -s $nvmf_nic_bdf | grep 'Kernel modules' | awk -F ' ' '{print $3}'`
+ if [ "$result" == "mlx5_core" ]; then
+ mlx_core_driver="mlx5_core"
+ mlx_ib_driver="mlx5_ib"
+ mlx_en_driver=""
+ fi
+ break;
+ done
+
+ modprobe $mlx_core_driver
+ modprobe $mlx_ib_driver
+ if [ -n "$mlx_en_driver" ]; then
+ modprobe $mlx_en_driver
+ fi
+
+ # The mlx4 driver takes a few extra seconds to finish loading after modprobe
+ # returns; until it does, iproute2 operations will silently do nothing.
+ sleep 5
+}
+
+function detect_rdma_nics()
+{
+ nics=$(detect_mellanox_nics)
+ if [ "$nics" == "No NICs" ]; then
+ detect_soft_roce_nics
+ fi
+}
+
+function allocate_nic_ips()
+{
+ let count=$NVMF_IP_LEAST_ADDR
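+ # Each RDMA interface that has no IPv4 address yet gets the next one in
+ # sequence: 192.168.100.8/24, 192.168.100.9/24, ... with the defaults above.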
+ for nic_name in $(get_rdma_if_list); do
+ ip="$(get_ip_address $nic_name)"
+ if [ -z $ip ]; then
+ ip addr add $NVMF_IP_PREFIX.$count/24 dev $nic_name
+ ip link set $nic_name up
+ let count=$count+1
+ fi
+ # dump configuration for debug log
+ ip addr show $nic_name
+ done
+}
+
+function get_available_rdma_ips()
+{
+ for nic_name in $(get_rdma_if_list); do
+ get_ip_address $nic_name
+ done
+}
+
+function get_rdma_if_list()
+{
+ for nic_type in `ls /sys/class/infiniband`; do
+ for nic_name in `ls /sys/class/infiniband/${nic_type}/device/net`; do
+ echo "$nic_name"
+ done
+ done
+}
+
+function get_ip_address()
+{
+ interface=$1
+ ip -o -4 addr show $interface | awk '{print $4}' | cut -d"/" -f1
+}
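+# Example (interface name is illustrative):
+#   get_ip_address eth0   # prints e.g. 192.168.100.8, or nothing if no IPv4 is set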
+
+function nvmfcleanup()
+{
+ sync
+ set +e
+ for i in {1..20}; do
+ modprobe -v -r nvme-rdma nvme-fabrics
+ if [ $? -eq 0 ]; then
+ set -e
+ return
+ fi
+ sleep 1
+ done
+ set -e
+
+ # The modules could not be removed within 20 attempts. Try one more
+ # time and let the failure abort the script.
+ modprobe -v -r nvme-rdma nvme-fabrics
+}
+
+function nvmftestinit()
+{
+ if [ "$1" == "iso" ]; then
+ $rootdir/scripts/setup.sh
+ rdma_device_init
+ fi
+}
+
+function nvmftestfini()
+{
+ if [ "$1" == "iso" ]; then
+ $rootdir/scripts/setup.sh reset
+ rdma_device_init
+ fi
+}
+
+function rdma_device_init()
+{
+ load_ib_rdma_modules
+ detect_rdma_nics
+ allocate_nic_ips
+}
+
+function revert_soft_roce()
+{
+ if hash rxe_cfg; then
+ interfaces="$(ip -o link | awk '{print $2}' | cut -d":" -f1)"
+ for interface in $interfaces; do
+ rxe_cfg remove $interface || true
+ done
+ rxe_cfg stop || true
+ fi
+}
+
+function check_ip_is_soft_roce()
+{
+ IP=$1
+ if hash rxe_cfg; then
+ dev=$(ip -4 -o addr show | grep $IP | cut -d" " -f2)
+ if rxe_cfg | grep $dev; then
+ return 0
+ else
+ return 1
+ fi
+ else
+ return 1
+ fi
+}
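+# Example usage (as in the test scripts below):
+#   if check_ip_is_soft_roce "$NVMF_FIRST_TARGET_IP"; then
+#       echo "Using software RDMA"
+#   fi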
diff --git a/src/spdk/test/nvmf/create_transport/create_transport.sh b/src/spdk/test/nvmf/create_transport/create_transport.sh
new file mode 100755
index 00000000..3514dbc1
--- /dev/null
+++ b/src/spdk/test/nvmf/create_transport/create_transport.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+NULL_BDEV_SIZE=102400
+NULL_BLOCK_SIZE=512
+
+rpc_py="python $rootdir/scripts/rpc.py"
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./create_transport.sh iso
+nvmftestinit $1
+
+if ! hash nvme; then
+ echo "nvme command not found; skipping create transport test"
+ exit 0
+fi
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter create_transport
+timing_enter start_nvmf_tgt
+# Start up the NVMf target in another process
+$NVMF_APP -m 0xF &
+nvmfpid=$!
+
+trap "killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+# Create the RDMA transport via the nvmf_create_transport RPC
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+null_bdevs="$($rpc_py construct_null_bdev Null0 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE) "
+null_bdevs+="$($rpc_py construct_null_bdev Null1 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE)"
+
+modprobe -v nvme-rdma
+
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for null_bdev in $null_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $null_bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+
+nvme discover -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+echo "Perform nvmf subsystem discovery via RPC"
+$rpc_py get_nvmf_subsystems
+
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+for null_bdev in $null_bdevs; do
+ $rpc_py delete_null_bdev $null_bdev
+done
+
+check_bdevs=$($rpc_py get_bdevs | jq -r '.[].name')
+if [ -n "$check_bdevs" ]; then
+ echo $check_bdevs
+ exit 1
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $nvmfpid
+nvmftestfini $1
+timing_exit create_transport
diff --git a/src/spdk/test/nvmf/discovery/discovery.sh b/src/spdk/test/nvmf/discovery/discovery.sh
new file mode 100755
index 00000000..f0db8ab3
--- /dev/null
+++ b/src/spdk/test/nvmf/discovery/discovery.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+NULL_BDEV_SIZE=102400
+NULL_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./discovery.sh iso
+nvmftestinit $1
+
+if ! hash nvme; then
+ echo "nvme command not found; skipping discovery test"
+ exit 0
+fi
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter discovery
+timing_enter start_nvmf_tgt
+# Start up the NVMf target in another process
+$NVMF_APP -m 0xF &
+nvmfpid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+null_bdevs="$($rpc_py construct_null_bdev Null0 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE) "
+null_bdevs+="$($rpc_py construct_null_bdev Null1 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE)"
+
+modprobe -v nvme-rdma
+
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for null_bdev in $null_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $null_bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+
+nvme discover -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+echo "Perform nvmf subsystem discovery via RPC"
+$rpc_py get_nvmf_subsystems
+
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+for null_bdev in $null_bdevs; do
+ $rpc_py delete_null_bdev $null_bdev
+done
+
+check_bdevs=$($rpc_py get_bdevs | jq -r '.[].name')
+if [ -n "$check_bdevs" ]; then
+ echo $check_bdevs
+ exit 1
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $nvmfpid
+nvmftestfini $1
+timing_exit discovery
diff --git a/src/spdk/test/nvmf/filesystem/filesystem.sh b/src/spdk/test/nvmf/filesystem/filesystem.sh
new file mode 100755
index 00000000..057fc579
--- /dev/null
+++ b/src/spdk/test/nvmf/filesystem/filesystem.sh
@@ -0,0 +1,98 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./filesystem.sh iso
+nvmftestinit $1
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter fs_test
+
+for incapsule in 0 4096; do
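+ # Run the whole test twice: once with in-capsule data disabled (0) and once
+ # allowing up to 4096 bytes of in-capsule data (passed via -c below).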
+ # Start up the NVMf target in another process
+ $NVMF_APP -m 0xF &
+ nvmfpid=$!
+
+ trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+ waitforlisten $nvmfpid
+ $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -c $incapsule
+
+ bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+ bdevs+=" $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+ modprobe -v nvme-rdma
+
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+ for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+ done
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+
+ nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+ waitforblk "nvme0n1"
+ waitforblk "nvme0n2"
+
+ mkdir -p /mnt/device
+
+ devs=`lsblk -l -o NAME | grep nvme`
+
+ for dev in $devs; do
+ timing_enter parted
+ parted -s /dev/$dev mklabel msdos mkpart primary '0%' '100%'
+ timing_exit parted
+ sleep 1
+
+ for fstype in "ext4" "btrfs" "xfs"; do
+ timing_enter $fstype
+ if [ $fstype = ext4 ]; then
+ force=-F
+ else
+ force=-f
+ fi
+
+ mkfs.${fstype} $force /dev/${dev}p1
+
+ mount /dev/${dev}p1 /mnt/device
+ touch /mnt/device/aaa
+ sync
+ rm /mnt/device/aaa
+ sync
+ umount /mnt/device
+ timing_exit $fstype
+ done
+
+ parted -s /dev/$dev rm 1
+ done
+
+ sync
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+ trap - SIGINT SIGTERM EXIT
+
+ nvmfcleanup
+ killprocess $nvmfpid
+done
+
+nvmftestfini $1
+timing_exit fs_test
diff --git a/src/spdk/test/nvmf/fio/fio.sh b/src/spdk/test/nvmf/fio/fio.sh
new file mode 100755
index 00000000..ba3b12b3
--- /dev/null
+++ b/src/spdk/test/nvmf/fio/fio.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./fio.sh iso
+nvmftestinit $1
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter fio
+timing_enter start_nvmf_tgt
+# Start up the NVMf target in another process
+$NVMF_APP -m 0xF &
+nvmfpid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+
+timing_exit start_nvmf_tgt
+
+malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+# Create a RAID-0 bdev from two malloc bdevs
+raid_malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+raid_malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+$rpc_py construct_raid_bdev -n raid0 -s 64 -r 0 -b "$raid_malloc_bdevs"
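+# (-n: raid bdev name, -s: strip size in KiB, -r 0: RAID level 0,
+#  -b: space-separated list of base bdevs)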
+
+modprobe -v nvme-rdma
+
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+
+# Append the raid0 bdev into subsystem
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 raid0
+
+nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+waitforblk "nvme0n1"
+waitforblk "nvme0n2"
+waitforblk "nvme0n3"
+
+$testdir/nvmf_fio.py 4096 1 write 1 verify
+$testdir/nvmf_fio.py 4096 1 randwrite 1 verify
+$testdir/nvmf_fio.py 4096 128 write 1 verify
+$testdir/nvmf_fio.py 4096 128 randwrite 1 verify
+
+sync
+
+# Start the hotplug test case: run fio in the background, then delete the
+# bdevs out from under it and expect the fio run to fail.
+$testdir/nvmf_fio.py 4096 1 read 10 &
+fio_pid=$!
+
+sleep 3
+set +e
+
+$rpc_py destroy_raid_bdev "raid0"
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py delete_malloc_bdev "$malloc_bdev"
+done
+
+wait $fio_pid
+fio_status=$?
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+if [ $fio_status -eq 0 ]; then
+ echo "nvmf hotplug test: fio successful - expected failure"
+ nvmfcleanup
+ killprocess $nvmfpid
+ exit 1
+else
+ echo "nvmf hotplug test: fio failed as expected"
+fi
+set -e
+
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+rm -f ./local-job0-0-verify.state
+rm -f ./local-job1-1-verify.state
+rm -f ./local-job2-2-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $nvmfpid
+nvmftestfini $1
+timing_exit fio
diff --git a/src/spdk/test/nvmf/fio/nvmf_fio.py b/src/spdk/test/nvmf/fio/nvmf_fio.py
new file mode 100755
index 00000000..6096dd72
--- /dev/null
+++ b/src/spdk/test/nvmf/fio/nvmf_fio.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+
+from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError
+import re
+import sys
+import signal
+
+fio_template = """
+[global]
+thread=1
+invalidate=1
+rw=%(testtype)s
+time_based=1
+runtime=%(runtime)s
+ioengine=libaio
+direct=1
+bs=%(blocksize)d
+iodepth=%(iodepth)d
+%(verify)s
+verify_dump=1
+
+"""
+
+verify_template = """
+do_verify=1
+verify=meta
+verify_pattern="meta"
+"""
+
+
+fio_job_template = """
+[job%(jobnumber)d]
+filename=%(device)s
+
+"""
+
+
+def interrupt_handler(signum, frame):
+ fio.terminate()
+ print("FIO terminated")
+ sys.exit(0)
+
+
+def main():
+
+ global fio
+ if (len(sys.argv) < 5):
+ print("usage:")
+ print(" " + sys.argv[0] + " <io_size> <queue_depth> <test_type> <runtime>")
+ print("advanced usage:")
+ print("If you want to run fio with verify, please add verify string after runtime.")
+ print("Currently fio.py only support write rw randwrite randrw with verify enabled.")
+ sys.exit(1)
+
+ io_size = int(sys.argv[1])
+ queue_depth = int(sys.argv[2])
+ test_type = sys.argv[3]
+ runtime = sys.argv[4]
+ if len(sys.argv) > 5:
+ verify = True
+ else:
+ verify = False
+
+ devices = get_target_devices()
+ print("Found devices: ", devices)
+
+ # configure_devices(devices)
+ try:
+ fio_executable = check_output("which fio", shell=True).split()[0]
+ except CalledProcessError as e:
+ sys.stderr.write(str(e))
+ sys.stderr.write("\nCan't find the fio binary, please install it.\n")
+ sys.exit(1)
+
+ device_paths = ['/dev/' + dev for dev in devices]
+ print(device_paths)
+ sys.stdout.flush()
+ signal.signal(signal.SIGTERM, interrupt_handler)
+ signal.signal(signal.SIGINT, interrupt_handler)
+ fio = Popen([fio_executable, '-'], stdin=PIPE)
+ # communicate() feeds the config to fio's stdin, closes it and waits for exit
+ fio.communicate(create_fio_config(io_size, queue_depth, device_paths, test_type, runtime, verify))
+ rc = fio.returncode
+ print("FIO completed with code %d\n" % rc)
+ sys.stdout.flush()
+ sys.exit(rc)
+
+
+def get_target_devices():
+ output = str(check_output('lsblk -l -o NAME', shell=True).decode())
+ return re.findall("(nvme[0-9]+n[0-9]+)\n", output)
+
+
+def create_fio_config(size, q_depth, devices, test, run_time, verify):
+ if not verify:
+ verifyfio = ""
+ else:
+ verifyfio = verify_template
+ fiofile = fio_template % {"blocksize": size, "iodepth": q_depth,
+ "testtype": test, "runtime": run_time, "verify": verifyfio}
+ for (i, dev) in enumerate(devices):
+ fiofile += fio_job_template % {"jobnumber": i, "device": dev}
+ return fiofile.encode()
+
+
+def set_device_parameter(devices, filename_template, value):
+ for dev in devices:
+ filename = filename_template % dev
+ # sysfs attributes take text; open in text mode so str values work on Python 3
+ with open(filename, 'w') as f:
+ f.write(value)
+
+
+def configure_devices(devices):
+ set_device_parameter(devices, "/sys/block/%s/queue/nomerges", "2")
+ set_device_parameter(devices, "/sys/block/%s/queue/nr_requests", "128")
+ requested_qd = 128
+ qd = requested_qd
+ while qd > 0:
+ try:
+ set_device_parameter(devices, "/sys/block/%s/device/queue_depth", str(qd))
+ break
+ except IOError:
+ qd = qd - 1
+ if qd == 0:
+ print("Could not set block device queue depths.")
+ else:
+ print("Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd)))
+ set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "noop")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/spdk/test/nvmf/host/aer.sh b/src/spdk/test/nvmf/host/aer.sh
new file mode 100755
index 00000000..66e597aa
--- /dev/null
+++ b/src/spdk/test/nvmf/host/aer.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter aer
+timing_enter start_nvmf_tgt
+
+$NVMF_APP -m 0xF &
+nvmfpid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+modprobe -v nvme-rdma
+
+$rpc_py construct_malloc_bdev 64 512 --name Malloc0
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 -m 2
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+$rpc_py get_nvmf_subsystems
+
+# TODO: this AER test tries to trigger an AER completion by setting the
+# temperature threshold to a very low value. This does not work with emulated
+# controllers, so the test is currently disabled.
+
+#$rootdir/test/nvme/aer/aer -r "\
+# trtype:RDMA \
+# adrfam:IPv4 \
+# traddr:$NVMF_FIRST_TARGET_IP \
+# trsvcid:$NVMF_PORT \
+# subnqn:nqn.2014-08.org.nvmexpress.discovery"
+
+# Namespace Attribute Notice Tests
+$rootdir/test/nvme/aer/aer -r "\
+ trtype:RDMA \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2016-06.io.spdk:cnode1" -n 2 &
+aerpid=$!
+
+# Wait for the aer application to start up and arm its AER handler
+sleep 5
+
+# Add a new namespace
+$rpc_py construct_malloc_bdev 64 4096 --name Malloc1
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 2
+$rpc_py get_nvmf_subsystems
+
+wait $aerpid
+
+$rpc_py delete_malloc_bdev Malloc0
+$rpc_py delete_malloc_bdev Malloc1
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $nvmfpid
+timing_exit aer
diff --git a/src/spdk/test/nvmf/host/bdevperf.sh b/src/spdk/test/nvmf/host/bdevperf.sh
new file mode 100755
index 00000000..1247177f
--- /dev/null
+++ b/src/spdk/test/nvmf/host/bdevperf.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter bdevperf
+timing_enter start_nvmf_tgt
+
+$NVMF_APP -m 0xF &
+nvmfpid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+
+echo "[Nvme]" > $testdir/bdevperf.conf
+echo " TransportID \"trtype:RDMA adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420\" Nvme0" >> $testdir/bdevperf.conf
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdevperf.conf -q 128 -o 4096 -w verify -t 1
+sync
+rm -rf $testdir/bdevperf.conf
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $nvmfpid
+timing_exit bdevperf
diff --git a/src/spdk/test/nvmf/host/fio.sh b/src/spdk/test/nvmf/host/fio.sh
new file mode 100755
index 00000000..ceed86b8
--- /dev/null
+++ b/src/spdk/test/nvmf/host/fio.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/scripts/common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+if [ ! -d /usr/src/fio ]; then
+ echo "FIO not available"
+ exit 0
+fi
+
+timing_enter fio
+timing_enter start_nvmf_tgt
+
+$NVMF_APP -m 0xF &
+nvmfpid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+bdevs="$bdevs $($rpc_py construct_malloc_bdev 64 512)"
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+
+PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
+
+# Test fio_plugin as host with malloc backend
+LD_PRELOAD=$PLUGIN_DIR/fio_plugin /usr/src/fio/fio $PLUGIN_DIR/example_config.fio --filename="trtype=RDMA adrfam=IPv4 \
+traddr=$NVMF_FIRST_TARGET_IP trsvcid=4420 ns=1"
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ # Test fio_plugin as host with nvme lvol backend
+ bdfs=$(iter_pci_class_code 01 08 02)
+ $rpc_py construct_nvme_bdev -b Nvme0 -t PCIe -a $(echo $bdfs | awk '{ print $1 }')
+ ls_guid=$($rpc_py construct_lvol_store Nvme0n1 lvs_0)
+ get_lvs_free_mb $ls_guid
+ lb_guid=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_0 $free_mb)
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode2 -a -s SPDK00000000000001
+ for bdev in $lb_guid; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode2 $bdev
+ done
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode2 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+ LD_PRELOAD=$PLUGIN_DIR/fio_plugin /usr/src/fio/fio $PLUGIN_DIR/example_config.fio --filename="trtype=RDMA adrfam=IPv4 \
+ traddr=$NVMF_FIRST_TARGET_IP trsvcid=4420 ns=1"
+ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode2
+
+ # Test fio_plugin as host with nvme lvol nested backend
+ ls_nested_guid=$($rpc_py construct_lvol_store $lb_guid lvs_n_0)
+ get_lvs_free_mb $ls_nested_guid
+ lb_nested_guid=$($rpc_py construct_lvol_bdev -u $ls_nested_guid lbd_nest_0 $free_mb)
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode3 -a -s SPDK00000000000001
+ for bdev in $lb_nested_guid; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode3 $bdev
+ done
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode3 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+ LD_PRELOAD=$PLUGIN_DIR/fio_plugin /usr/src/fio/fio $PLUGIN_DIR/example_config.fio --filename="trtype=RDMA adrfam=IPv4 \
+ traddr=$NVMF_FIRST_TARGET_IP trsvcid=4420 ns=1"
+ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode3
+
+ sync
+ # Delete lvol_bdev and destroy lvol_store.
+ $rpc_py destroy_lvol_bdev "$lb_nested_guid"
+ $rpc_py destroy_lvol_store -l lvs_n_0
+ $rpc_py destroy_lvol_bdev "$lb_guid"
+ $rpc_py destroy_lvol_store -l lvs_0
+ $rpc_py delete_nvme_controller Nvme0
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $nvmfpid
+timing_exit fio
diff --git a/src/spdk/test/nvmf/host/identify.sh b/src/spdk/test/nvmf/host/identify.sh
new file mode 100755
index 00000000..ad101980
--- /dev/null
+++ b/src/spdk/test/nvmf/host/identify.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+timing_enter identify
+timing_enter start_nvmf_tgt
+
+$NVMF_APP -m 0xF &
+nvmfpid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for bdev in $bdevs; do
+ # NOTE: This will assign the same NGUID and EUI64 to all bdevs,
+ # but currently we only have one (see above), so this is OK.
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$bdev" \
+ --nguid "ABCDEF0123456789ABCDEF0123456789" \
+ --eui64 "ABCDEF0123456789"
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t RDMA -a $NVMF_FIRST_TARGET_IP -s 4420
+
+$rpc_py get_nvmf_subsystems
+
+$rootdir/examples/nvme/identify/identify -r "\
+ trtype:RDMA \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2014-08.org.nvmexpress.discovery" -L all
+$rootdir/examples/nvme/identify/identify -r "\
+ trtype:RDMA \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2016-06.io.spdk:cnode1" -L all
+sync
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $nvmfpid
+timing_exit identify
diff --git a/src/spdk/test/nvmf/host/identify_kernel_nvmf.sh b/src/spdk/test/nvmf/host/identify_kernel_nvmf.sh
new file mode 100755
index 00000000..d6afe52f
--- /dev/null
+++ b/src/spdk/test/nvmf/host/identify_kernel_nvmf.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+set -e
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter identify_kernel_nvmf_tgt
+
+subsystemname=nqn.2016-06.io.spdk:testnqn
+
+modprobe null_blk nr_devices=1
+modprobe nvmet
+modprobe nvmet-rdma
+modprobe nvmet-fc
+modprobe lpfc
+
+if [ ! -d /sys/kernel/config/nvmet/subsystems/$subsystemname ]; then
+ mkdir /sys/kernel/config/nvmet/subsystems/$subsystemname
+fi
+echo 1 > /sys/kernel/config/nvmet/subsystems/$subsystemname/attr_allow_any_host
+
+if [ ! -d /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1 ]; then
+ mkdir /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1
+fi
+
+echo -n /dev/nullb0 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/device_path
+echo 1 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/enable
+
+if [ ! -d /sys/kernel/config/nvmet/ports/1 ]; then
+ mkdir /sys/kernel/config/nvmet/ports/1
+fi
+
+echo -n rdma > /sys/kernel/config/nvmet/ports/1/addr_trtype
+echo -n ipv4 > /sys/kernel/config/nvmet/ports/1/addr_adrfam
+echo -n $NVMF_FIRST_TARGET_IP > /sys/kernel/config/nvmet/ports/1/addr_traddr
+echo -n $NVMF_PORT > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
+
+ln -s /sys/kernel/config/nvmet/subsystems/$subsystemname /sys/kernel/config/nvmet/ports/1/subsystems/$subsystemname
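+# The symlink is what actually exposes the subsystem on the port; the kernel
+# target starts accepting connections for it once the link exists.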
+
+sleep 4
+
+$rootdir/examples/nvme/identify/identify -r "\
+ trtype:RDMA \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:nqn.2014-08.org.nvmexpress.discovery" -t all
+$rootdir/examples/nvme/identify/identify -r "\
+ trtype:RDMA \
+ adrfam:IPv4 \
+ traddr:$NVMF_FIRST_TARGET_IP \
+ trsvcid:$NVMF_PORT \
+ subnqn:$subsystemname"
+
+rm -rf /sys/kernel/config/nvmet/ports/1/subsystems/$subsystemname
+
+echo 0 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/enable
+echo -n 0 > /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1/device_path
+
+rmdir --ignore-fail-on-non-empty /sys/kernel/config/nvmet/subsystems/$subsystemname/namespaces/1
+rmdir --ignore-fail-on-non-empty /sys/kernel/config/nvmet/subsystems/$subsystemname
+rmdir --ignore-fail-on-non-empty /sys/kernel/config/nvmet/ports/1
+
+rmmod lpfc
+rmmod nvmet_fc
+rmmod nvmet-rdma
+rmmod null_blk
+rmmod nvmet
+
+timing_exit identify_kernel_nvmf_tgt
diff --git a/src/spdk/test/nvmf/host/perf.sh b/src/spdk/test/nvmf/host/perf.sh
new file mode 100755
index 00000000..24faed5b
--- /dev/null
+++ b/src/spdk/test/nvmf/host/perf.sh
@@ -0,0 +1,95 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter perf
+timing_enter start_nvmf_tgt
+
+$NVMF_APP -m 0xF -i 0 &
+nvmfpid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+$rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
+timing_exit start_nvmf_tgt
+
+local_nvme_trid="trtype:PCIe traddr:"$($rpc_py get_subsystem_config bdev | jq -r '.[].params | select(.name=="Nvme0").traddr')
+bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+if [ -n "$local_nvme_trid" ]; then
+ bdevs="$bdevs Nvme0n1"
+fi
+
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+
+# Test multi-process access to local NVMe device
+if [ -n "$local_nvme_trid" ]; then
+ $rootdir/examples/nvme/perf/perf -i 0 -q 32 -o 4096 -w randrw -M 50 -t 1 -r "$local_nvme_trid"
+fi
+
+$rootdir/examples/nvme/perf/perf -q 32 -o 4096 -w randrw -M 50 -t 1 -r "trtype:RDMA adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420"
+sync
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ # Configure nvme devices with nvmf lvol_bdev backend
+ if [ -n "$local_nvme_trid" ]; then
+ ls_guid=$($rpc_py construct_lvol_store Nvme0n1 lvs_0)
+ get_lvs_free_mb $ls_guid
+ lb_guid=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_0 $free_mb)
+
+ # Create lvol bdev for nested lvol stores
+ ls_nested_guid=$($rpc_py construct_lvol_store $lb_guid lvs_n_0)
+ get_lvs_free_mb $ls_nested_guid
+ lb_nested_guid=$($rpc_py construct_lvol_bdev -u $ls_nested_guid lbd_nest_0 $free_mb)
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+ for bdev in $lb_nested_guid; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+ done
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+ # In nightly tests, run perf as the host with various I/O sizes and queue depths
+ qd_depth=("1" "128")
+ io_size=("512" "131072")
+ for qd in ${qd_depth[@]}; do
+ for o in ${io_size[@]}; do
+ $rootdir/examples/nvme/perf/perf -q $qd -o $o -w randrw -M 50 -t 10 -r "trtype:RDMA adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420"
+ done
+ done
+
+ # Delete subsystems, lvol_bdev and destroy lvol_store.
+ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+ $rpc_py destroy_lvol_bdev "$lb_nested_guid"
+ $rpc_py destroy_lvol_store -l lvs_n_0
+ $rpc_py destroy_lvol_bdev "$lb_guid"
+ $rpc_py destroy_lvol_store -l lvs_0
+ $rpc_py delete_nvme_controller Nvme0
+ fi
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $nvmfpid
+timing_exit perf
diff --git a/src/spdk/test/nvmf/lvol/nvmf_lvol.sh b/src/spdk/test/nvmf/lvol/nvmf_lvol.sh
new file mode 100755
index 00000000..7fe93a6c
--- /dev/null
+++ b/src/spdk/test/nvmf/lvol/nvmf_lvol.sh
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+LVOL_BDEV_SIZE=10
+SUBSYS_NR=2
+LVOL_BDEVS_NR=6
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+function disconnect_nvmf()
+{
+ for i in `seq 1 $SUBSYS_NR`; do
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true
+ done
+}
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./nvmf_lvol.sh iso
+nvmftestinit $1
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+# Soft-RoCE does not have enough queues available for
+# multiconnection tests. Detect if we're using software RDMA.
+# If so, lower the number of subsystems for the test.
+if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
+ echo "Using software RDMA, lowering number of NVMeOF subsystems."
+ SUBSYS_NR=1
+fi
+
+timing_enter lvol_integrity
+timing_enter start_nvmf_tgt
+# Start up the NVMf target in another process
+$NVMF_APP -m 0xF &
+pid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; disconnect_nvmf; killprocess $pid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+modprobe -v nvme-rdma
+
+lvol_stores=()
+lvol_bdevs=()
+# Create the first lvol store on a RAID-0 bdev built from two malloc bdevs;
+# create each of the remaining lvol stores on its own malloc bdev
+for i in `seq 1 $SUBSYS_NR`; do
+ if [ $i -eq 1 ]; then
+ # construct RAID bdev and put its name in $bdev
+ malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+ malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+ $rpc_py construct_raid_bdev -n raid0 -s 64 -r 0 -b "$malloc_bdevs"
+ bdev="raid0"
+ else
+ # construct malloc bdev and put its name in $bdev
+ bdev="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+ fi
+ ls_guid="$($rpc_py construct_lvol_store $bdev lvs_$i -c 524288)"
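+ # (-c 524288 sets a 512 KiB lvol store cluster size)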
+ lvol_stores+=("$ls_guid")
+
+ # One NVMe-oF subsystem per lvol store, each exposing $LVOL_BDEVS_NR lvol bdevs
+ ns_bdevs=""
+
+ # Create lvol bdevs on each lvol store
+ for j in `seq 1 $LVOL_BDEVS_NR`; do
+ lb_name="$($rpc_py construct_lvol_bdev -u $ls_guid lbd_$j $LVOL_BDEV_SIZE)"
+ lvol_bdevs+=("$lb_name")
+ ns_bdevs+="$lb_name "
+ done
+
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
+ for bdev in $ns_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i $bdev
+ done
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+done
+
+for i in `seq 1 $SUBSYS_NR`; do
+ k=$((i - 1))
+ nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+ for j in `seq 1 $LVOL_BDEVS_NR`; do
+ waitforblk "nvme${k}n${j}"
+ done
+done
+
+$testdir/../fio/nvmf_fio.py 262144 64 randwrite 10 verify
+
+sync
+disconnect_nvmf
+
+for i in `seq 1 $SUBSYS_NR`; do
+ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i
+done
+
+rm -f ./local-job*
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $pid
+nvmftestfini $1
+timing_exit lvol_integrity
diff --git a/src/spdk/test/nvmf/multiconnection/multiconnection.sh b/src/spdk/test/nvmf/multiconnection/multiconnection.sh
new file mode 100755
index 00000000..97155e78
--- /dev/null
+++ b/src/spdk/test/nvmf/multiconnection/multiconnection.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=128
+MALLOC_BLOCK_SIZE=512
+NVMF_SUBSYS=11
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./multiconnection.sh iso
+nvmftestinit $1
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+# Soft-RoCE does not have enough queues available for
+# multiconnection tests. Detect if we're using software RDMA.
+# If so, lower the number of subsystems for the test.
+if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
+ echo "Using software RDMA, lowering number of NVMeOF subsystems."
+ NVMF_SUBSYS=1
+fi
+
+timing_enter multiconnection
+timing_enter start_nvmf_tgt
+# Start up the NVMf target in another process
+$NVMF_APP -m 0xF &
+pid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $pid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+modprobe -v nvme-rdma
+
+for i in `seq 1 $NVMF_SUBSYS`
+do
+ bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
+ for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i $bdev
+ done
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+done
+
+for i in `seq 1 $NVMF_SUBSYS`; do
+ k=$((i - 1))
+ nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+ waitforblk "nvme${k}n1"
+done
+
+$testdir/../fio/nvmf_fio.py 262144 64 read 10
+$testdir/../fio/nvmf_fio.py 262144 64 randwrite 10
+
+sync
+for i in `seq 1 $NVMF_SUBSYS`; do
+ nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true
+ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode${i}
+done
+
+rm -f ./local-job0-0-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $pid
+nvmftestfini $1
+timing_exit multiconnection
diff --git a/src/spdk/test/nvmf/nmic/nmic.sh b/src/spdk/test/nvmf/nmic/nmic.sh
new file mode 100755
index 00000000..7b66a977
--- /dev/null
+++ b/src/spdk/test/nvmf/nmic/nmic.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=128
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="python $rootdir/scripts/rpc.py"
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./nmic.sh iso
+nvmftestinit $1
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+NVMF_SECOND_TARGET_IP=$(echo "$RDMA_IP_LIST" | sed -n 2p)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter nmic
+timing_enter start_nvmf_tgt
+# Start up the NVMf target in another process
+$NVMF_APP -m 0xF &
+pid=$!
+
+trap "killprocess $pid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+# Create subsystems
+bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK1
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t RDMA -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
+
+echo "test case1: single bdev can't be used in multiple subsystems"
+set +e
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode2 -a -s SPDK2
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode2 -t RDMA -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode2 $bdev
+ nmic_status=$?
+
+ if [ $nmic_status -eq 0 ]; then
+ echo " Adding namespace passed - failure expected."
+ killprocess $pid
+ exit 1
+ else
+ echo " Adding namespace failed - expected result."
+ fi
+done
+set -e
+
+modprobe -v nvme-rdma
+
+echo "test case2: host connect to nvmf target in multiple paths"
+if [ ! -z $NVMF_SECOND_TARGET_IP ]; then
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t RDMA -a $NVMF_SECOND_TARGET_IP -s $NVMF_PORT
+
+ nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+ nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_SECOND_TARGET_IP" -s "$NVMF_PORT"
+
+ waitforblk "nvme0n1"
+
+ $testdir/../fio/nvmf_fio.py 4096 1 write 1 verify
+fi
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $pid
+
+nvmftestfini $1
+timing_exit nmic
diff --git a/src/spdk/test/nvmf/nvme_cli/nvme_cli.sh b/src/spdk/test/nvmf/nvme_cli/nvme_cli.sh
new file mode 100755
index 00000000..c8b40794
--- /dev/null
+++ b/src/spdk/test/nvmf/nvme_cli/nvme_cli.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+if [ -z "${DEPENDENCY_DIR}" ]; then
+ echo DEPENDENCY_DIR not defined!
+ exit 1
+fi
+
+spdk_nvme_cli="${DEPENDENCY_DIR}/nvme-cli"
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./nvme_cli.sh iso
+nvmftestinit $1
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter nvme_cli
+timing_enter start_nvmf_tgt
+$NVMF_APP -m 0xF &
+nvmfpid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+modprobe -v nvme-rdma
+
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+waitforblk "nvme0n1"
+waitforblk "nvme0n2"
+
+nvme list
+
+for ctrl in /dev/nvme?; do
+ nvme id-ctrl $ctrl
+ nvme smart-log $ctrl
+done
+
+for ns in /dev/nvme?n*; do
+ nvme id-ns $ns
+done
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode2" || true
+
+if [ -d $spdk_nvme_cli ]; then
+ # Test spdk/nvme-cli NVMe-oF commands: discover, connect and disconnect
+ cd $spdk_nvme_cli
+ ./nvme discover -t rdma -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
+ nvme_num_before_connection=$(nvme list |grep "/dev/nvme*"|awk '{print $1}'|wc -l)
+ ./nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+ sleep 1
+ nvme_num=$(nvme list |grep "/dev/nvme*"|awk '{print $1}'|wc -l)
+ ./nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
+ if [ $nvme_num -le $nvme_num_before_connection ]; then
+ echo "spdk/nvme-cli connect target devices failed"
+ exit 1
+ fi
+fi
+
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $nvmfpid
+nvmftestfini $1
+report_test_completion "nvmf_spdk_nvme_cli"
+timing_exit nvme_cli
diff --git a/src/spdk/test/nvmf/nvmf.sh b/src/spdk/test/nvmf/nvmf.sh
new file mode 100755
index 00000000..70055777
--- /dev/null
+++ b/src/spdk/test/nvmf/nvmf.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+if [ "$(uname -s)" != "Linux" ]; then
+ exit 0
+fi
+
+source $rootdir/test/nvmf/common.sh
+
+timing_enter nvmf_tgt
+
+# NVMF_TEST_CORE_MASK is the biggest core mask specified by
+# any of the nvmf_tgt tests. Using this mask for the stub
+# ensures that if this mask spans CPU sockets, that we will
+# allocate memory from both sockets. The stub will *not*
+# run anything on the extra cores (and will sleep on master
+# core 0) so there is no impact to the nvmf_tgt tests by
+# specifying the bigger core mask.
+start_stub "-s 2048 -i 0 -m $NVMF_TEST_CORE_MASK"
+trap "kill_stub; exit 1" SIGINT SIGTERM EXIT
+
+export NVMF_APP_SHM_ID="0"
+export NVMF_APP="./app/nvmf_tgt/nvmf_tgt -i $NVMF_APP_SHM_ID -e 0xFFFF"
+
+run_test suite test/nvmf/filesystem/filesystem.sh
+run_test suite test/nvmf/discovery/discovery.sh
+if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then
+ run_test suite test/nvmf/nvme_cli/nvme_cli.sh
+fi
+run_test suite test/nvmf/lvol/nvmf_lvol.sh
+run_test suite test/nvmf/shutdown/shutdown.sh
+run_test suite test/nvmf/bdev_io_wait/bdev_io_wait.sh
+run_test suite test/nvmf/create_transport/create_transport.sh
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ run_test suite test/nvmf/multiconnection/multiconnection.sh
+fi
+
+timing_enter host
+
+run_test suite test/nvmf/host/bdevperf.sh
+run_test suite test/nvmf/host/identify.sh
+run_test suite test/nvmf/host/perf.sh
+# TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
+#run_test test/nvmf/host/identify_kernel_nvmf.sh
+run_test suite test/nvmf/host/aer.sh
+run_test suite test/nvmf/host/fio.sh
+
+run_test suite test/nvmf/nmic/nmic.sh
+
+timing_exit host
+trap - SIGINT SIGTERM EXIT
+kill_stub
+
+# TODO: enable nvme device detachment for multi-process so that
+# we can use the stub for this test
+run_test suite test/nvmf/rpc/rpc.sh
+run_test suite test/nvmf/fio/fio.sh
+revert_soft_roce
+
+report_test_completion "nvmf"
+timing_exit nvmf_tgt
diff --git a/src/spdk/test/nvmf/nvmfjson/json_config.sh b/src/spdk/test/nvmf/nvmfjson/json_config.sh
new file mode 100755
index 00000000..bc624d21
--- /dev/null
+++ b/src/spdk/test/nvmf/nvmfjson/json_config.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+set -xe
+NVMF_JSON_DIR=$(readlink -f $(dirname $0))
+. $NVMF_JSON_DIR/../../json_config/common.sh
+base_nvmf_config=$JSON_DIR/base_nvmf_config.json
+last_nvmf_config=$JSON_DIR/last_nvmf_config.json
+
+function test_subsystems() {
+ run_spdk_tgt
+
+ rpc_py="$spdk_rpc_py"
+ clear_config_py="$spdk_clear_config_py"
+
+ $rpc_py start_subsystem_init
+ create_nvmf_subsystem_config
+ $rpc_py save_config > $base_nvmf_config
+ test_json_config
+
+ clear_nvmf_subsystem_config
+ kill_targets
+
+ run_spdk_tgt
+ $rpc_py load_config < $base_nvmf_config
+ $rpc_py save_config > $last_nvmf_config
+
+ json_diff $base_nvmf_config $last_nvmf_config
+
+ clear_nvmf_subsystem_config
+ kill_targets
+ rm -f $base_nvmf_config $last_nvmf_config
+}
+
+trap 'on_error_exit "${FUNCNAME}" "${LINENO}"; rm -f $base_nvmf_config $last_nvmf_config' ERR
+
+timing_enter nvmf_json_config
+test_subsystems
+timing_exit nvmf_json_config
+revert_soft_roce
+
+report_test_completion nvmf_json_config
diff --git a/src/spdk/test/nvmf/rpc/rpc.sh b/src/spdk/test/nvmf/rpc/rpc.sh
new file mode 100755
index 00000000..5e8837d1
--- /dev/null
+++ b/src/spdk/test/nvmf/rpc/rpc.sh
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./rpc.sh iso
+nvmftestinit $1
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter rpc
+timing_enter start_nvmf_tgt
+# Start up the NVMf target in another process
+$NVMF_APP -m 0xF &
+pid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $pid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+# Set the number of subsystem construct/delete iterations
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ times=50
+else
+ times=3
+fi
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+
+# Disallow host NQN and make sure connect fails
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+$rpc_py nvmf_subsystem_allow_any_host -d nqn.2016-06.io.spdk:cnode1
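+# (-d disables allow_any_host, so only explicitly whitelisted host NQNs may
+#  connect; -e below re-enables it)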
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t RDMA -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+modprobe -v nvme-rdma
+trap "killprocess $pid; nvmfcleanup; exit 1" SIGINT SIGTERM EXIT
+
+# This connect should fail - the host NQN is not allowed
+! nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+# Add the host NQN and verify that the connect succeeds
+$rpc_py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1
+nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+waitforblk "nvme0n1"
+nvme disconnect -n nqn.2016-06.io.spdk:cnode1
+
+# Remove the host and verify that the connect fails
+$rpc_py nvmf_subsystem_remove_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1
+! nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+# Allow any host and verify that the connect succeeds
+$rpc_py nvmf_subsystem_allow_any_host -e nqn.2016-06.io.spdk:cnode1
+nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+waitforblk "nvme0n1"
+nvme disconnect -n nqn.2016-06.io.spdk:cnode1
+
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+# Repeatedly add and delete namespaces that use an explicit, non-default NSID.
+for i in `seq 1 $times`
+do
+ j=0
+ for bdev in $bdevs; do
+ let j=j+1
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode$j -s SPDK00000000000001
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$j -t RDMA -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$j $bdev -n 5
+ $rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode$j
+ nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode$j -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+ done
+
+ waitforblk "nvme0n1"
+ n=$j
+ for j in `seq 1 $n`
+ do
+ nvme disconnect -n nqn.2016-06.io.spdk:cnode$j
+ done
+
+ j=0
+ for bdev in $bdevs; do
+ let j=j+1
+ $rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode$j 5
+ done
+
+ n=$j
+ for j in `seq 1 $n`
+ do
+ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode$j
+ done
+
+done
+
+nvmfcleanup
+trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT
+
+# Repeatedly add and delete subsystems.
+for i in `seq 1 $times`
+do
+ j=0
+ for bdev in $bdevs; do
+ let j=j+1
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode$j -s SPDK00000000000001
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$j -t RDMA -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$j $bdev
+ $rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode$j
+ done
+
+ j=0
+ for bdev in $bdevs; do
+ let j=j+1
+ $rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode$j $j
+ done
+
+ n=$j
+ for j in `seq 1 $n`
+ do
+ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode$j
+ done
+done
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $pid
+nvmftestfini $1
+timing_exit rpc
diff --git a/src/spdk/test/nvmf/shutdown/shutdown.sh b/src/spdk/test/nvmf/shutdown/shutdown.sh
new file mode 100755
index 00000000..f68c4b21
--- /dev/null
+++ b/src/spdk/test/nvmf/shutdown/shutdown.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=128
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./shutdown.sh iso
+nvmftestinit $1
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z "$NVMF_FIRST_TARGET_IP" ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter shutdown
+timing_enter start_nvmf_tgt
+# Start up the NVMf target in another process
+$NVMF_APP -m 0xF &
+pid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $pid; nvmfcleanup; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
+
+num_subsystems=10
+# Soft-RoCE does not have enough queues available for
+# this test. Detect if we are using software RDMA.
+# If so, only use four subsystems.
+if check_ip_is_soft_roce "$NVMF_FIRST_TARGET_IP"; then
+ num_subsystems=4
+fi
+
+# Create subsystems
+for i in `seq 1 $num_subsystems`
+do
+ bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+ $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
+ for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i $bdev
+ done
+ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+done
+
+modprobe -v nvme-rdma
+modprobe -v nvme-fabrics
+
+# Repeatedly connect and disconnect
+for ((x=0; x<5;x++)); do
+ # Connect kernel host to subsystems
+ for i in `seq 1 $num_subsystems`; do
+ nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+ done
+ # Disconnect the subsystems in reverse order
+ for i in `seq $num_subsystems -1 1`; do
+ nvme disconnect -n nqn.2016-06.io.spdk:cnode${i}
+ done
+done
+
+# Connect to all subsystems once more and leave the connections up, so that
+# the target is shut down while controllers are still active
+for i in `seq 1 $num_subsystems`; do
+ nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+done
+
+waitforblk "nvme0n1"
+
+# Kill the nvmf target without removing any subsystems to check that it shuts down cleanly
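+# Remove any leftover fio verify-state file so it does not affect later runs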
+rm -f ./local-job0-0-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $pid
+
+nvmfcleanup
+nvmftestfini $1
+timing_exit shutdown
diff --git a/src/spdk/test/nvmf/test_plan.md b/src/spdk/test/nvmf/test_plan.md
new file mode 100644
index 00000000..94347ef8
--- /dev/null
+++ b/src/spdk/test/nvmf/test_plan.md
@@ -0,0 +1,95 @@
+# SPDK nvmf_tgt test plan
+
+## Objective
+The purpose of these tests is to verify correct behavior of the SPDK NVMe-oF
+feature. These tests are run either per-commit or as nightly tests.
+
+## Configuration
+All tests share the same basic configuration file for SPDK nvmf_tgt to run.
+The static configuration from the config file consists of setting the number
+of per-session queues and enabling RPC for further configuration via RPC
+calls. The RPC calls used for dynamic configuration, sketched below, consist
+of:
+- creating Malloc backend devices
+- creating Null Block backend devices
+- constructing NVMe-oF subsystems
+- deleting NVMe-oF subsystems
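+
+A minimal sketch of a subset of these calls, assuming a running `nvmf_tgt`
+and the `rpc.py` script from the SPDK `scripts` directory (the RPC names
+match the test scripts in this directory):
+
+```bash
+rpc_py="./scripts/rpc.py"
+
+# Create a 64 MiB malloc bdev with a 512-byte block size
+bdev=$($rpc_py construct_malloc_bdev 64 512)
+
+# Construct an NVMe-oF subsystem and expose the bdev as a namespace
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+
+# Delete the subsystem again
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+```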
+
+### Tests
+
+#### Test 1: NVMe-oF namespace on a Logical Volume device
+This test configures a SPDK NVMe-oF subsystem backed by logical volume
+devices and uses FIO to generate I/Os that target those subsystems.
+The logical volume bdevs are backed by malloc bdevs; a sketch of the
+corresponding RPC calls follows the test steps below.
+Test steps:
+- Step 1: Assign IP addresses to RDMA NICs.
+- Step 2: Start SPDK nvmf_tgt application.
+- Step 3: Create malloc bdevs.
+- Step 4: Create logical volume stores on malloc bdevs.
+- Step 5: Create 10 logical volume bdevs on each logical volume store.
+- Step 6: Create NVMe-oF subsystems with logical volume bdev namespaces.
+- Step 7: Connect to NVMe-oF subsystems with kernel initiator.
+- Step 8: Run FIO with workload parameters: blocksize=256k, iodepth=64,
+workload=randwrite; the verify flag is enabled so that FIO reads back and
+verifies the data written to the logical device. The run time is 10 seconds
+for a quick test and 10 minutes for the longer nightly test.
+- Step 9: Disconnect kernel initiator from NVMe-oF subsystems.
+- Step 10: Delete NVMe-oF subsystems from configuration.
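+
+A sketch of the logical volume setup in steps 3-6, assuming the
+`construct_lvol_store` and `construct_lvol_bdev` RPCs of this SPDK version
+(`lvs0` and `lbd0` are hypothetical names):
+
+```bash
+rpc_py="./scripts/rpc.py"
+
+# Step 3: malloc bdev that backs the logical volume store
+base_bdev=$($rpc_py construct_malloc_bdev 128 512)
+
+# Step 4: logical volume store on top of the malloc bdev
+ls_guid=$($rpc_py construct_lvol_store $base_bdev lvs0)
+
+# Step 5: one (of ten) 16 MiB logical volume bdevs on the store
+lb_name=$($rpc_py construct_lvol_bdev -u $ls_guid lbd0 16)
+
+# Step 6: expose the logical volume bdev as an NVMe-oF namespace
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $lb_name
+```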
+
+### Compatibility testing
+
+- Verify functionality of SPDK `nvmf_tgt` with Linux kernel NVMe-oF host
+ - Exercise various kernel NVMe host parameters (see the connect sketch below)
+ - `nr_io_queues`
+ - `queue_size`
+ - Test discovery subsystem with `nvme` CLI tool
+ - Verify that discovery service works correctly with `nvme discover`
+ - Verify that large responses work (many subsystems)
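+
+For example, the kernel host parameters can be exercised through the
+`nvme-cli` connect options (a sketch; the address and service id are
+assumptions):
+
+```bash
+# Discovery service: list the subsystems offered by the target
+nvme discover -t rdma -a 192.168.100.8 -s 4420
+
+# Connect with an explicit I/O queue count and queue depth
+nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 \
+    -a 192.168.100.8 -s 4420 \
+    --nr-io-queues=4 --queue-size=32
+```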
+
+### Specification compliance
+
+- NVMe base spec compliance
+ - Verify all mandatory admin commands are implemented
+ - Get Log Page
+ - Identify (including all mandatory CNS values)
+ - Identify Namespace
+ - Identify Controller
+ - Active Namespace List
+ - Allocated Namespace List
+ - Identify Allocated Namespace
+ - Attached Controller List
+ - Controller List
+ - Abort
+ - Set Features
+ - Get Features
+ - Asynchronous Event Request
+ - Keep Alive
+ - Verify all mandatory NVM command set I/O commands are implemented
+ - Flush
+ - Write
+ - Read
+ - Verify all mandatory log pages
+ - Error Information
+ - SMART / Health Information
+ - Firmware Slot Information
+ - Verify all mandatory Get/Set Features
+ - Arbitration
+ - Power Management
+ - Temperature Threshold
+ - Error Recovery
+ - Number of Queues
+ - Write Atomicity Normal
+ - Asynchronous Event Configuration
+ - Verify all implemented commands behave as required by the specification (a spot-check sketch follows this list)
+- Fabric command processing
+ - Verify that Connect commands with invalid parameters fail with the correct response (see the sketch after this list)
+ - Invalid RECFMT
+ - Invalid SQSIZE
+ - Invalid SUBNQN, HOSTNQN (too long, incorrect format, not null terminated)
+ - QID != 0 before admin queue created
+ - CNTLID != 0xFFFF (static controller mode)
+ - Verify that non-Fabric commands are only allowed in the correct states
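+
+A sketch of spot checks for the items above, using `nvme-cli` against a
+connected controller (`/dev/nvme0` and the target address are assumptions):
+
+```bash
+# Mandatory admin commands: Identify Controller, Get Log Page, Get Features
+nvme id-ctrl /dev/nvme0
+nvme smart-log /dev/nvme0
+nvme get-feature /dev/nvme0 -f 0x07   # feature 07h: Number of Queues
+
+# A Connect with a malformed host NQN must be rejected by the target
+! nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 \
+    -q "not-a-valid-nqn" -a 192.168.100.8 -s 4420
+```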
+
+### Configuration and RPC
+
+- Verify that invalid NQNs cannot be configured via conf file or RPC
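+
+For example (a sketch, assuming a running target and `rpc.py`), subsystem
+creation with a malformed NQN must fail:
+
+```bash
+# Neither call should succeed; both NQNs violate the NQN format rules
+! ./scripts/rpc.py nvmf_subsystem_create "nqn.invalid"
+! ./scripts/rpc.py nvmf_subsystem_create "not_an_nqn_at_all"
+```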