Diffstat (limited to 'src/spdk/test/nvmf/target')
24 files changed, 1516 insertions, 0 deletions
diff --git a/src/spdk/test/nvmf/target/abort.sh b/src/spdk/test/nvmf/target/abort.sh new file mode 100755 index 000000000..913c17e19 --- /dev/null +++ b/src/spdk/test/nvmf/target/abort.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=4096 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit +nvmfappstart -m 0xE + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +# Construct a delay bdev on a malloc bdev with a constant 1 s (1000000 us) delay for all read or write I/Os +$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 +$rpc_py bdev_delay_create -b Malloc0 -d Delay0 -r 1000000 -t 1000000 -w 1000000 -n 1000000 + +# Create an NVMe-oF subsystem and add the delay bdev as a namespace +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -a -s SPDK0 +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 Delay0 +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +# Run abort application +$SPDK_EXAMPLE_DIR/abort -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" -c 0x1 + +# Clean up +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode0 + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/bdev_io_wait.sh b/src/spdk/test/nvmf/target/bdev_io_wait.sh new file mode 100755 index 000000000..e57ffc36d --- /dev/null +++ b/src/spdk/test/nvmf/target/bdev_io_wait.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit +nvmfappstart -m 0xF --wait-for-rpc + +# Minimal bdev_io pool size (5) and cache size (1) +$rpc_py bdev_set_options -p 5 -c 1 +$rpc_py framework_start_init +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0 +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x10 -i 1 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w write -t 1 & +WRITE_PID=$! +"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x20 -i 2 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w read -t 1 & +READ_PID=$! +"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x40 -i 3 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w flush -t 1 & +FLUSH_PID=$! +"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x80 -i 4 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w unmap -t 1 & +UNMAP_PID=$! +sync + +wait $WRITE_PID +wait $READ_PID +wait $FLUSH_PID +wait $UNMAP_PID + +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/bdevio.sh b/src/spdk/test/nvmf/target/bdevio.sh new file mode 100755 index 000000000..f4d7eb1b5 --- /dev/null +++ b/src/spdk/test/nvmf/target/bdevio.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..)
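# A note on bdev_io_wait.sh above: bdev_set_options -p 5 -c 1 shrinks the global
# bdev_io pool to five requests (cache of one), which is what forces submissions
# into the ENOMEM/io_wait retry path while the four bdevperf jobs run in
# parallel. Minimal sketch of that pre-init sequence, default RPC socket assumed:
#   rpc.py -s /var/tmp/spdk.sock bdev_set_options -p 5 -c 1   # must precede framework_start_init
#   rpc.py -s /var/tmp/spdk.sock framework_start_init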
+source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit +# Don't use cores 0 - 2 to avoid overlap with bdevio. +nvmfappstart -m 0x78 + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 +$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0 +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +$rootdir/test/bdev/bdevio/bdevio --json <(gen_nvmf_target_json) + +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/connect_disconnect.sh b/src/spdk/test/nvmf/target/connect_disconnect.sh new file mode 100755 index 000000000..b74394123 --- /dev/null +++ b/src/spdk/test/nvmf/target/connect_disconnect.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +# connect disconnect is geared towards ensuring that we are properly freeing resources after disconnecting qpairs. +nvmftestinit +nvmfappstart -m 0xF + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -c 0 + +bdev="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" + +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +if [ $RUN_NIGHTLY -eq 1 ]; then + num_iterations=200 + IO_QUEUES="-i 8" +else + num_iterations=10 +fi + +set +x +for i in $(seq 1 $num_iterations); do + nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" $IO_QUEUES + waitforserial "$NVMF_SERIAL" + nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" + waitforserial_disconnect "$NVMF_SERIAL" +done +set -x + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/create_transport.sh b/src/spdk/test/nvmf/target/create_transport.sh new file mode 100755 index 000000000..e2766467b --- /dev/null +++ b/src/spdk/test/nvmf/target/create_transport.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +NULL_BDEV_SIZE=102400 +NULL_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +if ! 
hash nvme; then + echo "nvme command not found; skipping create transport test" + exit 0 +fi + +nvmftestinit +nvmfappstart -m 0xF + +# Use nvmf_create_transport call to create transport +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +null_bdevs="$($rpc_py bdev_null_create Null0 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE) " +null_bdevs+="$($rpc_py bdev_null_create Null1 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE)" + +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 +for null_bdev in $null_bdevs; do + $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $null_bdev +done +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +echo "Perform nvmf subsystem discovery via RPC" +$rpc_py nvmf_get_subsystems + +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 + +for null_bdev in $null_bdevs; do + $rpc_py bdev_null_delete $null_bdev +done + +check_bdevs=$($rpc_py bdev_get_bdevs | jq -r '.[].name') +if [ -n "$check_bdevs" ]; then + echo $check_bdevs + exit 1 +fi + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/discovery.sh b/src/spdk/test/nvmf/target/discovery.sh new file mode 100755 index 000000000..ad5a6ce96 --- /dev/null +++ b/src/spdk/test/nvmf/target/discovery.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +NULL_BDEV_SIZE=102400 +NULL_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +if ! hash nvme; then + echo "nvme command not found; skipping discovery test" + exit 0 +fi + +nvmftestinit +nvmfappstart -m 0xF + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +# Use at least 4 subsystems so they spill over to a second discovery log page +for i in $(seq 1 4); do + $rpc_py bdev_null_create Null$i $NULL_BDEV_SIZE $NULL_BLOCK_SIZE + $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK0000000000000$i + $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Null$i + $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT +done + +nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +echo "Perform nvmf subsystem discovery via RPC" +$rpc_py nvmf_get_subsystems + +for i in $(seq 1 4); do + $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode$i + $rpc_py bdev_null_delete Null$i +done + +check_bdevs=$($rpc_py bdev_get_bdevs | jq -r '.[].name') +if [ -n "$check_bdevs" ]; then + echo $check_bdevs + exit 1 +fi + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/filesystem.sh b/src/spdk/test/nvmf/target/filesystem.sh new file mode 100755 index 000000000..ff819fdb6 --- /dev/null +++ b/src/spdk/test/nvmf/target/filesystem.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit + +function nvmf_filesystem_create() { + fstype=$1 + nvme_name=$2 + + make_filesystem ${fstype} /dev/${nvme_name}p1 + + mount /dev/${nvme_name}p1 /mnt/device + touch /mnt/device/aaa + sync + rm /mnt/device/aaa + sync + + i=0 + while ! 
umount /mnt/device; do + [ $i -lt 15 ] || break + i=$((i + 1)) + sleep 1 + done + + # Make sure the target did not crash + kill -0 $nvmfpid + + # Make sure the device is still present + lsblk -l -o NAME | grep -q -w "${nvme_name}" + + # Make sure the partition is still present + lsblk -l -o NAME | grep -q -w "${nvme_name}p1" +} + +function nvmf_filesystem_part() { + incapsule=$1 + + nvmfappstart -m 0xF + + $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -c $incapsule + $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1 + $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL + $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 + $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + + nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" + + waitforserial "$NVMF_SERIAL" + nvme_name=$(lsblk -l -o NAME,SERIAL | grep -oP "([\w]*)(?=\s+${NVMF_SERIAL})") + + mkdir -p /mnt/device + + parted -s /dev/${nvme_name} mklabel msdos mkpart primary '0%' '100%' + partprobe + sleep 1 + + if [ $incapsule -eq 0 ]; then + run_test "filesystem_ext4" nvmf_filesystem_create "ext4" ${nvme_name} + run_test "filesystem_btrfs" nvmf_filesystem_create "btrfs" ${nvme_name} + run_test "filesystem_xfs" nvmf_filesystem_create "xfs" ${nvme_name} + else + run_test "filesystem_incapsule_ext4" nvmf_filesystem_create "ext4" ${nvme_name} + run_test "filesystem_incapsule_btrfs" nvmf_filesystem_create "btrfs" ${nvme_name} + run_test "filesystem_incapsule_xfs" nvmf_filesystem_create "xfs" ${nvme_name} + fi + + parted -s /dev/${nvme_name} rm 1 + + sync + nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true + + $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 + + trap - SIGINT SIGTERM EXIT + + killprocess $nvmfpid + nvmfpid= +} + +run_test "nvmf_filesystem_no_incapsule" nvmf_filesystem_part 0 +run_test "nvmf_filesystem_incapsule" nvmf_filesystem_part 4096 + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/fio.sh b/src/spdk/test/nvmf/target/fio.sh new file mode 100755 index 000000000..4e98d7083 --- /dev/null +++ b/src/spdk/test/nvmf/target/fio.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) 
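# A note on filesystem.sh above: the umount loop retries for up to 15 seconds
# because a freshly synced filesystem may keep the device busy for a moment, and
# `kill -0` delivers no signal - it only checks that the target process still
# exists. The same liveness probe in isolation, pid variable assumed:
#   kill -0 "$nvmfpid" && echo "target still running"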
+source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit +nvmfappstart -m 0xF + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) " +malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" +# Create a RAID-0 bdev from two malloc bdevs +raid_malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) " +raid_malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" +$rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$raid_malloc_bdevs" + +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL +for malloc_bdev in $malloc_bdevs; do + $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev" +done +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +# Append the raid0 bdev into subsystem +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 raid0 + +nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" + +waitforserial $NVMF_SERIAL 3 + +$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 1 -v +$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t randwrite -r 1 -v +$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t write -r 1 -v +$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t randwrite -r 1 -v + +sync + +#start hotplug test case +$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t read -r 10 & +fio_pid=$! + +sleep 3 + +$rpc_py bdev_raid_delete "raid0" +for malloc_bdev in $malloc_bdevs; do + $rpc_py bdev_malloc_delete "$malloc_bdev" +done + +fio_status=0 +wait $fio_pid || fio_status=$? + +nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true + +if [ $fio_status -eq 0 ]; then + echo "nvmf hotplug test: fio successful - expected failure" + nvmftestfini + exit 1 +else + echo "nvmf hotplug test: fio failed as expected" +fi + +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 + +rm -f ./local-job0-0-verify.state +rm -f ./local-job1-1-verify.state +rm -f ./local-job2-2-verify.state + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/fuzz.sh b/src/spdk/test/nvmf/target/fuzz.sh new file mode 100755 index 000000000..5a18be856 --- /dev/null +++ b/src/spdk/test/nvmf/target/fuzz.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit + +"${NVMF_APP[@]}" -m 0xF > $output_dir/nvmf_fuzz_tgt_output.txt 2>&1 & +nvmfpid=$! 
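# Pattern used by fuzz.sh and the other scripts here: launch the target in the
# background, record its PID from $!, and block on waitforlisten until the RPC
# socket accepts connections before issuing rpc.py calls. A hand-rolled
# equivalent of that wait, default socket path assumed:
#   until rpc.py -s /var/tmp/spdk.sock -t 1 rpc_get_methods >/dev/null 2>&1; do sleep 0.5; done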
+ +trap 'process_shm --id $NVMF_APP_SHM_ID; rm -f $testdir/nvmf_fuzz.conf; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT + +waitforlisten $nvmfpid +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +$rpc_py bdev_malloc_create -b Malloc0 64 512 + +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0 +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +echo "[Nvme]" > $testdir/nvmf_fuzz.conf +echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT\" Nvme0" >> $testdir/nvmf_fuzz.conf + +# Note that we chose a consistent seed to ensure that this test is consistent in nightly builds. +$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t 30 -S 123456 -C $testdir/nvmf_fuzz.conf -N -a 2> $output_dir/nvmf_fuzz_logs1.txt +# We don't specify a seed for this test. Instead we run a static list of commands from example.json. +$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -C $testdir/nvmf_fuzz.conf -j $rootdir/test/app/fuzz/nvme_fuzz/example.json -a 2> $output_dir/nvmf_fuzz_logs2.txt + +rm -f $testdir/nvmf_fuzz.conf +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 + +trap - SIGINT SIGTERM EXIT + +nvmfcleanup +killprocess $nvmfpid +nvmfpid= + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/identify_passthru.sh b/src/spdk/test/nvmf/target/identify_passthru.sh new file mode 100755 index 000000000..2ce52fe38 --- /dev/null +++ b/src/spdk/test/nvmf/target/identify_passthru.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh +source $rootdir/scripts/common.sh + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit + +timing_enter nvme_identify + +bdf=$(get_first_nvme_bdf) +if [ -z "${bdf}" ]; then + echo "No NVMe drive found but test requires it. Failing the test." + exit 1 +fi + +# Expected values +nvme_serial_number=$($SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:${bdf}" -i 0 | grep "Serial Number:" | awk '{print $3}') +nvme_model_number=$($SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:${bdf}" -i 0 | grep "Model Number:" | awk '{print $3}') + +timing_exit nvme_identify + +timing_enter start_nvmf_tgt + +"${NVMF_APP[@]}" -m 0xF --wait-for-rpc & +nvmfpid=$! 
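# identify_passthru.sh starts the target with --wait-for-rpc so that
# nvmf_set_config --passthru-identify-ctrlr can be applied before initialization
# completes; the test then requires the Identify data read locally over PCIe to
# match what the fabric-side controller reports:
#   identify -r "trtype:PCIe traddr:$bdf"      -> serial/model (expected)
#   identify -r "trtype:$TEST_TRANSPORT ..."   -> serial/model (must match)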
+ +trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT + +waitforlisten $nvmfpid +$rpc_py -v nvmf_set_config --passthru-identify-ctrlr +$rpc_py -v framework_start_init +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 +timing_exit start_nvmf_tgt + +$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a ${bdf} +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1 +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +$rpc_py nvmf_get_subsystems + +# Discovered values +nvmf_serial_number=$($SPDK_EXAMPLE_DIR/identify -r "\ + trtype:$TEST_TRANSPORT \ + adrfam:IPv4 \ + traddr:$NVMF_FIRST_TARGET_IP \ + trsvcid:$NVMF_PORT \ + subnqn:nqn.2016-06.io.spdk:cnode1" | grep "Serial Number:" | awk '{print $3}') + +nvmf_model_number=$($SPDK_EXAMPLE_DIR/identify -r "\ + trtype:$TEST_TRANSPORT \ + adrfam:IPv4 \ + traddr:$NVMF_FIRST_TARGET_IP \ + trsvcid:$NVMF_PORT \ + subnqn:nqn.2016-06.io.spdk:cnode1" | grep "Model Number:" | awk '{print $3}') + +if [ ${nvme_serial_number} != ${nvmf_serial_number} ]; then + echo "Serial number doesn't match" + exit 1 +fi + +if [ ${nvme_model_number} != ${nvmf_model_number} ]; then + echo "Model number doesn't match" + exit 1 +fi + +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/initiator_timeout.sh b/src/spdk/test/nvmf/target/initiator_timeout.sh new file mode 100755 index 000000000..199983be5 --- /dev/null +++ b/src/spdk/test/nvmf/target/initiator_timeout.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit +nvmfappstart -m 0xF + +trap 'process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT + +$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 + +# We cannot configure the bdev with an incredibly high latency up front because connect will not work properly. +$rpc_py bdev_delay_create -b Malloc0 -d Delay0 -r 30 -t 30 -w 30 -n 30 + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Delay0 +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" + +waitforserial "$NVMF_SERIAL" + +# Once our timed out I/O complete, we will still have 10 sec of I/O. +$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 60 -v & +fio_pid=$! + +sleep 3 + +# The kernel initiator has a default timeout of 30 seconds. Delay for 31 seconds to trigger initiator reconnect. +$rpc_py bdev_delay_update_latency Delay0 avg_read 31000000 +$rpc_py bdev_delay_update_latency Delay0 avg_write 31000000 +$rpc_py bdev_delay_update_latency Delay0 p99_read 31000000 +$rpc_py bdev_delay_update_latency Delay0 p99_write 31000000 + +sleep 3 + +# Reset these values so that subsequent I/O will complete in a timely manner.
+$rpc_py bdev_delay_update_latency Delay0 avg_read 30 +$rpc_py bdev_delay_update_latency Delay0 avg_write 30 +$rpc_py bdev_delay_update_latency Delay0 p99_read 30 +$rpc_py bdev_delay_update_latency Delay0 p99_write 30 + +fio_status=0 +wait $fio_pid || fio_status=$? + +nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true + +if [ $fio_status -eq 0 ]; then + echo "nvmf initiator timeout test: fio successful as expected" +else + echo "nvmf initiator timeout test: fio failed, expected success" + nvmftestfini + exit 1 +fi + +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 + +rm -f ./local-job0-0-verify.state + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/invalid.sh b/src/spdk/test/nvmf/target/invalid.sh new file mode 100755 index 000000000..98246efeb --- /dev/null +++ b/src/spdk/test/nvmf/target/invalid.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f "$(dirname "$0")") +rootdir=$(readlink -f "$testdir/../../..") +source "$rootdir/test/common/autotest_common.sh" +source "$rootdir/test/nvmf/common.sh" + +multi_target_rpc=$rootdir/test/nvmf/target/multitarget_rpc.py +rpc=$rootdir/scripts/rpc.py +nqn=nqn.2016-06.io.spdk:cnode +target=foobar +# pre-seed the rng to generate predictable values across different test runs +RANDOM=0 + +gen_random_s() { + local length=$1 ll + # characters from the ASCII range which nvme supports + local chars=({32..127}) + local string + + for ((ll = 0; ll < length; ll++)); do + string+="$(echo -e "\x$(printf '%x' "${chars[RANDOM % ${#chars[@]}]}")")" + done + # Be nice to rpc.py's arg parser and escape `-` in case it's the first character + if [[ ${string::1} == "-" ]]; then + string=${string/-/\\-} + fi + echo "$string" +} + +nvmftestinit +nvmfappstart -m 0xF + +trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT + +# Attempt to create subsystem with non-existing target +out=$("$rpc" nvmf_create_subsystem -t "$target" "$nqn$RANDOM" 2>&1) && false +[[ $out == *"Unable to find target"* ]] + +# Attempt to create subsystem with invalid serial number - inject ASCII char that's +# not in the range (0x20-0x7e) of those supported by the NVMe spec. +out=$("$rpc" nvmf_create_subsystem -s "$NVMF_SERIAL$(echo -e "\x1f")" "$nqn$RANDOM" 2>&1) && false +[[ $out == *"Invalid SN"* ]] + +# Attempt to create subsystem with invalid model - inject ASCII char that's not in the +# range (0x20-0x7e) of those supported by the NVMe spec.
+out=$("$rpc" nvmf_create_subsystem -d "SPDK_Controller$(echo -e "\x1f")" "$nqn$RANDOM" 2>&1) && false +[[ $out == *"Invalid MN"* ]] + +# Attempt to create subsystem with invalid serial number - exceed SPDK_NVME_CTRLR_SN_LEN (20) +out=$("$rpc" nvmf_create_subsystem -s "$(gen_random_s 21)" "$nqn$RANDOM" 2>&1) && false +[[ $out == *"Invalid SN"* ]] + +# Attempt to create subsystem with invalid model - exceed SPDK_NVME_CTRLR_MN_LEN (40) +out=$("$rpc" nvmf_create_subsystem -d "$(gen_random_s 41)" "$nqn$RANDOM" 2>&1) && false +[[ $out == *"Invalid MN"* ]] + +# Attempt to delete non-existing target +out=$("$multi_target_rpc" nvmf_delete_target --name "$target" 2>&1) && false +[[ $out == *"The specified target doesn't exist, cannot delete it."* ]] + +trap - SIGINT SIGTERM EXIT +nvmftestfini diff --git a/src/spdk/test/nvmf/target/multiconnection.sh b/src/spdk/test/nvmf/target/multiconnection.sh new file mode 100755 index 000000000..d7e490861 --- /dev/null +++ b/src/spdk/test/nvmf/target/multiconnection.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +NVMF_SUBSYS=11 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit +nvmfappstart -m 0xF + +# SoftRoce does not have enough queues available for +# multiconnection tests. Detect if we're using software RDMA. +# If so - lower the number of subsystems for test. +if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then + echo "Using software RDMA, lowering number of NVMeOF subsystems." + NVMF_SUBSYS=1 +fi + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +for i in $(seq 1 $NVMF_SUBSYS); do + $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i + $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i + $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i + $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT +done + +for i in $(seq 1 $NVMF_SUBSYS); do + nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" + waitforserial SPDK$i +done + +$rootdir/scripts/fio.py -p nvmf -i 262144 -d 64 -t read -r 10 +$rootdir/scripts/fio.py -p nvmf -i 262144 -d 64 -t randwrite -r 10 + +sync +for i in $(seq 1 $NVMF_SUBSYS); do + nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true + $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode${i} +done + +rm -f ./local-job0-0-verify.state + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/multitarget.sh b/src/spdk/test/nvmf/target/multitarget.sh new file mode 100755 index 000000000..4c3ece7c0 --- /dev/null +++ b/src/spdk/test/nvmf/target/multitarget.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +# For the time being this script is just menat to confirm the basic functionality of the +# multitarget RPCs as the in-tree applications don't support multi-target functionality. +rpc_py="$rootdir/test/nvmf/target/multitarget_rpc.py" + +nvmftestinit +nvmfappstart -m 0xF + +trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT + +# Target application should start with a single target. 
+if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then + echo "SPDK application did not start with the proper number of targets." && false +fi + +$rpc_py nvmf_create_target -n nvmf_tgt_1 -s 32 +$rpc_py nvmf_create_target -n nvmf_tgt_2 -s 32 + +if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "3" ]; then + echo "nvmf_create_target RPC didn't properly create targets." && false +fi + +$rpc_py nvmf_delete_target -n nvmf_tgt_1 +$rpc_py nvmf_delete_target -n nvmf_tgt_2 + +if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then + echo "nvmf_delete_target RPC didn't properly destroy targets." && false +fi + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/multitarget_rpc.py b/src/spdk/test/nvmf/target/multitarget_rpc.py new file mode 100755 index 000000000..c5ccbcece --- /dev/null +++ b/src/spdk/test/nvmf/target/multitarget_rpc.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 + +# Not for use in production. Please see the changelog for v19.10. + +from rpc.client import print_dict, JSONRPCException + +import logging +import argparse +import rpc +import sys +import shlex + +try: + from shlex import quote +except ImportError: + from pipes import quote + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description='NVMe-oF RPC command line interface. NOTE: spdk/scripts/ is expected in PYTHONPATH') + parser.add_argument('-s', dest='server_addr', + help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock') + parser.add_argument('-p', dest='port', + help='RPC port number (if server_addr is IP address)', + default=5260, type=int) + parser.add_argument('-t', dest='timeout', + help='Timeout as a floating point number expressed in seconds waiting for response. Default: 60.0', + default=60.0, type=float) + parser.add_argument('-v', dest='verbose', action='store_const', const="INFO", + help='Set verbose mode to INFO', default="ERROR") + parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'], + help="""Set verbose level. 
""") + subparsers = parser.add_subparsers(help='RPC methods') + + def nvmf_create_target(args): + print_dict(rpc.nvmf.nvmf_create_target(args.client, + name=args.name, + max_subsystems=args.max_subsystems)) + + p = subparsers.add_parser('nvmf_create_target', help='Create a new NVMe-oF target') + p.add_argument('-n', '--name', help='Target name (unique to application)', type=str, required=True) + p.add_argument('-s', '--max-subsystems', help='Max number of NVMf subsystems defaults to SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS', + type=int, required=False) + p.set_defaults(func=nvmf_create_target) + + def nvmf_delete_target(args): + print_dict(rpc.nvmf.nvmf_delete_target(args.client, + name=args.name)) + + p = subparsers.add_parser('nvmf_delete_target', help='Destroy the given NVMe-oF Target') + p.add_argument('-n', '--name', help='Target name (unique to application)', type=str, required=True) + p.set_defaults(func=nvmf_delete_target) + + def nvmf_get_targets(args): + print_dict(rpc.nvmf.nvmf_get_targets(args.client)) + + p = subparsers.add_parser('nvmf_get_targets', help='Get the list of NVMe-oF Targets') + p.set_defaults(func=nvmf_get_targets) + + def call_rpc_func(args): + try: + args.func(args) + except JSONRPCException as ex: + print(ex.message) + exit(1) + + def execute_script(parser, client, fd): + for rpc_call in map(str.rstrip, fd): + if not rpc_call.strip(): + continue + args = parser.parse_args(shlex.split(rpc_call)) + args.client = client + call_rpc_func(args) + + args = parser.parse_args() + args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper())) + if hasattr(args, 'func'): + call_rpc_func(args) + elif sys.stdin.isatty(): + # No arguments and no data piped through stdin + parser.print_help() + exit(1) + else: + execute_script(parser, args.client, sys.stdin) diff --git a/src/spdk/test/nvmf/target/nmic.sh b/src/spdk/test/nvmf/target/nmic.sh new file mode 100755 index 000000000..6a967dc08 --- /dev/null +++ b/src/spdk/test/nvmf/target/nmic.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit +nvmfappstart -m 0xF + +NVMF_SECOND_TARGET_IP=$(echo "$RDMA_IP_LIST" | sed -n 2p) + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +# Create subsystems +$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0 +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT" + +echo "test case1: single bdev can't be used in multiple subsystems" +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode2 -a -s SPDK2 +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode2 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT" +nmic_status=0 +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode2 Malloc0 || nmic_status=$? + +if [ $nmic_status -eq 0 ]; then + echo " Adding namespace passed - failure expected." + nvmftestfini + exit 1 +else + echo " Adding namespace failed - expected result." 
+fi + +echo "test case2: host connect to nvmf target in multiple paths" +if [ -n "$NVMF_SECOND_TARGET_IP" ]; then + $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_SECOND_TARGET_IP -s $NVMF_PORT + + nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" + nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_SECOND_TARGET_IP" -s "$NVMF_PORT" + + waitforserial "$NVMF_SERIAL" + + $rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 1 -v +fi + +nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/nvme_cli.sh b/src/spdk/test/nvmf/target/nvme_cli.sh new file mode 100755 index 000000000..29359689b --- /dev/null +++ b/src/spdk/test/nvmf/target/nvme_cli.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +if [ -z "${DEPENDENCY_DIR}" ]; then + echo DEPENDENCY_DIR not defined! + exit 1 +fi + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit +nvmfappstart -m 0xF + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 +$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1 + +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL -d SPDK_Controller1 +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0 +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" + +waitforserial $NVMF_SERIAL 2 +if ! 
get_nvme_devs print 2> /dev/null; then + echo "Could not find any nvme devices to work with, aborting the test" >&2 + exit 1 +fi + +for ctrl in "${nvmes[@]}"; do + nvme id-ctrl $ctrl + nvme smart-log $ctrl + nvme_model=$(nvme id-ctrl $ctrl | grep -w mn | sed 's/^.*: //' | sed 's/ *$//') + if [ "$nvme_model" != "SPDK_Controller1" ]; then + echo "Wrong model number for controller" $nvme_model + exit 1 + fi +done + +for ns in "${nvmes[@]}"; do + nvme id-ns $ns +done + +nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" + +# Test spdk/nvme-cli NVMe-oF commands: discover, connect and disconnect +nvme_cli_build +pushd "${DEPENDENCY_DIR}/nvme-cli" + +sed -i 's/shm_id=.*/shm_id=-1/g' spdk.conf +./nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT" +nvme_num_before_connection=$(get_nvme_devs 2>&1 || echo 0) +./nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" +sleep 1 +nvme_num=$(get_nvme_devs 2>&1) +./nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" +if [ $nvme_num -le $nvme_num_before_connection ]; then + echo "spdk/nvme-cli connect target devices failed" + exit 1 +fi +popd + +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/nvmf_example.sh b/src/spdk/test/nvmf/target/nvmf_example.sh new file mode 100755 index 000000000..28045bc49 --- /dev/null +++ b/src/spdk/test/nvmf/target/nvmf_example.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +rpc_py="$rootdir/scripts/rpc.py" + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +function build_nvmf_example_args() { + if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then + echo "sudo -u $(logname) $SPDK_EXAMPLE_DIR/nvmf -i $NVMF_APP_SHM_ID" -g 10000 + else + echo "$SPDK_EXAMPLE_DIR/nvmf -i $NVMF_APP_SHM_ID" -g 10000 + fi +} + +NVMF_EXAMPLE="$(build_nvmf_example_args)" + +function nvmfexamplestart() { + timing_enter start_nvmf_example + $NVMF_EXAMPLE $1 & + nvmfpid=$! + trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT + waitforlisten $nvmfpid + timing_exit start_nvmf_example +} + +timing_enter nvmf_example_test +nvmftestinit +nvmfexamplestart "-m 0xF" + +#create transport +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 +#create malloc bdev +malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) " +#create subsystem +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 + +#add ns to subsystem +for malloc_bdev in $malloc_bdevs; do + $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev" +done + +#add listener to subsystem +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +perf="$SPDK_EXAMPLE_DIR/perf" + +$perf -q 64 -o 4096 -w randrw -M 30 -t 10 \ + -r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \ +subnqn:nqn.2016-06.io.spdk:cnode1" + +trap - SIGINT SIGTERM EXIT +nvmftestfini +timing_exit nvmf_example_test diff --git a/src/spdk/test/nvmf/target/nvmf_lvol.sh b/src/spdk/test/nvmf/target/nvmf_lvol.sh new file mode 100755 index 000000000..d44bc9332 --- /dev/null +++ b/src/spdk/test/nvmf/target/nvmf_lvol.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) 
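# nvme_cli.sh above validates the exported model string end to end; the same
# check can be run by hand against any NVMe controller node, device name assumed:
#   nvme id-ctrl /dev/nvme0 | grep -w mn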
+source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 +LVOL_BDEV_INIT_SIZE=20 +LVOL_BDEV_FINAL_SIZE=30 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit +nvmfappstart -m 0x7 + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +# Construct a RAID volume for the logical volume store +base_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) " +base_bdevs+=$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) +$rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$base_bdevs" + +# Create the logical volume store on the RAID volume +lvs=$($rpc_py bdev_lvol_create_lvstore raid0 lvs) + +# Create a logical volume on the logical volume store +lvol=$($rpc_py bdev_lvol_create -u $lvs lvol $LVOL_BDEV_INIT_SIZE) + +# Create an NVMe-oF subsystem and add the logical volume as a namespace +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -a -s SPDK0 +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 $lvol +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +# Start random writes in the background +$SPDK_EXAMPLE_DIR/perf -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" -o 4096 -q 128 -s 512 -w randwrite -t 10 -c 0x18 & +perf_pid=$! + +sleep 1 + +# Perform some operations on the logical volume +snapshot=$($rpc_py bdev_lvol_snapshot $lvol "MY_SNAPSHOT") +$rpc_py bdev_lvol_resize $lvol $LVOL_BDEV_FINAL_SIZE +clone=$($rpc_py bdev_lvol_clone $snapshot "MY_CLONE") +$rpc_py bdev_lvol_inflate $clone + +# Wait for I/O to complete +wait $perf_pid + +# Clean up +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode0 +$rpc_py bdev_lvol_delete $lvol +$rpc_py bdev_lvol_delete_lvstore -u $lvs + +rm -f ./local-job* + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/nvmf_vhost.sh b/src/spdk/test/nvmf/target/nvmf_vhost.sh new file mode 100755 index 000000000..48e78d6d2 --- /dev/null +++ b/src/spdk/test/nvmf/target/nvmf_vhost.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$testdir/../../.. +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +if [ ! -d "/usr/local/qemu/spdk-3.0.0" ]; then + echo "Qemu not installed on this machine. It may be a VM. Skipping nvmf_vhost test." + exit 0 +fi + +source $rootdir/test/vhost/common.sh + +MALLOC_BDEV_SIZE=128 +MALLOC_BLOCK_SIZE=512 +NVMF_SOCK="/tmp/nvmf_rpc.sock" +NVMF_RPC="$rootdir/scripts/rpc.py -s $NVMF_SOCK" + +VHOST_SOCK="/tmp/vhost_rpc.sock" +VHOST_APP+=(-p 0 -r "$VHOST_SOCK" -u) +VHOST_RPC="$rootdir/scripts/rpc.py -s $VHOST_SOCK" + +nvmftestinit + +# Start Apps +"${NVMF_APP[@]}" -r $NVMF_SOCK & +nvmfpid=$! +waitforlisten $nvmfpid $NVMF_SOCK + +trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT + +mkdir -p "$(get_vhost_dir 3)" + +"${VHOST_APP[@]}" -S "$(get_vhost_dir 3)" & +vhostpid=$! 
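# nvmf_lvol.sh above layers its namespace as: two malloc bdevs -> raid0 ->
# lvstore -> lvol, then snapshots, resizes, clones and inflates that lvol while
# perf keeps writing in the background, so each RPC is verified under live I/O.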
+waitforlisten $vhostpid $VHOST_SOCK + +trap 'process_shm --id $NVMF_APP_SHM_ID; killprocess $vhostpid; nvmftestfini; exit 1' SIGINT SIGTERM EXIT + +# Configure NVMF tgt on host machine +malloc_bdev="$($NVMF_RPC bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" + +$NVMF_RPC nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -p 4 +$NVMF_RPC nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 +$NVMF_RPC nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev" +$NVMF_RPC nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +# Configure VHost on host machine +$VHOST_RPC bdev_nvme_attach_controller -b Nvme0 -t $TEST_TRANSPORT -f ipv4 -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1 +$VHOST_RPC vhost_create_scsi_controller naa.VhostScsi0.3 +$VHOST_RPC vhost_scsi_controller_add_target naa.VhostScsi0.3 0 "Nvme0n1" + +# Start qemu-based VM. +vm_setup --os="$VM_IMAGE" --disk-type=spdk_vhost_scsi --disks="VhostScsi0" --force=3 --vhost-name=3 + +vm_run 3 + +vm_wait_for_boot 300 3 + +# Run the fio workload remotely +vm_scp 3 $testdir/nvmf_vhost_fio.job 127.0.0.1:/root/nvmf_vhost_fio.job +vm_exec 3 "fio /root/nvmf_vhost_fio.job" +vm_shutdown_all + +trap - SIGINT SIGTERM EXIT + +killprocess $vhostpid +nvmftestfini diff --git a/src/spdk/test/nvmf/target/nvmf_vhost_fio.job b/src/spdk/test/nvmf/target/nvmf_vhost_fio.job new file mode 100644 index 000000000..350aa895e --- /dev/null +++ b/src/spdk/test/nvmf/target/nvmf_vhost_fio.job @@ -0,0 +1,19 @@ +[global] +blocksize=4k-512k +iodepth=128 +ioengine=libaio +filename=/dev/sdb +group_reporting +thread +numjobs=1 +direct=1 +do_verify=1 +verify=md5 +verify_fatal=1 +verify_dump=1 +verify_backlog=8 + +[randwrite] +rw=randwrite +runtime=15 +time_based diff --git a/src/spdk/test/nvmf/target/rpc.sh b/src/spdk/test/nvmf/target/rpc.sh new file mode 100755 index 000000000..d715e4b4f --- /dev/null +++ b/src/spdk/test/nvmf/target/rpc.sh @@ -0,0 +1,124 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..)
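# nvmf_vhost.sh above runs two SPDK apps side by side, so every rpc.py call is
# pinned to the right process with -s (socket paths per the script):
#   rpc.py -s /tmp/nvmf_rpc.sock nvmf_create_transport ...          # NVMe-oF target
#   rpc.py -s /tmp/vhost_rpc.sock vhost_create_scsi_controller ...  # vhost target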
+source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +rpc_py="$rootdir/scripts/rpc.py" + +function jcount() { + local filter=$1 + jq "$filter" | wc -l +} + +function jsum() { + local filter=$1 + jq "$filter" | awk '{s+=$1}END{print s}' +} + +nvmftestinit +nvmfappstart -m 0xF + +stats=$($rpc_py nvmf_get_stats) +# Expect 4 poll groups (from CPU mask) and no transports yet +[ "4" -eq $(jcount .poll_groups[].name <<< "$stats") ] +[ "null" == $(jq .poll_groups[0].transports[0] <<< "$stats") ] + +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + +stats=$($rpc_py nvmf_get_stats) +# Expect no QPs +[ "0" -eq $(jsum .poll_groups[].admin_qpairs <<< "$stats") ] +[ "0" -eq $(jsum .poll_groups[].io_qpairs <<< "$stats") ] +# Transport statistics are currently implemented for RDMA only +if [ 'rdma' == $TEST_TRANSPORT ]; then + # Expect RDMA transport and some devices + [ "1" -eq $(jcount .poll_groups[0].transports[].trtype <<< "$stats") ] + transport_type=$(jq -r .poll_groups[0].transports[0].trtype <<< "$stats") + [ "${transport_type,,}" == "${TEST_TRANSPORT,,}" ] + [ "0" -lt $(jcount .poll_groups[0].transports[0].devices[].name <<< "$stats") ] +fi + +# Set the number of iterations for subsystem construct/delete +if [ $RUN_NIGHTLY -eq 1 ]; then + times=50 +else + times=3 +fi + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1 + +# Disallow host NQN and make sure connect fails +$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL +$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 +$rpc_py nvmf_subsystem_allow_any_host -d nqn.2016-06.io.spdk:cnode1 +$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + +# This connect should fail - the host NQN is not allowed +! nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" + +# Add the host NQN and verify that the connect succeeds +$rpc_py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1 +nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" +waitforserial "$NVMF_SERIAL" +nvme disconnect -n nqn.2016-06.io.spdk:cnode1 + +# Remove the host and verify that the connect fails +$rpc_py nvmf_subsystem_remove_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1 +! nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" + +# Allow any host and verify that the connect succeeds +$rpc_py nvmf_subsystem_allow_any_host -e nqn.2016-06.io.spdk:cnode1 +nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" +waitforserial "$NVMF_SERIAL" +nvme disconnect -n nqn.2016-06.io.spdk:cnode1 + +$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 + +# Do frequent add/delete of namespaces with different NSIDs.
+for i in $(seq 1 $times); do + $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL + $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 5 + $rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1 + nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" + + waitforserial "$NVMF_SERIAL" + + nvme disconnect -n nqn.2016-06.io.spdk:cnode1 + + $rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 5 + $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 + +done + +# Do frequent add/delete. +for i in $(seq 1 $times); do + $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL + $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 + $rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1 + + $rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 1 + + $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 +done + +stats=$($rpc_py nvmf_get_stats) +# Expect some admin and IO qpairs +[ "0" -lt $(jsum .poll_groups[].admin_qpairs <<< "$stats") ] +[ "0" -lt $(jsum .poll_groups[].io_qpairs <<< "$stats") ] +# Transport statistics are currently implemented for RDMA only
if [ 'rdma' == $TEST_TRANSPORT ]; then + # Expect non-zero completions and request latencies accumulated + [ "0" -lt $(jsum .poll_groups[].transports[].devices[].completions <<< "$stats") ] + [ "0" -lt $(jsum .poll_groups[].transports[].devices[].request_latency <<< "$stats") ] +fi + +trap - SIGINT SIGTERM EXIT + +nvmftestfini diff --git a/src/spdk/test/nvmf/target/shutdown.sh b/src/spdk/test/nvmf/target/shutdown.sh new file mode 100755 index 000000000..8ad73bd6f --- /dev/null +++ b/src/spdk/test/nvmf/target/shutdown.sh @@ -0,0 +1,155 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +function starttarget() { + # Start the target + nvmfappstart -m 0x1E + + $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 + + num_subsystems=({1..10}) + # SoftRoCE does not have enough queues available for + # this test. Detect if we're using software RDMA. + # If so, only use two subsystems.
+ if check_ip_is_soft_roce "$NVMF_FIRST_TARGET_IP"; then + num_subsystems=({1..2}) + fi + + timing_enter create_subsystems + # Create subsystems + rm -rf $testdir/rpcs.txt + for i in "${num_subsystems[@]}"; do + cat <<- EOL >> $testdir/rpcs.txt + bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i + nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i + nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i + nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + EOL + done + $rpc_py < $testdir/rpcs.txt + timing_exit create_subsystems + +} + +function stoptarget() { + rm -f ./local-job0-0-verify.state + rm -rf $testdir/bdevperf.conf + rm -rf $testdir/rpcs.txt + + nvmftestfini +} + +function waitforio() { + # $1 = RPC socket + if [ -z "$1" ]; then + exit 1 + fi + # $2 = bdev name + if [ -z "$2" ]; then + exit 1 + fi + local ret=1 + local i + for ((i = 10; i != 0; i--)); do + read_io_count=$($rpc_py -s $1 bdev_get_iostat -b $2 | jq -r '.bdevs[0].num_read_ops') + # A few I/O will happen during initial examine. So wait until at least 100 I/O + # have completed to know that bdevperf is really generating the I/O. + if [ $read_io_count -ge 100 ]; then + ret=0 + break + fi + sleep 0.25 + done + return $ret +} + +# Test 1: Kill the initiator unexpectedly with no I/O outstanding +function nvmf_shutdown_tc1() { + starttarget + + # Run bdev_svc, which connects but does not issue I/O + $rootdir/test/app/bdev_svc/bdev_svc -m 0x1 -i 1 -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") & + perfpid=$! + waitforlisten $perfpid /var/tmp/bdevperf.sock + $rpc_py -s /var/tmp/bdevperf.sock framework_wait_init + + # Kill bdev_svc + kill -9 $perfpid || true + rm -f /var/run/spdk_bdev1 + + # Verify the target stays up + sleep 1 + kill -0 $nvmfpid + + # Connect with bdevperf and confirm it works + $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 1 + + stoptarget +} + +# Test 2: Kill initiator unexpectedly with I/O outstanding +function nvmf_shutdown_tc2() { + starttarget + + # Run bdevperf + $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 10 & + perfpid=$! + waitforlisten $perfpid /var/tmp/bdevperf.sock + $rpc_py -s /var/tmp/bdevperf.sock framework_wait_init + + waitforio /var/tmp/bdevperf.sock Nvme1n1 + + # Kill bdevperf half way through + killprocess $perfpid + + # Verify the target stays up + sleep 1 + kill -0 $nvmfpid + + stoptarget +} + +# Test 3: Kill the target unexpectedly with I/O outstanding +function nvmf_shutdown_tc3() { + starttarget + + # Run bdevperf + $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 10 & + perfpid=$! + waitforlisten $perfpid /var/tmp/bdevperf.sock + $rpc_py -s /var/tmp/bdevperf.sock framework_wait_init + + # Expand the trap to clean up bdevperf if something goes wrong + trap 'process_shm --id $NVMF_APP_SHM_ID; kill -9 $perfpid || true; nvmftestfini; exit 1' SIGINT SIGTERM EXIT + + waitforio /var/tmp/bdevperf.sock Nvme1n1 + + # Kill the target half way through + killprocess $nvmfpid + nvmfpid= + + # Verify bdevperf exits successfully + sleep 1 + # TODO: Right now the NVMe-oF initiator will not correctly detect broken connections + # and so it will never shut down. Just kill it. 
+ kill -9 $perfpid || true + + stoptarget +} + +nvmftestinit + +run_test "nvmf_shutdown_tc1" nvmf_shutdown_tc1 +run_test "nvmf_shutdown_tc2" nvmf_shutdown_tc2 +run_test "nvmf_shutdown_tc3" nvmf_shutdown_tc3 + +trap - SIGINT SIGTERM EXIT diff --git a/src/spdk/test/nvmf/target/srq_overwhelm.sh b/src/spdk/test/nvmf/target/srq_overwhelm.sh new file mode 100755 index 000000000..fe4dd7d29 --- /dev/null +++ b/src/spdk/test/nvmf/target/srq_overwhelm.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh +source $rootdir/test/nvmf/common.sh + +MALLOC_BDEV_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="$rootdir/scripts/rpc.py" + +nvmftestinit + +if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then + echo "Using software RDMA; likely not enough memory to run this test. Aborting." + exit 0 +fi + +nvmfappstart -m 0xF + +# create the rdma transport with an intentionally small SRQ depth +$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -s 1024 + +for i in $(seq 0 5); do + $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK00000000000001 + $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i + $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i + $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT + nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" -i 16 + waitforblk "nvme${i}n1" +done + +# By running 6 different FIO jobs, each with 13 subjobs, we end up with 78 fio threads issuing I/O to +# our target at once. This completely overwhelms the target SRQ, but allows us to verify that rnr_retry is +# working even at very high queue depths because the rdma qpair doesn't fail. +# It is normal to see the initiator timeout and reconnect waiting for completions from an overwhelmed target, +# but the connection should come up and FIO should complete without errors. +$rootdir/scripts/fio.py -p nvmf -i 1048576 -d 128 -t read -r 10 -n 13 + +sync + +for i in $(seq 0 5); do + nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" + $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode$i +done + +trap - SIGINT SIGTERM EXIT + +nvmftestfini
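# srq_overwhelm.sh sizes the shared receive queue deliberately small and floods
# it with 78 fio threads (6 connected subsystems x 13 subjobs); the pass
# criterion is that RNR retries absorb the overload and fio exits cleanly.
# Sketch of the transport knob involved, RDMA transport assumed:
#   rpc.py nvmf_create_transport -t RDMA -u 8192 -s 1024   # -s caps the SRQ depth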