author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/test/nvmf/fio
parent     Initial commit. (diff)
Adding upstream version 14.2.21. (upstream/14.2.21, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test/nvmf/fio')
-rwxr-xr-x  src/spdk/test/nvmf/fio/fio.sh       108
-rwxr-xr-x  src/spdk/test/nvmf/fio/nvmf_fio.py  133
2 files changed, 241 insertions, 0 deletions
diff --git a/src/spdk/test/nvmf/fio/fio.sh b/src/spdk/test/nvmf/fio/fio.sh
new file mode 100755
index 00000000..ba3b12b3
--- /dev/null
+++ b/src/spdk/test/nvmf/fio/fio.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+set -e
+
+# Pass the parameter 'iso' to this script when running it in isolation to trigger RDMA device initialization,
+# e.g. sudo ./fio.sh iso
+nvmftestinit $1
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z "$NVMF_FIRST_TARGET_IP" ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+timing_enter fio
+timing_enter start_nvmf_tgt
+# Start up the NVMf target in another process
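+# -m 0xF: pin the target's reactors to a 4-core mask (cores 0-3)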
+$NVMF_APP -m 0xF &
+nvmfpid=$!
+
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $nvmfpid
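+# Create the RDMA transport; in this SPDK vintage, -u sets the I/O unit size (bytes) and -p the max queue pairs per controller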
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+
+timing_exit start_nvmf_tgt
+
+malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+# Create a RAID-0 bdev from two malloc bdevs
+raid_malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+raid_malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+$rpc_py construct_raid_bdev -n raid0 -s 64 -r 0 -b "$raid_malloc_bdevs"
+
+modprobe -v nvme-rdma
+
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+
+# Append the raid0 bdev into subsystem
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 raid0
+
+nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+
+waitforblk "nvme0n1"
+waitforblk "nvme0n2"
+waitforblk "nvme0n3"
+
+$testdir/nvmf_fio.py 4096 1 write 1 verify
+$testdir/nvmf_fio.py 4096 1 randwrite 1 verify
+$testdir/nvmf_fio.py 4096 128 write 1 verify
+$testdir/nvmf_fio.py 4096 128 randwrite 1 verify
+
+sync
+
+# Start the hotplug test case: delete the bdevs underneath an active fio read job; fio is expected to fail
+$testdir/nvmf_fio.py 4096 1 read 10 &
+fio_pid=$!
+
+sleep 3
+set +e
+
+$rpc_py destroy_raid_bdev "raid0"
+for malloc_bdev in $malloc_bdevs; do
+ $rpc_py delete_malloc_bdev "$malloc_bdev"
+done
+
+wait $fio_pid
+fio_status=$?
+
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
+
+if [ $fio_status -eq 0 ]; then
+	echo "nvmf hotplug test: fio succeeded, but it was expected to fail"
+ nvmfcleanup
+ killprocess $nvmfpid
+ exit 1
+else
+ echo "nvmf hotplug test: fio failed as expected"
+fi
+set -e
+
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+
+rm -f ./local-job0-0-verify.state
+rm -f ./local-job1-1-verify.state
+rm -f ./local-job2-2-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $nvmfpid
+nvmftestfini $1
+timing_exit fio
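After the nvme connect above, the two standalone malloc bdevs and the raid0 bdev should surface as three namespaces on the initiator, which is exactly what the waitforblk calls poll for. A manual spot-check of that state could look like the following sketch (assuming nvme-cli and lsblk are installed; the nvme0* names are the usual defaults, not guaranteed):

nvme list-subsys                                 # should list nqn.2016-06.io.spdk:cnode1
lsblk -l -o NAME | grep -E 'nvme[0-9]+n[0-9]+'   # expect nvme0n1, nvme0n2, nvme0n3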
diff --git a/src/spdk/test/nvmf/fio/nvmf_fio.py b/src/spdk/test/nvmf/fio/nvmf_fio.py
new file mode 100755
index 00000000..6096dd72
--- /dev/null
+++ b/src/spdk/test/nvmf/fio/nvmf_fio.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+
+from subprocess import check_output, Popen, PIPE, CalledProcessError
+import re
+import sys
+import signal
+
+fio_template = """
+[global]
+thread=1
+invalidate=1
+rw=%(testtype)s
+time_based=1
+runtime=%(runtime)s
+ioengine=libaio
+direct=1
+bs=%(blocksize)d
+iodepth=%(iodepth)d
+%(verify)s
+verify_dump=1
+
+"""
+
+verify_template = """
+do_verify=1
+verify=meta
+verify_pattern="meta"
+"""
+
+
+fio_job_template = """
+[job%(jobnumber)d]
+filename=%(device)s
+
+"""
+
+
+def interrupt_handler(signum, frame):
+ fio.terminate()
+ print("FIO terminated")
+ sys.exit(0)
+
+
+def main():
+
+ global fio
+    if len(sys.argv) < 5:
+        print("usage:")
+        print(" " + sys.argv[0] + " <io_size> <queue_depth> <test_type> <runtime> [verify]")
+        print("advanced usage:")
+        print("If you want to run fio with verify, add the string 'verify' after runtime.")
+        print("Currently nvmf_fio.py only supports write, rw, randwrite and randrw with verify enabled.")
+        sys.exit(1)
+
+ io_size = int(sys.argv[1])
+ queue_depth = int(sys.argv[2])
+ test_type = sys.argv[3]
+ runtime = sys.argv[4]
+ if len(sys.argv) > 5:
+ verify = True
+ else:
+ verify = False
+
+ devices = get_target_devices()
+ print("Found devices: ", devices)
+
+ # configure_devices(devices)
+ try:
+        fio_executable = check_output("which fio", shell=True).split()[0].decode()
+ except CalledProcessError as e:
+ sys.stderr.write(str(e))
+ sys.stderr.write("\nCan't find the fio binary, please install it.\n")
+ sys.exit(1)
+
+ device_paths = ['/dev/' + dev for dev in devices]
+ print(device_paths)
+ sys.stdout.flush()
+ signal.signal(signal.SIGTERM, interrupt_handler)
+ signal.signal(signal.SIGINT, interrupt_handler)
+    fio = Popen([fio_executable, '-'], stdin=PIPE)
+    # communicate() feeds the rendered job file to fio's stdin, closes it and waits for fio to exit
+    fio.communicate(create_fio_config(io_size, queue_depth, device_paths, test_type, runtime, verify))
+    rc = fio.returncode
+ print("FIO completed with code %d\n" % rc)
+ sys.stdout.flush()
+ sys.exit(rc)
+
+
+def get_target_devices():
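+    # Scrape lsblk's flat listing for NVMe namespace block devices (e.g. nvme0n1)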
+    output = check_output('lsblk -l -o NAME', shell=True).decode()
+    return re.findall(r"(nvme[0-9]+n[0-9]+)\n", output)
+
+
+def create_fio_config(size, q_depth, devices, test, run_time, verify):
+ if not verify:
+ verifyfio = ""
+ else:
+ verifyfio = verify_template
+ fiofile = fio_template % {"blocksize": size, "iodepth": q_depth,
+ "testtype": test, "runtime": run_time, "verify": verifyfio}
+ for (i, dev) in enumerate(devices):
+ fiofile += fio_job_template % {"jobnumber": i, "device": dev}
+ return fiofile.encode()
+
+
+def set_device_parameter(devices, filename_template, value):
+    for dev in devices:
+        filename = filename_template % dev
+        # sysfs attributes take short text strings; write in text mode so str values work
+        with open(filename, 'w') as f:
+            f.write(value)
+
+
+def configure_devices(devices):
+ set_device_parameter(devices, "/sys/block/%s/queue/nomerges", "2")
+ set_device_parameter(devices, "/sys/block/%s/queue/nr_requests", "128")
+ requested_qd = 128
+ qd = requested_qd
+ while qd > 0:
+ try:
+ set_device_parameter(devices, "/sys/block/%s/device/queue_depth", str(qd))
+ break
+ except IOError:
+ qd = qd - 1
+ if qd == 0:
+ print("Could not set block device queue depths.")
+ else:
+ print("Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd)))
+ set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "noop")
+
+
+if __name__ == "__main__":
+ main()
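For reference, an invocation such as ./nvmf_fio.py 4096 1 write 1 verify renders the templates above into the job file below and pipes it to fio over stdin; run standalone it would be roughly this sketch (the filename is illustrative and depends on enumeration order; with three namespaces the script emits sections job0 through job2, which is where the local-job*-verify.state files cleaned up by fio.sh come from):

fio - <<'JOB'
[global]
thread=1
invalidate=1
rw=write
time_based=1
runtime=1
ioengine=libaio
direct=1
bs=4096
iodepth=1
do_verify=1
verify=meta
verify_pattern="meta"
verify_dump=1

[job0]
filename=/dev/nvme0n1
JOB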