Diffstat (limited to 'src/spdk/test/ftl')
-rwxr-xr-x  src/spdk/test/ftl/bdevperf.sh                             31
-rw-r--r--  src/spdk/test/ftl/common.sh                               68
-rw-r--r--  src/spdk/test/ftl/config/.gitignore                        2
-rw-r--r--  src/spdk/test/ftl/config/fio/drive-prep.fio               15
-rw-r--r--  src/spdk/test/ftl/config/fio/randr.fio                    19
-rw-r--r--  src/spdk/test/ftl/config/fio/randrw.fio                   20
-rw-r--r--  src/spdk/test/ftl/config/fio/randw-verify-depth128.fio    20
-rw-r--r--  src/spdk/test/ftl/config/fio/randw-verify-j2.fio          25
-rw-r--r--  src/spdk/test/ftl/config/fio/randw-verify-qd128-ext.fio   20
-rw-r--r--  src/spdk/test/ftl/config/fio/randw-verify.fio             20
-rw-r--r--  src/spdk/test/ftl/config/fio/randw.fio                    18
-rwxr-xr-x  src/spdk/test/ftl/dirty_shutdown.sh                       93
-rwxr-xr-x  src/spdk/test/ftl/fio.sh                                  68
-rwxr-xr-x  src/spdk/test/ftl/ftl.sh                                  80
-rwxr-xr-x  src/spdk/test/ftl/json.sh                                 38
-rwxr-xr-x  src/spdk/test/ftl/restore.sh                              99
16 files changed, 636 insertions(+), 0 deletions(-)
diff --git a/src/spdk/test/ftl/bdevperf.sh b/src/spdk/test/ftl/bdevperf.sh
new file mode 100755
index 000000000..c0cbc27b9
--- /dev/null
+++ b/src/spdk/test/ftl/bdevperf.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+tests=('-q 1 -w randwrite -t 4 -o 69632' '-q 128 -w randwrite -t 4 -o 4096' '-q 128 -w verify -t 4 -o 4096')
+device=$1
+use_append=$2
+rpc_py=$rootdir/scripts/rpc.py
+
+for ((i = 0; i < ${#tests[@]}; i++)); do
+ timing_enter "${tests[$i]}"
+ "$rootdir/test/bdev/bdevperf/bdevperf" -z -T ftl0 ${tests[$i]} --json <(gen_ftl_nvme_conf) &
+ bdevperf_pid=$!
+
+ trap 'killprocess $bdevperf_pid; exit 1' SIGINT SIGTERM EXIT
+ waitforlisten $bdevperf_pid
+ $rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+ $rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1
+ $rpc_py bdev_ftl_create -b ftl0 -d nvme0n1 $use_append
+
+ $rootdir/test/bdev/bdevperf/bdevperf.py perform_tests
+	$rpc_py bdev_ftl_delete -b ftl0
+ $rpc_py bdev_ocssd_delete nvme0n1
+ $rpc_py bdev_nvme_detach_controller nvme0
+ killprocess $bdevperf_pid
+ trap - SIGINT SIGTERM EXIT
+ timing_exit "${tests[$i]}"
+done
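
Note: bdevperf.sh takes the OCSSD's PCI BDF as its first argument and an optional second argument that is forwarded verbatim to bdev_ftl_create (ftl.sh below passes --use_append for the append-command variant). A standalone invocation, normally run as root, might look like this sketch with a placeholder BDF:

    # run the three bdevperf workloads against an FTL bdev built on a hypothetical OCSSD at 0000:04:00.0
    ./bdevperf.sh 0000:04:00.0
    # same workloads, but forward --use_append to bdev_ftl_create
    ./bdevperf.sh 0000:04:00.0 --use_append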
diff --git a/src/spdk/test/ftl/common.sh b/src/spdk/test/ftl/common.sh
new file mode 100644
index 000000000..f4620ac3d
--- /dev/null
+++ b/src/spdk/test/ftl/common.sh
@@ -0,0 +1,68 @@
+# Common utility functions to be sourced by the libftl test scripts
+
+function get_chunk_size() {
+ $SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
+ | grep 'Logical blks per chunk' | sed 's/[^0-9]//g'
+}
+
+function get_num_group() {
+ $SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
+ | grep 'Groups' | sed 's/[^0-9]//g'
+}
+
+function get_num_pu() {
+ $SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
+ | grep 'PUs' | sed 's/[^0-9]//g'
+}
+
+function has_separate_md() {
+ local md_type
+ md_type=$($SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
+ | grep 'Metadata Transferred' | cut -d: -f2)
+ if [[ "$md_type" =~ Separate ]]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+function create_nv_cache_bdev() {
+ local name=$1
+ local ocssd_bdf=$2
+ local cache_bdf=$3
+ local num_punits=$4
+
+ local bytes_to_mb=$((1024 * 1024))
+ local chunk_size
+ chunk_size=$(get_chunk_size $ocssd_bdf)
+
+ # We need at least 2 bands worth of data + 1 block
+ local size=$((2 * 4096 * chunk_size * num_punits + 1))
+ # Round the size up to the nearest megabyte
+ local size=$(((size + bytes_to_mb) / bytes_to_mb))
+
+ # Create NVMe bdev on specified device and split it so that it has the desired size
+ local nvc_bdev
+ nvc_bdev=$($rootdir/scripts/rpc.py bdev_nvme_attach_controller -b $name -t PCIe -a $cache_bdf)
+ $rootdir/scripts/rpc.py bdev_split_create $nvc_bdev -s $size 1
+}
+
+function gen_ftl_nvme_conf() {
+ jq . <<- JSON
+ {
+ "subsystems": [
+ {
+ "subsystem": "bdev",
+ "config": [
+ {
+ "params": {
+ "nvme_adminq_poll_period_us": 100
+ },
+ "method": "bdev_nvme_set_options"
+ }
+ ]
+ }
+ ]
+ }
+ JSON
+}
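
To make the sizing in create_nv_cache_bdev concrete, here is the same arithmetic with assumed geometry (chunk_size and num_punits normally come from the identify output; 4096 is the 4 KiB block size hard-coded above):

    chunk_size=4096 num_punits=128                      # assumed values, for illustration only
    size=$((2 * 4096 * chunk_size * num_punits + 1))    # two bands' worth of data in bytes, plus 1 (see comment above)
    size=$(((size + 1024 * 1024) / (1024 * 1024)))      # round up to whole mebibytes
    echo "$size"                                        # prints 4097

which is why the value is converted to megabytes before being handed to bdev_split_create via -s.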
diff --git a/src/spdk/test/ftl/config/.gitignore b/src/spdk/test/ftl/config/.gitignore
new file mode 100644
index 000000000..5523f29b3
--- /dev/null
+++ b/src/spdk/test/ftl/config/.gitignore
@@ -0,0 +1,2 @@
+ftl.conf
+fio/*.fio
diff --git a/src/spdk/test/ftl/config/fio/drive-prep.fio b/src/spdk/test/ftl/config/fio/drive-prep.fio
new file mode 100644
index 000000000..430172ca9
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/drive-prep.fio
@@ -0,0 +1,15 @@
+[drive_prep]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+
+direct=1
+buffered=0
+size=100%
+randrepeat=0
+norandommap
+bs=4k
+iodepth=128
+numjobs=1
+rw=write
diff --git a/src/spdk/test/ftl/config/fio/randr.fio b/src/spdk/test/ftl/config/fio/randr.fio
new file mode 100644
index 000000000..f3f644476
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randr.fio
@@ -0,0 +1,19 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+direct=1
+thread=1
+buffered=0
+size=100%
+randrepeat=0
+time_based
+norandommap
+
+[test]
+stonewall
+bs=4k
+numjobs=4
+rw=randread
+iodepth=128
+runtime=1200
diff --git a/src/spdk/test/ftl/config/fio/randrw.fio b/src/spdk/test/ftl/config/fio/randrw.fio
new file mode 100644
index 000000000..fdce9a477
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randrw.fio
@@ -0,0 +1,20 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+direct=1
+thread=1
+buffered=0
+size=100%
+randrepeat=0
+time_based
+norandommap
+
+[test]
+stonewall
+bs=4k
+numjobs=4
+rw=randrw
+rwmixread=70
+iodepth=32
+runtime=1200
diff --git a/src/spdk/test/ftl/config/fio/randw-verify-depth128.fio b/src/spdk/test/ftl/config/fio/randw-verify-depth128.fio
new file mode 100644
index 000000000..9adee6cab
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randw-verify-depth128.fio
@@ -0,0 +1,20 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+direct=1
+iodepth=128
+rw=randwrite
+verify=crc32c
+do_verify=1
+verify_dump=0
+verify_state_save=0
+verify_fatal=1
+bs=4k
+random_distribution=normal
+serialize_overlap=1
+io_size=256M
+
+[test]
+numjobs=1
diff --git a/src/spdk/test/ftl/config/fio/randw-verify-j2.fio b/src/spdk/test/ftl/config/fio/randw-verify-j2.fio
new file mode 100644
index 000000000..4610efa3f
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randw-verify-j2.fio
@@ -0,0 +1,25 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+direct=1
+iodepth=128
+rw=randwrite
+verify=crc32c
+do_verify=1
+verify_dump=0
+verify_state_save=0
+verify_backlog=5000
+verify_fatal=1
+bs=4k
+random_distribution=normal
+serialize_overlap=1
+io_size=256M
+
+[first_half]
+offset=0%
+size=50%
+
+[second_half]
+offset=50%
diff --git a/src/spdk/test/ftl/config/fio/randw-verify-qd128-ext.fio b/src/spdk/test/ftl/config/fio/randw-verify-qd128-ext.fio
new file mode 100644
index 000000000..f22b1f2ec
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randw-verify-qd128-ext.fio
@@ -0,0 +1,20 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+direct=1
+iodepth=128
+rw=randwrite
+verify=crc32c
+do_verify=1
+verify_dump=0
+verify_state_save=0
+verify_fatal=1
+bs=4k
+random_distribution=normal
+serialize_overlap=1
+
+[test]
+io_size=64G
+numjobs=1
diff --git a/src/spdk/test/ftl/config/fio/randw-verify.fio b/src/spdk/test/ftl/config/fio/randw-verify.fio
new file mode 100644
index 000000000..edca6c618
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randw-verify.fio
@@ -0,0 +1,20 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+direct=1
+iodepth=1
+rw=randwrite
+size=256M
+verify=crc32c
+do_verify=1
+verify_dump=0
+verify_state_save=0
+verify_backlog=16
+verify_fatal=1
+bs=68k
+random_distribution=normal
+
+[test]
+numjobs=1
diff --git a/src/spdk/test/ftl/config/fio/randw.fio b/src/spdk/test/ftl/config/fio/randw.fio
new file mode 100644
index 000000000..f5b20b124
--- /dev/null
+++ b/src/spdk/test/ftl/config/fio/randw.fio
@@ -0,0 +1,18 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+direct=1
+thread=1
+buffered=0
+size=100%
+randrepeat=0
+time_based
+norandommap
+
+[test]
+bs=4k
+numjobs=1
+rw=randwrite
+iodepth=64
+runtime=1200
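
All of these job files reference ${FTL_BDEV_NAME} and ${FTL_JSON_CONF}, which fio expands from the environment; fio.sh below exports both before running the jobs through the fio_bdev helper from autotest_common.sh, which runs fio with SPDK's bdev ioengine plugin. Run by hand, the equivalent would be roughly (paths are illustrative):

    export FTL_BDEV_NAME=ftl0
    export FTL_JSON_CONF=$PWD/config/ftl.json    # the bdev-subsystem JSON that fio.sh saves
    fio_bdev config/fio/randw-verify.fio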
diff --git a/src/spdk/test/ftl/dirty_shutdown.sh b/src/spdk/test/ftl/dirty_shutdown.sh
new file mode 100755
index 000000000..c0e1f3115
--- /dev/null
+++ b/src/spdk/test/ftl/dirty_shutdown.sh
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+while getopts ':u:c:' opt; do
+ case $opt in
+ u) uuid=$OPTARG ;;
+ c) nv_cache=$OPTARG ;;
+ ?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
+ esac
+done
+shift $((OPTIND - 1))
+
+device=$1
+
+restore_kill() {
+ rm -f $testdir/config/ftl.json
+ rm -f $testdir/testfile.md5
+ rm -f $testdir/testfile2.md5
+
+ killprocess $svcpid || true
+ rmmod nbd || true
+}
+
+trap "restore_kill; exit 1" SIGINT SIGTERM EXIT
+
+chunk_size=$(get_chunk_size $device)
+num_group=$(get_num_group $device)
+num_pu=$(get_num_pu $device)
+pu_count=$((num_group * num_pu))
+
+# Write one band worth of data + one extra chunk
+data_size=$((chunk_size * (pu_count + 1)))
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
+svcpid=$!
+waitforlisten $svcpid
+
+if [ -n "$nv_cache" ]; then
+ nvc_bdev=$(create_nv_cache_bdev nvc0 $device $nv_cache $pu_count)
+fi
+
+$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
+ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1 -o"
+
+[ -n "$nvc_bdev" ] && ftl_construct_args+=" -c $nvc_bdev"
+[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
+
+$rpc_py $ftl_construct_args
+
+# Load the nbd driver
+modprobe nbd
+$rpc_py nbd_start_disk ftl0 /dev/nbd0
+waitfornbd nbd0
+
+$rpc_py save_config > $testdir/config/ftl.json
+
+dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$data_size oflag=dsync
+# Calculate checksum of the data written
+dd if=/dev/nbd0 bs=4K count=$data_size | md5sum > $testdir/testfile.md5
+$rpc_py nbd_stop_disk /dev/nbd0
+
+# Force kill bdev service (dirty shutdown) and start it again
+kill -9 $svcpid
+rm -f /dev/shm/spdk_tgt_trace.pid$svcpid
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init &
+svcpid=$!
+waitforlisten $svcpid
+
+$rpc_py load_config < $testdir/config/ftl.json
+waitfornbd nbd0
+
+# Write extra data after restore
+dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$chunk_size seek=$data_size oflag=dsync
+# Save md5 data
+dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum > $testdir/testfile2.md5
+
+# Make sure all data will be read from disk
+echo 3 > /proc/sys/vm/drop_caches
+
+# Verify that the checksum matches and the data is consistent
+dd if=/dev/nbd0 bs=4K count=$data_size | md5sum -c $testdir/testfile.md5
+dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum -c $testdir/testfile2.md5
+
+trap - SIGINT SIGTERM EXIT
+restore_kill
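
dirty_shutdown.sh accepts the same -u/-c options as restore.sh below; ftl.sh only schedules it when a suitable non-volatile-cache drive was found. A direct run could look like this, with both BDFs as placeholders:

    # OCSSD under test at 0000:04:00.0, regular NVMe drive at 0000:05:00.0 used as the NV cache
    ./dirty_shutdown.sh -c 0000:05:00.0 0000:04:00.0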
diff --git a/src/spdk/test/ftl/fio.sh b/src/spdk/test/ftl/fio.sh
new file mode 100755
index 000000000..3ad2a085a
--- /dev/null
+++ b/src/spdk/test/ftl/fio.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+declare -A suite
+suite['basic']='randw-verify randw-verify-j2 randw-verify-depth128'
+suite['extended']='drive-prep randw-verify-qd128-ext randw randr randrw'
+
+rpc_py=$rootdir/scripts/rpc.py
+
+fio_kill() {
+ killprocess $svcpid
+ rm -f $FTL_JSON_CONF
+}
+
+device=$1
+tests=${suite[$2]}
+uuid=$3
+
+if [[ $CONFIG_FIO_PLUGIN != y ]]; then
+ echo "FIO not available"
+ exit 1
+fi
+
+if [ -z "$tests" ]; then
+ echo "Invalid test suite '$2'"
+ exit 1
+fi
+
+export FTL_BDEV_NAME=ftl0
+export FTL_JSON_CONF=$testdir/config/ftl.json
+
+trap "fio_kill; exit 1" SIGINT SIGTERM EXIT
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
+svcpid=$!
+waitforlisten $svcpid
+
+$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1
+
+if [ -z "$uuid" ]; then
+ $rpc_py bdev_ftl_create -b ftl0 -d nvme0n1
+else
+ $rpc_py bdev_ftl_create -b ftl0 -d nvme0n1 -u $uuid
+fi
+
+waitforbdev ftl0
+
+(
+ echo '{"subsystems": ['
+ $rpc_py save_subsystem_config -n bdev
+ echo ']}'
+) > $FTL_JSON_CONF
+
+killprocess $svcpid
+trap - SIGINT SIGTERM EXIT
+
+for test in ${tests}; do
+ timing_enter $test
+ fio_bdev $testdir/config/fio/$test.fio
+ timing_exit $test
+done
+
+rm -f $FTL_JSON_CONF
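
fio.sh selects one of the two suites declared above and optionally reuses an existing FTL UUID; it requires SPDK to be built with the fio plugin (CONFIG_FIO_PLUGIN=y). Hypothetical invocations, mirroring how ftl.sh drives it:

    ./fio.sh 0000:04:00.0 basic               # randw-verify* jobs on a freshly created FTL bdev
    ./fio.sh 0000:04:00.0 extended "$uuid"    # long-running jobs against a previously created bdev (placeholder UUID)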
diff --git a/src/spdk/test/ftl/ftl.sh b/src/spdk/test/ftl/ftl.sh
new file mode 100755
index 000000000..b432bdfb0
--- /dev/null
+++ b/src/spdk/test/ftl/ftl.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+function at_ftl_exit() {
+	# restore the original driver
+	PCI_WHITELIST="$device" PCI_BLACKLIST="" DRIVER_OVERRIDE="$ocssd_original_driver" $rootdir/scripts/setup.sh
+}
+
+read -r device _ <<< "$OCSSD_PCI_DEVICES"
+
+if [[ -z "$device" ]]; then
+	echo "OCSSD device list is empty."
+	echo "This test requires the OCSSD_PCI_DEVICES environment variable to be set"
+	echo "and to point to the PCI BDF of an OCSSD device. You can specify multiple"
+	echo "space-separated BDFs, in which case the first one will be used."
+	exit 1
+fi
+
+ocssd_original_driver="$(basename $(readlink /sys/bus/pci/devices/$device/driver))"
+
+trap 'at_ftl_exit' SIGINT SIGTERM EXIT
+
+# OCSSD is blacklisted so bind it to vfio/uio driver before testing
+PCI_WHITELIST="$device" PCI_BLACKLIST="" DRIVER_OVERRIDE="" $rootdir/scripts/setup.sh
+
+# Use first regular NVMe disk (non-OC) as non-volatile cache
+nvme_disks=$($rootdir/scripts/gen_nvme.sh --json | jq -r \
+ ".config[] | select(.params.traddr != \"$device\").params.traddr")
+
+for disk in $nvme_disks; do
+ if has_separate_md $disk; then
+ nv_cache=$disk
+ break
+ fi
+done
+
+if [ -z "$nv_cache" ]; then
+	# TODO: once CI has devices with separate metadata support, fail the test here
+	echo "Couldn't find an NVMe device to be used as the non-volatile cache"
+fi
+
+run_test "ftl_bdevperf" $testdir/bdevperf.sh $device
+run_test "ftl_bdevperf_append" $testdir/bdevperf.sh $device --use_append
+
+run_test "ftl_restore" $testdir/restore.sh $device
+if [ -n "$nv_cache" ]; then
+ run_test "ftl_restore_nv_cache" $testdir/restore.sh -c $nv_cache $device
+fi
+
+if [ -n "$nv_cache" ]; then
+ run_test "ftl_dirty_shutdown" $testdir/dirty_shutdown.sh -c $nv_cache $device
+fi
+
+run_test "ftl_json" $testdir/json.sh $device
+
+if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then
+ run_test "ftl_fio_basic" $testdir/fio.sh $device basic
+
+ "$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
+ svcpid=$!
+
+ trap 'killprocess $svcpid; exit 1' SIGINT SIGTERM EXIT
+
+ waitforlisten $svcpid
+
+ $rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+ $rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
+ uuid=$($rpc_py bdev_ftl_create -b ftl0 -d nvme0n1 | jq -r '.uuid')
+ killprocess $svcpid
+
+ trap - SIGINT SIGTERM EXIT
+
+ run_test "ftl_fio_extended" $testdir/fio.sh $device extended $uuid
+fi
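
ftl.sh is the suite's entry point and only needs OCSSD_PCI_DEVICES in the environment; the extended fio pass additionally requires SPDK_TEST_FTL_EXTENDED=1. A manual run might look like the following, with a placeholder BDF:

    export OCSSD_PCI_DEVICES="0000:04:00.0"    # first space-separated BDF is used
    sudo -E ./ftl.sh                           # -E keeps the exported variables under sudo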
diff --git a/src/spdk/test/ftl/json.sh b/src/spdk/test/ftl/json.sh
new file mode 100755
index 000000000..0052665f8
--- /dev/null
+++ b/src/spdk/test/ftl/json.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+device=$1
+
+json_kill() {
+ killprocess $svcpid
+}
+
+trap "json_kill; exit 1" SIGINT SIGTERM EXIT
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
+svcpid=$!
+waitforlisten $svcpid
+
+# Create new bdev from json configuration
+$rootdir/scripts/gen_ftl.sh -n ftl0 -d nvme0n1 | $rpc_py load_subsystem_config
+$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
+
+waitforbdev ftl0
+uuid=$($rpc_py bdev_get_bdevs | jq -r ".[] | select(.name==\"ftl0\").uuid")
+
+$rpc_py bdev_ftl_delete -b ftl0
+
+# Restore bdev from json configuration
+$rootdir/scripts/gen_ftl.sh -n ftl0 -d nvme0n1 -u $uuid | $rpc_py load_subsystem_config
+$rpc_py bdev_ftl_delete -b ftl0
+$rpc_py bdev_nvme_detach_controller nvme0
+
+trap - SIGINT SIGTERM EXIT
+json_kill
diff --git a/src/spdk/test/ftl/restore.sh b/src/spdk/test/ftl/restore.sh
new file mode 100755
index 000000000..7b6b0ef05
--- /dev/null
+++ b/src/spdk/test/ftl/restore.sh
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+mount_dir=$(mktemp -d)
+
+while getopts ':u:c:' opt; do
+ case $opt in
+ u) uuid=$OPTARG ;;
+ c) nv_cache=$OPTARG ;;
+ ?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
+ esac
+done
+shift $((OPTIND - 1))
+device=$1
+num_group=$(get_num_group $device)
+num_pu=$(get_num_pu $device)
+pu_count=$((num_group * num_pu))
+
+restore_kill() {
+ if mount | grep $mount_dir; then
+ umount $mount_dir
+ fi
+ rm -rf $mount_dir
+ rm -f $testdir/testfile.md5
+ rm -f $testdir/testfile2.md5
+ rm -f $testdir/config/ftl.json
+
+ killprocess $svcpid
+ rmmod nbd || true
+}
+
+trap "restore_kill; exit 1" SIGINT SIGTERM EXIT
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
+svcpid=$!
+# Wait until spdk_tgt starts
+waitforlisten $svcpid
+
+if [ -n "$nv_cache" ]; then
+ nvc_bdev=$(create_nv_cache_bdev nvc0 $device $nv_cache $pu_count)
+fi
+
+$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
+$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
+ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1"
+
+[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
+[ -n "$nv_cache" ] && ftl_construct_args+=" -c $nvc_bdev"
+
+$rpc_py $ftl_construct_args
+
+# Load the nbd driver
+modprobe nbd
+$rpc_py nbd_start_disk ftl0 /dev/nbd0
+waitfornbd nbd0
+
+$rpc_py save_config > $testdir/config/ftl.json
+
+# Prepare the disk by creating ext4 fs and putting a file on it
+make_filesystem ext4 /dev/nbd0
+mount /dev/nbd0 $mount_dir
+dd if=/dev/urandom of=$mount_dir/testfile bs=4K count=256K
+sync
+mount -o remount /dev/nbd0 $mount_dir
+md5sum $mount_dir/testfile > $testdir/testfile.md5
+
+# Kill bdev service and start it again
+umount $mount_dir
+killprocess $svcpid
+
+"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init &
+svcpid=$!
+# Wait until spdk_tgt starts
+waitforlisten $svcpid
+
+$rpc_py load_config < $testdir/config/ftl.json
+waitfornbd nbd0
+
+mount /dev/nbd0 $mount_dir
+
+# Write a second file to make sure the writer thread has been restored properly
+dd if=/dev/urandom of=$mount_dir/testfile2 bs=4K count=256K
+md5sum $mount_dir/testfile2 > $testdir/testfile2.md5
+
+# Make sure second file will be read from disk
+echo 3 > /proc/sys/vm/drop_caches
+
+# Check both files have proper data
+md5sum -c $testdir/testfile.md5
+md5sum -c $testdir/testfile2.md5
+
+trap - SIGINT SIGTERM EXIT
+restore_kill
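
Like dirty_shutdown.sh, restore.sh can be given a cache drive with -c or asked to recreate the FTL bdev with a known UUID via -u; ftl.sh runs it both with and without the cache. Illustrative invocations (BDFs and UUID are placeholders):

    ./restore.sh 0000:04:00.0                                          # plain create + restore cycle
    ./restore.sh -c 0000:05:00.0 0000:04:00.0                          # with a non-volatile cache bdev
    ./restore.sh -u 11223344-5566-7788-99aa-bbccddeeff00 0000:04:00.0  # hypothetical UUID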