Diffstat (limited to 'src/spdk/test/iscsi_tgt')
-rwxr-xr-x  src/spdk/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh  57
-rw-r--r--  src/spdk/test/iscsi_tgt/calsoft/auth.conf  3
-rwxr-xr-x  src/spdk/test/iscsi_tgt/calsoft/calsoft.py  115
-rwxr-xr-x  src/spdk/test/iscsi_tgt/calsoft/calsoft.sh  71
-rw-r--r--  src/spdk/test/iscsi_tgt/calsoft/iscsi.json  17
-rw-r--r--  src/spdk/test/iscsi_tgt/calsoft/its.conf  7
-rw-r--r--  src/spdk/test/iscsi_tgt/common.sh  50
-rwxr-xr-x  src/spdk/test/iscsi_tgt/digests/digests.sh  104
-rwxr-xr-x  src/spdk/test/iscsi_tgt/ext4test/ext4test.sh  128
-rwxr-xr-x  src/spdk/test/iscsi_tgt/filesystem/filesystem.sh  136
-rwxr-xr-x  src/spdk/test/iscsi_tgt/fio/fio.sh  142
-rw-r--r--  src/spdk/test/iscsi_tgt/fio/iscsi.conf.in  16
-rwxr-xr-x  src/spdk/test/iscsi_tgt/fio/running_config.sh  22
-rwxr-xr-x  src/spdk/test/iscsi_tgt/initiator/initiator.sh  57
-rwxr-xr-x  src/spdk/test/iscsi_tgt/ip_migration/ip_migration.sh  91
-rwxr-xr-x  src/spdk/test/iscsi_tgt/iscsi_tgt.sh  76
-rwxr-xr-x  src/spdk/test/iscsi_tgt/iscsijson/json_config.sh  43
-rwxr-xr-x  src/spdk/test/iscsi_tgt/lvol/iscsi_lvol.sh  82
-rwxr-xr-x  src/spdk/test/iscsi_tgt/multiconnection/multiconnection.sh  84
-rwxr-xr-x  src/spdk/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh  112
-rwxr-xr-x  src/spdk/test/iscsi_tgt/pmem/iscsi_pmem.sh  78
-rwxr-xr-x  src/spdk/test/iscsi_tgt/qos/qos.sh  98
-rwxr-xr-x  src/spdk/test/iscsi_tgt/rbd/rbd.sh  67
-rwxr-xr-x  src/spdk/test/iscsi_tgt/reset/reset.sh  77
-rwxr-xr-x  src/spdk/test/iscsi_tgt/rpc_config/rpc_config.py  502
-rwxr-xr-x  src/spdk/test/iscsi_tgt/rpc_config/rpc_config.sh  46
-rw-r--r--  src/spdk/test/iscsi_tgt/test_plan.md  41
27 files changed, 2322 insertions, 0 deletions
diff --git a/src/spdk/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh b/src/spdk/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh
new file mode 100755
index 00000000..94137507
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+timing_enter bdev_io_wait
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+timing_enter start_iscsi_tgt
+
+# Start the iSCSI target without using stub
+# Reason: Two SPDK processes will be started
+$ISCSI_APP -m 0x2 -p 1 -s 512 --wait-for-rpc &
+pid=$!
+echo "iSCSI target launched. pid: $pid"
+trap "killprocess $pid;exit 1" SIGINT SIGTERM EXIT
+waitforlisten $pid
+$rpc_py set_iscsi_options -o 30 -a 4
+# Minimal number of bdev io pool (5) and cache (1)
+$rpc_py set_bdev_options -p 5 -c 1
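+# With only 5 bdev_io structures in the pool, the bdevperf runs below (queue depth 128)
+# are expected to exhaust it and exercise the bdev io_wait (queued I/O) path that this
+# test is meant to cover.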
+$rpc_py start_subsystem_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "Malloc0:0" ==> use Malloc0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "256" ==> iSCSI queue depth 256
+# "-d" ==> disable CHAP authentication
+$rpc_py construct_target_node disk1 disk1_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 256 -d
+sleep 1
+trap "killprocess $pid; rm -f $testdir/bdev.conf; exit 1" SIGINT SIGTERM EXIT
+
+# Prepare config file for iSCSI initiator
+echo "[iSCSI_Initiator]" > $testdir/bdev.conf
+echo " URL iscsi://$TARGET_IP/iqn.2016-06.io.spdk:disk1/0 iSCSI0" >> $testdir/bdev.conf
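+# The generated bdev.conf points bdevperf's iSCSI initiator bdev (named iSCSI0) at
+# LUN 0 of the iqn.2016-06.io.spdk:disk1 target created above.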
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdev.conf -q 128 -o 4096 -w write -t 1
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdev.conf -q 128 -o 4096 -w read -t 1
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdev.conf -q 128 -o 4096 -w flush -t 1
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdev.conf -q 128 -o 4096 -w unmap -t 1
+rm -f $testdir/bdev.conf
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $pid
+
+report_test_completion "bdev_io_wait"
+timing_exit bdev_io_wait
diff --git a/src/spdk/test/iscsi_tgt/calsoft/auth.conf b/src/spdk/test/iscsi_tgt/calsoft/auth.conf
new file mode 100644
index 00000000..303bac31
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/calsoft/auth.conf
@@ -0,0 +1,3 @@
+[AuthGroup1]
+ Comment "Auth Group1"
+ Auth "root" "tester"
diff --git a/src/spdk/test/iscsi_tgt/calsoft/calsoft.py b/src/spdk/test/iscsi_tgt/calsoft/calsoft.py
new file mode 100755
index 00000000..2970328e
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/calsoft/calsoft.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python3
+
+import os
+import time
+import sys
+import subprocess
+import threading
+import json
+
+CALSOFT_BIN_PATH = "/usr/local/calsoft/iscsi-pcts-v1.5/bin"
+
+'''
+11/26/2015 disable tc_login_11_2 and tc_login_11_4
+RFC 7143 6.3
+Neither the initiator nor the target should attempt to declare or
+negotiate a parameter more than once during login, except for
+responses to specific keys that explicitly allow repeated key
+declarations (e.g., TargetAddress)
+
+The spec does not make it clear which other keys may be re-declared.
+Discussed this with UNH and reached the conclusion that TargetName/
+TargetAddress/MaxRecvDataSegmentLength may be re-declared.
+'''
+'''
+12/1/2015 add tc_login_2_2 to known_failed_cases
+RFC 7143 6.1
+A standard-label MUST begin with a capital letter and must not exceed
+63 characters.
+key name: A standard-label
+'''
+known_failed_cases = ['tc_ffp_15_2', 'tc_ffp_29_2', 'tc_ffp_29_3', 'tc_ffp_29_4',
+ 'tc_err_1_1', 'tc_err_1_2', 'tc_err_2_8',
+ 'tc_err_3_1', 'tc_err_3_2', 'tc_err_3_3',
+ 'tc_err_3_4', 'tc_err_5_1', 'tc_login_3_1',
+ 'tc_login_11_2', 'tc_login_11_4', 'tc_login_2_2']
+
+
+def run_case(case, result_list, log_dir_path):
+ try:
+ case_log = subprocess.check_output("{}/{}".format(CALSOFT_BIN_PATH, case), stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ result_list.append({"Name": case, "Result": "FAIL"})
+ case_log = e.output
+ else:
+ result_list.append({"Name": case, "Result": "PASS"})
+    # check_output() returns bytes under Python 3, so write the log in binary mode
+    with open(log_dir_path + case + '.txt', 'wb') as f:
+ f.write(case_log)
+
+
+def main():
+ if not os.path.exists(CALSOFT_BIN_PATH):
+ print("The Calsoft test suite is not available on this machine.")
+ sys.exit(1)
+
+ output_dir = sys.argv[1]
+ if len(sys.argv) > 2:
+ output_file = sys.argv[2]
+ else:
+ output_file = "%s/calsoft.json" % (output_dir)
+
+ log_dir = "%s/calsoft/" % output_dir
+
+ all_cases = [x for x in os.listdir(CALSOFT_BIN_PATH) if x.startswith('tc')]
+ all_cases.sort()
+
+ case_result_list = []
+
+ result = {"Calsoft iSCSI tests": case_result_list}
+
+ if not os.path.exists(log_dir):
+ os.mkdir(log_dir)
+ for case in known_failed_cases:
+ print("Skipping %s. It is known to fail." % (case))
+ case_result_list.append({"Name": case, "Result": "SKIP"})
+
+ thread_objs = []
+ left_cases = list(set(all_cases) - set(known_failed_cases))
+ index = 0
+ max_thread_count = 32
+
+ while index < len(left_cases):
+ cur_thread_count = 0
+ for thread_obj in thread_objs:
+ if thread_obj.is_alive():
+ cur_thread_count += 1
+ while cur_thread_count < max_thread_count and index < len(left_cases):
+ thread_obj = threading.Thread(target=run_case, args=(left_cases[index], case_result_list, log_dir, ))
+ thread_obj.start()
+ time.sleep(0.02)
+ thread_objs.append(thread_obj)
+ index += 1
+ cur_thread_count += 1
+ end_time = time.time() + 30
+ while time.time() < end_time:
+ for thread_obj in thread_objs:
+ if thread_obj.is_alive():
+ break
+ else:
+ break
+ else:
+ print("Thread timeout")
+ exit(1)
+ with open(output_file, 'w') as f:
+ json.dump(obj=result, fp=f, indent=2)
+
+ failed = 0
+ for x in case_result_list:
+ if x["Result"] == "FAIL":
+ print("Test case %s failed." % (x["Name"]))
+ failed = 1
+ exit(failed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/spdk/test/iscsi_tgt/calsoft/calsoft.sh b/src/spdk/test/iscsi_tgt/calsoft/calsoft.sh
new file mode 100755
index 00000000..1a5c3932
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/calsoft/calsoft.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+delete_tmp_conf_files() {
+ rm -f /usr/local/etc/its.conf
+ rm -f /usr/local/etc/auth.conf
+}
+
+if [ ! -d /usr/local/calsoft ]; then
+ echo "skipping calsoft tests"
+ exit 0
+fi
+
+timing_enter calsoft
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+calsoft_py="$testdir/calsoft.py"
+
+# Copy the calsoft config file to /usr/local/etc
+mkdir -p /usr/local/etc
+cp $testdir/its.conf /usr/local/etc/
+cp $testdir/auth.conf /usr/local/etc/
+
+# Append target ip to calsoft config
+echo "IP=$TARGET_IP" >> /usr/local/etc/its.conf
+
+timing_enter start_iscsi_tgt
+
+$ISCSI_APP -m 0x1 --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap "killprocess $pid; delete_tmp_conf_files; exit 1 " SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py load_subsystem_config < $testdir/iscsi.json
+$rpc_py start_subsystem_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py construct_malloc_bdev -b MyBdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "MyBdev:0" ==> use MyBdev blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-g 1" ==> use auth group 1 for CHAP authentication
+$rpc_py construct_target_node Target3 Target3_alias 'MyBdev:0' $PORTAL_TAG:$INITIATOR_TAG 64 -g 1
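+# Auth group 1 above is defined in the auth.conf copied earlier (user "root", secret
+# "tester"), matching the CHAP credentials the Calsoft initiator reads from its.conf.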
+sleep 1
+
+if [ "$1" ]; then
+ $calsoft_py "$output_dir" "$1"
+ failed=$?
+else
+ $calsoft_py "$output_dir"
+ failed=$?
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $pid
+delete_tmp_conf_files
+timing_exit calsoft
+exit $failed
diff --git a/src/spdk/test/iscsi_tgt/calsoft/iscsi.json b/src/spdk/test/iscsi_tgt/calsoft/iscsi.json
new file mode 100644
index 00000000..34e44ca0
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/calsoft/iscsi.json
@@ -0,0 +1,17 @@
+{
+ "subsystem": "iscsi",
+ "config": [
+ {
+ "params": {
+ "allow_duplicated_isid": true,
+ "nop_timeout": 30,
+ "nop_in_interval": 10,
+ "discovery_auth_group": 1,
+ "max_sessions": 256,
+ "error_recovery_level": 2,
+ "auth_file": "/usr/local/etc/auth.conf"
+ },
+ "method": "set_iscsi_options"
+ }
+ ]
+}
diff --git a/src/spdk/test/iscsi_tgt/calsoft/its.conf b/src/spdk/test/iscsi_tgt/calsoft/its.conf
new file mode 100644
index 00000000..6469dab6
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/calsoft/its.conf
@@ -0,0 +1,7 @@
+InitiatorName=iqn.1994-05.com.redhat:b3283535dc3b
+TargetName=iqn.2016-06.io.spdk:Target3
+DefaultTime2Retain=20
+DefaultTime2Wait=2
+AuthMethod=CHAP,None
+UserName=root
+PassWord=tester
diff --git a/src/spdk/test/iscsi_tgt/common.sh b/src/spdk/test/iscsi_tgt/common.sh
new file mode 100644
index 00000000..1928449b
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/common.sh
@@ -0,0 +1,50 @@
+# Network configuration
+TARGET_INTERFACE="spdk_tgt_int"
+INITIATOR_INTERFACE="spdk_init_int"
+TARGET_NAMESPACE="spdk_iscsi_ns"
+TARGET_NS_CMD="ip netns exec $TARGET_NAMESPACE"
+
+# iSCSI target configuration
+TARGET_IP=10.0.0.1
+INITIATOR_IP=10.0.0.2
+ISCSI_PORT=3260
+NETMASK=$INITIATOR_IP/32
+INITIATOR_TAG=2
+INITIATOR_NAME=ANY
+PORTAL_TAG=1
+ISCSI_APP="$TARGET_NS_CMD ./app/iscsi_tgt/iscsi_tgt -i 0"
+ISCSI_TEST_CORE_MASK=0xFF
+
+function create_veth_interfaces() {
+ # $1 = test type (posix/vpp)
+ ip netns del $TARGET_NAMESPACE || true
+ ip link delete $INITIATOR_INTERFACE || true
+
+ trap "cleanup_veth_interfaces $1; exit 1" SIGINT SIGTERM EXIT
+
+ # Create veth (Virtual ethernet) interface pair
+ ip link add $INITIATOR_INTERFACE type veth peer name $TARGET_INTERFACE
+ ip addr add $INITIATOR_IP/24 dev $INITIATOR_INTERFACE
+ ip link set $INITIATOR_INTERFACE up
+
+ # Create and add interface for target to network namespace
+ ip netns add $TARGET_NAMESPACE
+ ip link set $TARGET_INTERFACE netns $TARGET_NAMESPACE
+
+ $TARGET_NS_CMD ip link set lo up
+ $TARGET_NS_CMD ip addr add $TARGET_IP/24 dev $TARGET_INTERFACE
+ $TARGET_NS_CMD ip link set $TARGET_INTERFACE up
+
+ # Verify connectivity
+ ping -c 1 $TARGET_IP
+ ip netns exec $TARGET_NAMESPACE ping -c 1 $INITIATOR_IP
+}
+
+function cleanup_veth_interfaces() {
+ # $1 = test type (posix/vpp)
+
+ # Cleanup veth interfaces and network namespace
+ # Note: removing one veth, removes the pair
+ ip link delete $INITIATOR_INTERFACE
+ ip netns del $TARGET_NAMESPACE
+}
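+
+# Typical usage from a test runner (see iscsi_tgt.sh below): source this file, call
+# create_veth_interfaces posix|vpp before running the tests and cleanup_veth_interfaces
+# with the same argument afterwards.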
diff --git a/src/spdk/test/iscsi_tgt/digests/digests.sh b/src/spdk/test/iscsi_tgt/digests/digests.sh
new file mode 100755
index 00000000..675cf1c1
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/digests/digests.sh
@@ -0,0 +1,104 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+function node_login_fio_logout() {
+ for arg in "$@"; do
+ iscsiadm -m node -p $TARGET_IP:$ISCSI_PORT -o update -n node.conn[0].iscsi.$arg
+ done
+ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+ sleep 1
+ $fio_py 512 1 write 2
+ $fio_py 512 1 read 2
+ iscsiadm -m node --logout -p $TARGET_IP:$ISCSI_PORT
+ sleep 1
+}
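+# For example, node_login_fio_logout "HeaderDigest -v CRC32C" expands to roughly
+#   iscsiadm -m node -p $TARGET_IP:$ISCSI_PORT -o update -n node.conn[0].iscsi.HeaderDigest -v CRC32C
+# before logging in and running the fio write and read passes.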
+
+function iscsi_header_digest_test() {
+	# Set HeaderDigest to CRC32C
+ timing_enter HeaderDigest_enabled
+ node_login_fio_logout "HeaderDigest -v CRC32C"
+ timing_exit HeaderDigest_enabled
+
+	# Let the iSCSI target decide its preference for
+	# HeaderDigest based on its capability.
+ timing_enter preferred
+ node_login_fio_logout "HeaderDigest -v CRC32C,None"
+ timing_exit preferred
+}
+
+function iscsi_header_data_digest_test() {
+	# Enable only HeaderDigest (CRC32C)
+ timing_enter HeaderDigest_enabled
+ node_login_fio_logout "HeaderDigest -v CRC32C" "DataDigest -v None"
+ timing_exit HeaderDigest_enabled
+
+	# Enable only DataDigest (CRC32C)
+ timing_enter DataDigest_enabled
+ node_login_fio_logout "HeaderDigest -v None" "DataDigest -v CRC32C"
+ timing_exit DataDigest_enabled
+
+	# Let the iSCSI target decide its preference for both
+	# HeaderDigest and DataDigest based on its capability.
+ timing_enter both_preferred
+ node_login_fio_logout "HeaderDigest -v CRC32C,None" "DataDigest -v CRC32C,None"
+ timing_exit both_preferred
+
+	# Enable both HeaderDigest and DataDigest.
+ timing_enter both_enabled
+ node_login_fio_logout "HeaderDigest -v CRC32C" "DataDigest -v CRC32C"
+ timing_exit both_enabled
+}
+
+timing_enter digests
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+$ISCSI_APP -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py set_iscsi_options -o 30 -a 16
+$rpc_py start_subsystem_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "Malloc0:0" ==> use Malloc0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py construct_target_node Target3 Target3_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+
+# The iscsiadm shipped with some Fedora releases drops the DataDigest parameter.
+# Check for it and avoid setting DataDigest when it is missing.
+DataDigestAbility=$(iscsiadm -m node -p $TARGET_IP:$ISCSI_PORT | grep DataDigest || true)
+if [ "$DataDigestAbility"x = x ]; then
+ iscsi_header_digest_test
+else
+ iscsi_header_data_digest_test
+fi
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+killprocess $pid
+timing_exit digests
diff --git a/src/spdk/test/iscsi_tgt/ext4test/ext4test.sh b/src/spdk/test/iscsi_tgt/ext4test/ext4test.sh
new file mode 100755
index 00000000..b022cfb7
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/ext4test/ext4test.sh
@@ -0,0 +1,128 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+if [ ! -z $1 ]; then
+ DPDK_DIR=$(readlink -f $1)
+fi
+
+timing_enter ext4test
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+timing_enter start_iscsi_tgt
+
+$ISCSI_APP --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap "$rpc_py destruct_split_vbdev Nvme0n1 || true; killprocess $pid; rm -f $testdir/iscsi.conf; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py set_iscsi_options -o 30 -a 4 -b "iqn.2013-06.com.intel.ch.spdk"
+$rpc_py start_subsystem_init
+$rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
+$rpc_py construct_malloc_bdev 512 4096 --name Malloc0
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py construct_error_bdev 'Malloc0'
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py construct_target_node Target0 Target0_alias EE_Malloc0:0 1:2 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+
+trap 'for new_dir in `dir -d /mnt/*dir`; do umount $new_dir; rm -rf $new_dir; done; \
+ iscsicleanup; killprocess $pid; rm -f $testdir/iscsi.conf; exit 1' SIGINT SIGTERM EXIT
+
+sleep 1
+
+echo "Test error injection"
+$rpc_py bdev_inject_error EE_Malloc0 'all' 'failure' -n 1000
+
+dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+
+set +e
+mkfs.ext4 -F /dev/$dev
+if [ $? -eq 0 ]; then
+ echo "mkfs successful - expected failure"
+ iscsicleanup
+ killprocess $pid
+ exit 1
+else
+ echo "mkfs failed as expected"
+fi
+set -e
+
+$rpc_py bdev_inject_error EE_Malloc0 'clear' 'failure'
+echo "Error injection test done"
+
+iscsicleanup
+
+if [ -z "$NO_NVME" ]; then
+ $rpc_py construct_split_vbdev Nvme0n1 2 -s 10000
+ $rpc_py construct_target_node Target1 Target1_alias Nvme0n1p0:0 1:2 64 -d
+fi
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+
+devs=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+
+for dev in $devs; do
+ mkfs.ext4 -F /dev/$dev
+ mkdir -p /mnt/${dev}dir
+ mount -o sync /dev/$dev /mnt/${dev}dir
+
+ rsync -qav --exclude=".git" --exclude="*.o" $rootdir/ /mnt/${dev}dir/spdk
+
+ make -C /mnt/${dev}dir/spdk clean
+ (cd /mnt/${dev}dir/spdk && ./configure $config_params)
+ make -C /mnt/${dev}dir/spdk -j16
+
+ # Print out space consumed on target device to help decide
+ # if/when we need to increase the size of the malloc LUN
+ df -h /dev/$dev
+
+ rm -rf /mnt/${dev}dir/spdk
+done
+
+for dev in $devs; do
+ umount /mnt/${dev}dir
+ rm -rf /mnt/${dev}dir
+
+ stats=($(cat /sys/block/$dev/stat))
+ echo ""
+ echo "$dev stats"
+ printf "READ IO cnt: % 8u merges: % 8u sectors: % 8u ticks: % 8u\n" \
+ ${stats[0]} ${stats[1]} ${stats[2]} ${stats[3]}
+ printf "WRITE IO cnt: % 8u merges: % 8u sectors: % 8u ticks: % 8u\n" \
+ ${stats[4]} ${stats[5]} ${stats[6]} ${stats[7]}
+ printf "in flight: % 8u io ticks: % 8u time in queue: % 8u\n" \
+ ${stats[8]} ${stats[9]} ${stats[10]}
+ echo ""
+done
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+$rpc_py delete_error_bdev EE_Malloc0
+
+if [ -z "$NO_NVME" ]; then
+	$rpc_py destruct_split_vbdev Nvme0n1
+	$rpc_py delete_nvme_controller Nvme0
+fi
+
+killprocess $pid
+report_test_completion "nightly_iscsi_ext4test"
+timing_exit ext4test
diff --git a/src/spdk/test/iscsi_tgt/filesystem/filesystem.sh b/src/spdk/test/iscsi_tgt/filesystem/filesystem.sh
new file mode 100755
index 00000000..0c530b3b
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/filesystem/filesystem.sh
@@ -0,0 +1,136 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+source $rootdir/scripts/common.sh
+
+timing_enter filesystem
+
+rpc_py="$rootdir/scripts/rpc.py"
+# Remove lvol bdevs and stores.
+function remove_backends() {
+ echo "INFO: Removing lvol bdev"
+ $rpc_py destroy_lvol_bdev "lvs_0/lbd_0"
+
+ echo "INFO: Removing lvol stores"
+ $rpc_py destroy_lvol_store -l lvs_0
+
+ echo "INFO: Removing NVMe"
+ $rpc_py delete_nvme_controller Nvme0
+
+ return 0
+}
+
+timing_enter start_iscsi_tgt
+
+$ISCSI_APP -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py set_iscsi_options -o 30 -a 16
+$rpc_py start_subsystem_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+bdf=$(iter_pci_class_code 01 08 02 | head -1)
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py construct_nvme_bdev -b "Nvme0" -t "pcie" -a $bdf
+
+ls_guid=$($rpc_py construct_lvol_store Nvme0n1 lvs_0)
+free_mb=$(get_lvs_free_mb "$ls_guid")
+# Using maximum 2048MiB to reduce the test time
+if [ $free_mb -gt 2048 ]; then
+ $rpc_py construct_lvol_bdev -u $ls_guid lbd_0 2048
+else
+ $rpc_py construct_lvol_bdev -u $ls_guid lbd_0 $free_mb
+fi
+# "lvs_0/lbd_0:0" ==> use lvs_0/lbd_0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "256" ==> iSCSI queue depth 256
+# "-d" ==> disable CHAP authentication
+$rpc_py construct_target_node Target1 Target1_alias 'lvs_0/lbd_0:0' $PORTAL_TAG:$INITIATOR_TAG 256 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+
+trap "remove_backends; umount /mnt/device; rm -rf /mnt/device; iscsicleanup; killprocess $pid; exit 1" SIGINT SIGTERM EXIT
+
+sleep 1
+
+mkdir -p /mnt/device
+
+dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+
+parted -s /dev/$dev mklabel msdos
+parted -s /dev/$dev mkpart primary '0%' '100%'
+sleep 1
+
+for fstype in "ext4" "btrfs" "xfs"; do
+
+ if [ "$fstype" == "ext4" ]; then
+ mkfs.${fstype} -F /dev/${dev}1
+ else
+ mkfs.${fstype} -f /dev/${dev}1
+ fi
+ mount /dev/${dev}1 /mnt/device
+ if [ $RUN_NIGHTLY -eq 1 ]; then
+ fio -filename=/mnt/device/test -direct=1 -iodepth 64 -thread=1 -invalidate=1 -rw=randwrite -ioengine=libaio -bs=4k \
+ -size=1024M -name=job0
+ umount /mnt/device
+
+ iscsiadm -m node --logout
+ sleep 1
+ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+ sleep 1
+ dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+ mount -o rw /dev/${dev}1 /mnt/device
+ if [ -f "/mnt/device/test" ]; then
+ echo "File existed."
+ fio -filename=/mnt/device/test -direct=1 -iodepth 64 -thread=1 -invalidate=1 -rw=randread \
+ -ioengine=libaio -bs=4k -runtime=20 -time_based=1 -name=job0
+ else
+ echo "File doesn't exist."
+ exit 1
+ fi
+
+ rm -rf /mnt/device/test
+ umount /mnt/device
+ else
+ touch /mnt/device/aaa
+ umount /mnt/device
+
+ iscsiadm -m node --logout
+ sleep 1
+ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+ sleep 1
+ dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+ mount -o rw /dev/${dev}1 /mnt/device
+
+ if [ -f "/mnt/device/aaa" ]; then
+ echo "File existed."
+ else
+ echo "File doesn't exist."
+ exit 1
+ fi
+
+ rm -rf /mnt/device/aaa
+ umount /mnt/device
+ fi
+done
+
+rm -rf /mnt/device
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+remove_backends
+killprocess $pid
+timing_exit filesystem
diff --git a/src/spdk/test/iscsi_tgt/fio/fio.sh b/src/spdk/test/iscsi_tgt/fio/fio.sh
new file mode 100755
index 00000000..5fdeaed2
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/fio/fio.sh
@@ -0,0 +1,142 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+delete_tmp_files() {
+ rm -f $testdir/iscsi.conf
+ rm -f ./local-job0-0-verify.state
+}
+
+function running_config() {
+ # generate a config file from the running iscsi_tgt
+ # running_config.sh will leave the file at /tmp/iscsi.conf
+ $testdir/running_config.sh $pid
+ sleep 1
+
+ # now start iscsi_tgt again using the generated config file
+ # keep the same iscsiadm configuration to confirm that the
+ # config file matched the running configuration
+ killprocess $pid
+ trap "iscsicleanup; delete_tmp_files; exit 1" SIGINT SIGTERM EXIT
+
+ timing_enter start_iscsi_tgt2
+
+ $ISCSI_APP -c /tmp/iscsi.conf &
+ pid=$!
+ echo "Process pid: $pid"
+ trap "iscsicleanup; killprocess $pid; delete_tmp_files; exit 1" SIGINT SIGTERM EXIT
+ waitforlisten $pid
+ echo "iscsi_tgt is listening. Running tests..."
+
+ timing_exit start_iscsi_tgt2
+
+ sleep 1
+ $fio_py 4096 1 randrw 5
+}
+
+if [ -z "$TARGET_IP" ]; then
+ echo "TARGET_IP not defined in environment"
+ exit 1
+fi
+
+if [ -z "$INITIATOR_IP" ]; then
+ echo "INITIATOR_IP not defined in environment"
+ exit 1
+fi
+
+timing_enter fio
+
+cp $testdir/iscsi.conf.in $testdir/iscsi.conf
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=4096
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+$ISCSI_APP -c $testdir/iscsi.conf &
+pid=$!
+echo "Process pid: $pid"
+
+trap "killprocess $pid; rm -f $testdir/iscsi.conf; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+# Create a RAID-0 bdev from two malloc bdevs
+malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+$rpc_py construct_raid_bdev -n raid0 -s 64 -r 0 -b "$malloc_bdevs"
+# "raid0:0" ==> use raid0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py construct_target_node Target3 Target3_alias 'raid0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+
+trap "iscsicleanup; killprocess $pid; delete_tmp_files; exit 1" SIGINT SIGTERM EXIT
+
+sleep 1
+$fio_py 4096 1 randrw 1 verify
+$fio_py 131072 32 randrw 1 verify
+$fio_py 524288 128 randrw 1 verify
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ $fio_py 4096 1 write 300 verify
+
+ # Run the running_config test which will generate a config file from the
+ # running iSCSI target, then kill and restart the iSCSI target using the
+ # generated config file
+ # Temporarily disabled
+ # running_config
+fi
+
+# Start hotplug test case.
+$fio_py 1048576 128 rw 10 &
+fio_pid=$!
+
+sleep 3
+set +e
+# Delete raid0, Malloc0, Malloc1 blockdevs
+$rpc_py destroy_raid_bdev 'raid0'
+$rpc_py delete_malloc_bdev 'Malloc0'
+$rpc_py delete_malloc_bdev 'Malloc1'
+
+wait $fio_pid
+fio_status=$?
+
+if [ $fio_status -eq 0 ]; then
+ echo "iscsi hotplug test: fio successful - expected failure"
+ set -e
+ exit 1
+else
+ echo "iscsi hotplug test: fio failed as expected"
+fi
+
+set -e
+
+iscsicleanup
+$rpc_py delete_target_node 'iqn.2016-06.io.spdk:Target3'
+
+delete_tmp_files
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $pid
+#echo 1 > /sys/bus/pci/rescan
+#sleep 2
+$rootdir/scripts/setup.sh
+
+timing_exit fio
diff --git a/src/spdk/test/iscsi_tgt/fio/iscsi.conf.in b/src/spdk/test/iscsi_tgt/fio/iscsi.conf.in
new file mode 100644
index 00000000..be06af58
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/fio/iscsi.conf.in
@@ -0,0 +1,16 @@
+[Global]
+
+[iSCSI]
+ NodeBase "iqn.2016-06.io.spdk"
+ AuthFile /usr/local/etc/spdk/auth.conf
+ Timeout 30
+ DiscoveryAuthMethod Auto
+ MaxSessions 16
+ ImmediateData Yes
+ ErrorRecoveryLevel 0
+
+[Nvme]
+ RetryCount 4
+ ActionOnTimeout None
+ AdminPollRate 100000
+ HotplugEnable Yes
diff --git a/src/spdk/test/iscsi_tgt/fio/running_config.sh b/src/spdk/test/iscsi_tgt/fio/running_config.sh
new file mode 100755
index 00000000..ea59eb5a
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/fio/running_config.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -xe
+
+pid="$1"
+
+if [[ -z "$pid" ]]; then
+ echo "usage: $0 pid"
+ exit 1
+fi
+
+# delete any existing temporary iscsi.conf files
+rm -f /tmp/iscsi.conf*
+
+kill -USR1 "$pid"
+
+if [ ! -f $(ls /tmp/iscsi.conf.*) ]; then
+ echo "iscsi_tgt did not generate config file"
+ exit 1
+fi
+
+mv $(ls /tmp/iscsi.conf.*) /tmp/iscsi.conf
diff --git a/src/spdk/test/iscsi_tgt/initiator/initiator.sh b/src/spdk/test/iscsi_tgt/initiator/initiator.sh
new file mode 100755
index 00000000..8f3104a4
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/initiator/initiator.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+timing_enter initiator
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+timing_enter start_iscsi_tgt
+
+# Start the iSCSI target without using stub
+# Reason: Two SPDK processes will be started
+$ISCSI_APP -m 0x2 -p 1 -s 512 --wait-for-rpc &
+pid=$!
+echo "iSCSI target launched. pid: $pid"
+trap "killprocess $pid;exit 1" SIGINT SIGTERM EXIT
+waitforlisten $pid
+$rpc_py set_iscsi_options -o 30 -a 4
+$rpc_py start_subsystem_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "Malloc0:0" ==> use Malloc0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "256" ==> iSCSI queue depth 256
+# "-d" ==> disable CHAP authentication
+$rpc_py construct_target_node disk1 disk1_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 256 -d
+sleep 1
+trap "killprocess $pid; rm -f $testdir/bdev.conf; exit 1" SIGINT SIGTERM EXIT
+
+# Prepare config file for iSCSI initiator
+echo "[iSCSI_Initiator]" > $testdir/bdev.conf
+echo " URL iscsi://$TARGET_IP/iqn.2016-06.io.spdk:disk1/0 iSCSI0" >> $testdir/bdev.conf
+$rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdev.conf -q 128 -o 4096 -w verify -t 5 -s 512
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ $rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdev.conf -q 128 -o 4096 -w unmap -t 5 -s 512
+ $rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdev.conf -q 128 -o 4096 -w flush -t 5 -s 512
+ $rootdir/test/bdev/bdevperf/bdevperf -c $testdir/bdev.conf -q 128 -o 4096 -w reset -t 10 -s 512
+fi
+rm -f $testdir/bdev.conf
+
+trap - SIGINT SIGTERM EXIT
+
+killprocess $pid
+
+report_test_completion "iscsi_initiator"
+timing_exit initiator
diff --git a/src/spdk/test/iscsi_tgt/ip_migration/ip_migration.sh b/src/spdk/test/iscsi_tgt/ip_migration/ip_migration.sh
new file mode 100755
index 00000000..25332ff8
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/ip_migration/ip_migration.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+# Namespaces are NOT used here on purpose. This test would require changes to detect the
+# ifc_index of an interface that has been moved into a namespace, which add_ip_address needs.
+ISCSI_APP="$rootdir/app/iscsi_tgt/iscsi_tgt"
+NETMASK=127.0.0.0/24
+MIGRATION_ADDRESS=127.0.0.2
+
+function kill_all_iscsi_target() {
+ for ((i = 0; i < 2; i++)); do
+ rpc_addr="/var/tmp/spdk${i}.sock"
+ $rpc_py -s $rpc_addr kill_instance SIGTERM
+ done
+}
+
+function rpc_config() {
+ # $1 = RPC server address
+ # $2 = Netmask
+ $rpc_py -s $1 add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $2
+ $rpc_py -s $1 construct_malloc_bdev 64 512
+}
+
+function rpc_add_target_node() {
+ $rpc_py -s $1 add_ip_address 1 $MIGRATION_ADDRESS
+ $rpc_py -s $1 add_portal_group $PORTAL_TAG $MIGRATION_ADDRESS:$ISCSI_PORT
+ $rpc_py -s $1 construct_target_node target1 target1_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+}
+
+timing_enter ip_migration
+
+echo "Running ip migration tests"
+for ((i = 0; i < 2; i++)); do
+ timing_enter start_iscsi_tgt_$i
+
+ rpc_addr="/var/tmp/spdk${i}.sock"
+
+ # TODO: run the different iSCSI instances on non-overlapping CPU masks
+ $ISCSI_APP -r $rpc_addr -s 1000 -i $i -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+ pid=$!
+ echo "Process pid: $pid"
+
+ trap "kill_all_iscsi_target; exit 1" SIGINT SIGTERM EXIT
+
+ waitforlisten $pid $rpc_addr
+ $rpc_py -s $rpc_addr set_iscsi_options -o 30 -a 64
+ $rpc_py -s $rpc_addr start_subsystem_init
+ echo "iscsi_tgt is listening. Running tests..."
+
+ timing_exit start_iscsi_tgt_$i
+
+ rpc_config $rpc_addr $NETMASK
+ trap "kill_all_iscsi_target; exit 1" \
+ SIGINT SIGTERM EXIT
+done
+
+rpc_first_addr="/var/tmp/spdk0.sock"
+rpc_add_target_node $rpc_first_addr
+
+sleep 1
+iscsiadm -m discovery -t sendtargets -p $MIGRATION_ADDRESS:$ISCSI_PORT
+sleep 1
+iscsiadm -m node --login -p $MIGRATION_ADDRESS:$ISCSI_PORT
+
+# fio tests for multi-process
+sleep 1
+$fio_py 4096 32 randrw 10 &
+fiopid=$!
+sleep 5
+
+$rpc_py -s $rpc_first_addr kill_instance SIGTERM
+
+rpc_second_addr="/var/tmp/spdk1.sock"
+rpc_add_target_node $rpc_second_addr
+
+wait $fiopid
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+
+$rpc_py -s $rpc_second_addr kill_instance SIGTERM
+report_test_completion "nightly_iscsi_ip_migration"
+timing_exit ip_migration
diff --git a/src/spdk/test/iscsi_tgt/iscsi_tgt.sh b/src/spdk/test/iscsi_tgt/iscsi_tgt.sh
new file mode 100755
index 00000000..fbf9f239
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/iscsi_tgt.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+
+if [ ! $(uname -s) = Linux ]; then
+ exit 0
+fi
+
+source $rootdir/test/iscsi_tgt/common.sh
+
+timing_enter iscsi_tgt
+
+# $1 = test type (posix/vpp)
+if [ "$1" == "posix" ] || [ "$1" == "vpp" ]; then
+ TEST_TYPE=$1
+else
+ echo "No iSCSI test type specified"
+ exit 1
+fi
+
+# Network configuration
+create_veth_interfaces $TEST_TYPE
+
+# ISCSI_TEST_CORE_MASK is the biggest core mask specified by
+# any of the iscsi_tgt tests. Using this mask for the stub
+# ensures that if this mask spans CPU sockets, that we will
+# allocate memory from both sockets. The stub will *not*
+# run anything on the extra cores (and will sleep on master
+# core 0) so there is no impact to the iscsi_tgt tests by
+# specifying the bigger core mask.
+start_stub "-s 2048 -i 0 -m $ISCSI_TEST_CORE_MASK"
+trap "kill_stub; cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT
+
+run_test suite ./test/iscsi_tgt/calsoft/calsoft.sh
+run_test suite ./test/iscsi_tgt/filesystem/filesystem.sh
+run_test suite ./test/iscsi_tgt/reset/reset.sh
+run_test suite ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
+run_test suite ./test/iscsi_tgt/lvol/iscsi_lvol.sh
+run_test suite ./test/iscsi_tgt/fio/fio.sh
+run_test suite ./test/iscsi_tgt/qos/qos.sh
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ if [ $SPDK_TEST_PMDK -eq 1 ]; then
+ run_test suite ./test/iscsi_tgt/pmem/iscsi_pmem.sh 4096 10
+ fi
+ run_test suite ./test/iscsi_tgt/ip_migration/ip_migration.sh
+ run_test suite ./test/iscsi_tgt/ext4test/ext4test.sh
+ run_test suite ./test/iscsi_tgt/digests/digests.sh
+fi
+if [ $SPDK_TEST_RBD -eq 1 ]; then
+ run_test suite ./test/iscsi_tgt/rbd/rbd.sh
+fi
+
+trap "cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT
+kill_stub
+
+if [ $SPDK_TEST_NVMF -eq 1 ]; then
+ # TODO: enable remote NVMe controllers with multi-process so that
+ # we can use the stub for this test
+	# Test configuring a remote NVMe device via RPC and via a config file
+ run_test suite ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
+fi
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ run_test suite ./test/iscsi_tgt/multiconnection/multiconnection.sh
+fi
+
+if [ $SPDK_TEST_ISCSI_INITIATOR -eq 1 ]; then
+ run_test suite ./test/iscsi_tgt/initiator/initiator.sh
+ run_test suite ./test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh
+fi
+
+cleanup_veth_interfaces $TEST_TYPE
+trap - SIGINT SIGTERM EXIT
+timing_exit iscsi_tgt
diff --git a/src/spdk/test/iscsi_tgt/iscsijson/json_config.sh b/src/spdk/test/iscsi_tgt/iscsijson/json_config.sh
new file mode 100755
index 00000000..cec662b7
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/iscsijson/json_config.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+set -xe
+ISCSI_JSON_DIR=$(readlink -f $(dirname $0))
+. $ISCSI_JSON_DIR/../../json_config/common.sh
+. $JSON_DIR/../iscsi_tgt/common.sh
+base_iscsi_config=$JSON_DIR/base_iscsi_config.json
+last_iscsi_config=$JSON_DIR/last_iscsi_config.json
+rpc_py="$spdk_rpc_py"
+clear_config_py="$spdk_clear_config_py"
+trap 'on_error_exit "${FUNCNAME}" "${LINENO}"; rm -f $base_iscsi_config $last_iscsi_config' ERR
+
+timing_enter iscsi_json_config
+run_spdk_tgt
+$rpc_py start_subsystem_init
+
+timing_enter iscsi_json_config_create_setup
+$rpc_py add_portal_group $PORTAL_TAG 127.0.0.1:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py construct_malloc_bdev 64 4096 --name Malloc0
+$rpc_py construct_target_node Target3 Target3_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+$rpc_py save_config > $base_iscsi_config
+timing_exit iscsi_json_config_create_setup
+
+timing_enter iscsi_json_config_test
+test_json_config
+timing_exit iscsi_json_config_test
+
+timing_enter iscsi_json_config_restart_spdk
+$clear_config_py clear_config
+kill_targets
+run_spdk_tgt
+$rpc_py load_config < $base_iscsi_config
+$rpc_py save_config > $last_iscsi_config
+timing_exit iscsi_json_config_restart_spdk
+
+json_diff $base_iscsi_config $last_iscsi_config
+
+$clear_config_py clear_config
+kill_targets
+rm -f $base_iscsi_config $last_iscsi_config
+
+timing_exit iscsi_json_config
+report_test_completion iscsi_json_config
diff --git a/src/spdk/test/iscsi_tgt/lvol/iscsi_lvol.sh b/src/spdk/test/iscsi_tgt/lvol/iscsi_lvol.sh
new file mode 100755
index 00000000..c3df3af7
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/lvol/iscsi_lvol.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+timing_enter iscsi_lvol
+
+MALLOC_BDEV_SIZE=128
+MALLOC_BLOCK_SIZE=512
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ NUM_LVS=10
+ NUM_LVOL=10
+else
+ NUM_LVS=2
+ NUM_LVOL=2
+fi
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+$ISCSI_APP -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap "iscsicleanup; killprocess $pid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py set_iscsi_options -o 30 -a 16
+$rpc_py start_subsystem_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+timing_enter setup
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+# Create the first LVS on a RAID-0 bdev built from two malloc bdevs.
+# Create each remaining LVS on its own malloc bdev.
+for i in $(seq 1 $NUM_LVS); do
+ INITIATOR_TAG=$((i + 2))
+ $rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+ if [ $i -eq 1 ]; then
+ # construct RAID bdev and put its name in $bdev
+ malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+ malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+ $rpc_py construct_raid_bdev -n raid0 -s 64 -r 0 -b "$malloc_bdevs"
+ bdev="raid0"
+ else
+ # construct malloc bdev and put its name in $bdev
+ bdev=$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)
+ fi
+ ls_guid=$($rpc_py construct_lvol_store $bdev lvs_$i -c 1048576)
+ LUNs=""
+ for j in $(seq 1 $NUM_LVOL); do
+ lb_name=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_$j 10)
+ LUNs+="$lb_name:$((j - 1)) "
+ done
+ $rpc_py construct_target_node Target$i Target${i}_alias "$LUNs" "1:$INITIATOR_TAG" 256 -d
+done
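+# With the non-nightly defaults (NUM_LVS=2, NUM_LVOL=2) this creates Target1 backed by
+# lvs_1 on the raid0 bdev and Target2 backed by lvs_2 on a malloc bdev, each exposing
+# its two lvol bdevs as LUN 0 and LUN 1.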
+timing_exit setup
+
+sleep 1
+
+timing_enter discovery
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+timing_exit discovery
+
+timing_enter fio
+$fio_py 131072 8 randwrite 10 verify
+timing_exit fio
+
+rm -f ./local-job0-0-verify.state
+trap - SIGINT SIGTERM EXIT
+
+rm -f ./local-job*
+iscsicleanup
+killprocess $pid
+timing_exit iscsi_lvol
diff --git a/src/spdk/test/iscsi_tgt/multiconnection/multiconnection.sh b/src/spdk/test/iscsi_tgt/multiconnection/multiconnection.sh
new file mode 100755
index 00000000..b793d751
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/multiconnection/multiconnection.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+CONNECTION_NUMBER=30
+
+# Remove lvol bdevs and stores.
+function remove_backends() {
+ echo "INFO: Removing lvol bdevs"
+ for i in $(seq 1 $CONNECTION_NUMBER); do
+ lun="lvs0/lbd_$i"
+ $rpc_py destroy_lvol_bdev $lun
+ echo -e "\tINFO: lvol bdev $lun removed"
+ done
+ sleep 1
+
+ echo "INFO: Removing lvol stores"
+ $rpc_py destroy_lvol_store -l lvs0
+ echo "INFO: lvol store lvs0 removed"
+
+ echo "INFO: Removing NVMe"
+ $rpc_py delete_nvme_controller Nvme0
+
+ return 0
+}
+
+set -e
+timing_enter multiconnection
+
+timing_enter start_iscsi_tgt
+# Start the iSCSI target without using stub.
+$ISCSI_APP --wait-for-rpc &
+iscsipid=$!
+echo "iSCSI target launched. pid: $iscsipid"
+trap "remove_backends; iscsicleanup; killprocess $iscsipid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $iscsipid
+$rpc_py set_iscsi_options -o 30 -a 128
+$rpc_py start_subsystem_init
+$rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
+timing_exit start_iscsi_tgt
+
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+
+echo "Creating an iSCSI target node."
+ls_guid=$($rpc_py construct_lvol_store "Nvme0n1" "lvs0" -c 1048576)
+
+# Assign even size for each lvol_bdev.
+get_lvs_free_mb $ls_guid
+lvol_bdev_size=$(($free_mb / $CONNECTION_NUMBER))
+for i in $(seq 1 $CONNECTION_NUMBER); do
+ $rpc_py construct_lvol_bdev -u $ls_guid lbd_$i $lvol_bdev_size
+done
+
+for i in $(seq 1 $CONNECTION_NUMBER); do
+ lun="lvs0/lbd_$i:0"
+ $rpc_py construct_target_node Target$i Target${i}_alias "$lun" $PORTAL_TAG:$INITIATOR_TAG 256 -d
+done
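+# At this point there are $CONNECTION_NUMBER (30) target nodes, each exposing one lvol
+# bdev from lvs0 as LUN 0, so the single login below opens 30 iSCSI sessions at once.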
+sleep 1
+
+echo "Logging into iSCSI target."
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+sleep 1
+
+echo "Running FIO"
+$fio_py 131072 64 randrw 5
+$fio_py 262144 16 randwrite 10
+sync
+
+trap - SIGINT SIGTERM EXIT
+
+rm -f ./local-job*
+iscsicleanup
+remove_backends
+killprocess $iscsipid
+timing_exit multiconnection
diff --git a/src/spdk/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh b/src/spdk/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
new file mode 100755
index 00000000..2f00b7a5
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+
+set -e
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
+ echo "no NIC for nvmf test"
+ exit 0
+fi
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+NVMF_PORT=4420
+
+# Namespaces are NOT used here on purpose. The rxe_cfg utility used for NVMf tests does not support namespaces.
+TARGET_IP=127.0.0.1
+INITIATOR_IP=127.0.0.1
+NETMASK=$INITIATOR_IP/32
+
+function run_nvme_remote() {
+	echo "Now using the $1 method to run the iSCSI target."
+
+ # Start the iSCSI target without using stub
+ iscsi_rpc_addr="/var/tmp/spdk-iscsi.sock"
+ ISCSI_APP="$rootdir/app/iscsi_tgt/iscsi_tgt"
+ $ISCSI_APP -r "$iscsi_rpc_addr" -m 0x1 -p 0 -s 512 --wait-for-rpc &
+ iscsipid=$!
+ echo "iSCSI target launched. pid: $iscsipid"
+ trap "killprocess $iscsipid; killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
+ waitforlisten $iscsipid "$iscsi_rpc_addr"
+ $rpc_py -s "$iscsi_rpc_addr" set_iscsi_options -o 30 -a 16
+ $rpc_py -s "$iscsi_rpc_addr" start_subsystem_init
+ if [ "$1" = "remote" ]; then
+ $rpc_py -s $iscsi_rpc_addr construct_nvme_bdev -b "Nvme0" -t "rdma" -f "ipv4" -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
+ fi
+
+ echo "iSCSI target has started."
+
+ timing_exit start_iscsi_tgt
+
+ echo "Creating an iSCSI target node."
+ $rpc_py -s "$iscsi_rpc_addr" add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+ $rpc_py -s "$iscsi_rpc_addr" add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+ if [ "$1" = "local" ]; then
+ $rpc_py -s "$iscsi_rpc_addr" construct_nvme_bdev -b "Nvme0" -t "rdma" -f "ipv4" -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
+ fi
+ $rpc_py -s "$iscsi_rpc_addr" construct_target_node Target1 Target1_alias 'Nvme0n1:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+ sleep 1
+
+ echo "Logging in to iSCSI target."
+ iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+}
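+# Within this function, the only difference between the "local" and "remote" runs is when
+# the NVMe-oF bdev (Nvme0) is attached: immediately after start_subsystem_init for
+# "remote", or only after the portal and initiator groups are created for "local".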
+
+timing_enter nvme_remote
+
+# Start the NVMf target
+NVMF_APP="$rootdir/app/nvmf_tgt/nvmf_tgt"
+$NVMF_APP -m 0x2 -p 1 -s 512 --wait-for-rpc &
+nvmfpid=$!
+echo "NVMf target launched. pid: $nvmfpid"
+trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
+waitforlisten $nvmfpid
+$rpc_py start_subsystem_init
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+echo "NVMf target has started."
+bdevs=$($rpc_py construct_malloc_bdev 64 512)
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
+for bdev in $bdevs; do
+ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+echo "NVMf subsystem created."
+
+timing_enter start_iscsi_tgt
+
+run_nvme_remote "local"
+
+trap "iscsicleanup; killprocess $iscsipid; killprocess $nvmfpid; \
+ rm -f ./local-job0-0-verify.state; exit 1" SIGINT SIGTERM EXIT
+sleep 1
+
+echo "Running FIO"
+$fio_py 4096 1 randrw 1 verify
+
+rm -f ./local-job0-0-verify.state
+iscsicleanup
+killprocess $iscsipid
+
+run_nvme_remote "remote"
+
+echo "Running FIO"
+$fio_py 4096 1 randrw 1 verify
+
+rm -f ./local-job0-0-verify.state
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+killprocess $iscsipid
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+killprocess $nvmfpid
+
+report_test_completion "iscsi_nvme_remote"
+timing_exit nvme_remote
diff --git a/src/spdk/test/iscsi_tgt/pmem/iscsi_pmem.sh b/src/spdk/test/iscsi_tgt/pmem/iscsi_pmem.sh
new file mode 100755
index 00000000..063bb695
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/pmem/iscsi_pmem.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+BLOCKSIZE=$1
+RUNTIME=$2
+PMEM_BDEVS=""
+PMEM_SIZE=128
+PMEM_BLOCK_SIZE=512
+TGT_NR=10
+PMEM_PER_TGT=1
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter iscsi_pmem
+
+timing_enter start_iscsi_target
+$ISCSI_APP -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap "iscsicleanup; killprocess $pid; rm -f /tmp/pool_file*; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py set_iscsi_options -o 30 -a 16
+$rpc_py start_subsystem_init
+echo "iscsi_tgt is listening. Running tests..."
+timing_exit start_iscsi_target
+
+timing_enter setup
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+for i in $(seq 1 $TGT_NR); do
+ INITIATOR_TAG=$((i + 1))
+ $rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+
+ luns=""
+ for j in $(seq 1 $PMEM_PER_TGT); do
+ $rpc_py create_pmem_pool /tmp/pool_file${i}_${j} $PMEM_SIZE $PMEM_BLOCK_SIZE
+ bdevs_name="$($rpc_py construct_pmem_bdev -n pmem${i}_${j} /tmp/pool_file${i}_${j})"
+ PMEM_BDEVS+="$bdevs_name "
+ luns+="$bdevs_name:$((j - 1)) "
+ done
+ $rpc_py construct_target_node Target$i Target${i}_alias "$luns" "1:$INITIATOR_TAG " 256 -d
+done
+timing_exit setup
+sleep 1
+
+timing_enter discovery
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+timing_exit discovery
+
+timing_enter fio_test
+$fio_py $BLOCKSIZE 64 randwrite $RUNTIME verify
+timing_exit fio_test
+
+iscsicleanup
+
+for pmem in $PMEM_BDEVS; do
+ $rpc_py delete_pmem_bdev $pmem
+done
+
+for i in $(seq 1 $TGT_NR); do
+ for c in $(seq 1 $PMEM_PER_TGT); do
+ $rpc_py delete_pmem_pool /tmp/pool_file${i}_${c}
+ done
+done
+
+trap - SIGINT SIGTERM EXIT
+
+rm -f ./local-job*
+rm -f /tmp/pool_file*
+killprocess $pid
+report_test_completion "nightly_iscsi_pmem"
+timing_exit iscsi_pmem
diff --git a/src/spdk/test/iscsi_tgt/qos/qos.sh b/src/spdk/test/iscsi_tgt/qos/qos.sh
new file mode 100755
index 00000000..da12f8f8
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/qos/qos.sh
@@ -0,0 +1,98 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+function check_qos_works_well() {
+ local enable_limit=$1
+ local iops_limit=$2
+ local retval=0
+
+ start_io_count=$($rpc_py get_bdevs_iostat -b $3 | jq -r '.[1].num_read_ops')
+ $fio_py 512 64 randread 5
+ end_io_count=$($rpc_py get_bdevs_iostat -b $3 | jq -r '.[1].num_read_ops')
+
+ read_iops=$(((end_io_count-start_io_count)/5))
+
+ if [ $enable_limit = true ]; then
+ retval=$(echo "$iops_limit*0.9 < $read_iops && $read_iops < $iops_limit*1.01" | bc)
+ if [ $retval -eq 0 ]; then
+			echo "Failed to limit the read I/O rate of the malloc bdev using QoS"
+ exit 1
+ fi
+ else
+ retval=$(echo "$read_iops > $iops_limit" | bc)
+ if [ $retval -eq 0 ]; then
+			echo "Read IOPS ($read_iops) did not exceed $iops_limit after removing the limit"
+ exit 1
+ fi
+ fi
+}
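+# Example: check_qos_works_well true $IOPS_LIMIT Malloc0 runs a 5-second randread fio job
+# and requires the measured read IOPS on Malloc0 to land between 90% and 101% of the
+# limit; with "false" it only requires the measured IOPS to exceed the old limit.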
+
+if [ -z "$TARGET_IP" ]; then
+ echo "TARGET_IP not defined in environment"
+ exit 1
+fi
+
+if [ -z "$INITIATOR_IP" ]; then
+ echo "INITIATOR_IP not defined in environment"
+ exit 1
+fi
+
+timing_enter qos
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+IOPS_LIMIT=20000
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+$ISCSI_APP &
+pid=$!
+echo "Process pid: $pid"
+trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT
+waitforlisten $pid
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "Malloc0:0" ==> use Malloc0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py construct_target_node Target1 Target1_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+
+trap "iscsicleanup; killprocess $pid; exit 1" SIGINT SIGTERM EXIT
+
+# Limit the I/O rate by RPC, then confirm the observed rate matches.
+$rpc_py set_bdev_qos_limit Malloc0 --rw_ios_per_sec $IOPS_LIMIT
+check_qos_works_well true $IOPS_LIMIT Malloc0
+
+# Now disable the rate limiting, and confirm the observed rate is not limited anymore.
+$rpc_py set_bdev_qos_limit Malloc0 --rw_ios_per_sec 0
+check_qos_works_well false $IOPS_LIMIT Malloc0
+
+# Limit the I/O rate again.
+$rpc_py set_bdev_qos_limit Malloc0 --rw_ios_per_sec $IOPS_LIMIT
+check_qos_works_well true $IOPS_LIMIT Malloc0
+echo "I/O rate limiting tests successful"
+
+iscsicleanup
+$rpc_py delete_target_node 'iqn.2016-06.io.spdk:Target1'
+
+rm -f ./local-job0-0-verify.state
+trap - SIGINT SIGTERM EXIT
+killprocess $pid
+
+timing_exit qos
diff --git a/src/spdk/test/iscsi_tgt/rbd/rbd.sh b/src/spdk/test/iscsi_tgt/rbd/rbd.sh
new file mode 100755
index 00000000..27d86159
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/rbd/rbd.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+if ! hash ceph; then
+ echo "Ceph not detected on this system; skipping RBD tests"
+ exit 0
+fi
+
+timing_enter rbd_setup
+rbd_setup $TARGET_IP $TARGET_NAMESPACE
+trap "rbd_cleanup; exit 1" SIGINT SIGTERM EXIT
+timing_exit rbd_setup
+
+timing_enter rbd
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+timing_enter start_iscsi_tgt
+
+$ISCSI_APP -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
+pid=$!
+
+trap "killprocess $pid; rbd_cleanup; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py set_iscsi_options -o 30 -a 16
+$rpc_py start_subsystem_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+rbd_bdev="$($rpc_py construct_rbd_bdev $RBD_POOL $RBD_NAME 4096)"
+$rpc_py get_bdevs
+# "Ceph0:0" ==> use Ceph0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py construct_target_node Target3 Target3_alias 'Ceph0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+
+trap "iscsicleanup; killprocess $pid; rbd_cleanup; exit 1" SIGINT SIGTERM EXIT
+
+sleep 1
+$fio_py 4096 1 randrw 1 verify
+$fio_py 131072 32 randrw 1 verify
+
+rm -f ./local-job0-0-verify.state
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+$rpc_py delete_rbd_bdev $rbd_bdev
+killprocess $pid
+rbd_cleanup
+
+report_test_completion "iscsi_rbd"
+timing_exit rbd
diff --git a/src/spdk/test/iscsi_tgt/reset/reset.sh b/src/spdk/test/iscsi_tgt/reset/reset.sh
new file mode 100755
index 00000000..0e986ac5
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/reset/reset.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+set -xe
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+timing_enter reset
+
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
+rpc_py="$rootdir/scripts/rpc.py"
+fio_py="$rootdir/scripts/fio.py"
+
+if ! hash sg_reset; then
+ exit 1
+fi
+
+timing_enter start_iscsi_tgt
+
+$ISCSI_APP --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py set_iscsi_options -o 30 -a 16
+$rpc_py start_subsystem_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_py add_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
+$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
+$rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE
+# "Malloc0:0" ==> use Malloc0 blockdev for LUN0
+# "1:2" ==> map PortalGroup1 to InitiatorGroup2
+# "64" ==> iSCSI queue depth 64
+# "-d" ==> disable CHAP authentication
+$rpc_py construct_target_node Target3 Target3_alias 'Malloc0:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
+sleep 1
+
+iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
+iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
+sleep 1
+dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
+
+sleep 1
+$fio_py 512 1 read 60 &
+fiopid=$!
+echo "FIO pid: $fiopid"
+
+trap "iscsicleanup; killprocess $pid; killprocess $fiopid; exit 1" SIGINT SIGTERM EXIT
+
+# Do 3 resets while making sure iscsi_tgt and fio are still running
+for i in 1 2 3; do
+ sleep 1
+ kill -s 0 $pid
+ kill -s 0 $fiopid
+ sg_reset -d /dev/$dev
+ sleep 1
+ kill -s 0 $pid
+ kill -s 0 $fiopid
+done
+
+kill $fiopid
+wait $fiopid || true
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+killprocess $pid
+timing_exit reset
diff --git a/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.py b/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.py
new file mode 100755
index 00000000..03647c47
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.py
@@ -0,0 +1,502 @@
+#!/usr/bin/env python3
+
+
+import os
+import os.path
+import re
+import sys
+import time
+import json
+import random
+from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError
+
+if len(sys.argv) == 8:
+    target_ip = sys.argv[2]
+    initiator_ip = sys.argv[3]
+    port = sys.argv[4]
+    netmask = sys.argv[5]
+    namespace = sys.argv[6]
+    test_type = sys.argv[7]
+else:
+    print("usage: rpc_config.py <rpc_py> <target_ip> <initiator_ip> <port> <netmask> <namespace> <test_type>")
+    sys.exit(1)
+
+ns_cmd = 'ip netns exec ' + namespace
+other_ip = '127.0.0.6'
+initiator_name = 'ANY'
+portal_tag = '1'
+initiator_tag = '1'
+
+rpc_param = {
+ 'target_ip': target_ip,
+ 'initiator_ip': initiator_ip,
+ 'port': port,
+ 'initiator_name': initiator_name,
+ 'netmask': netmask,
+ 'lun_total': 3,
+ 'malloc_bdev_size': 64,
+ 'malloc_block_size': 512,
+ 'queue_depth': 64,
+ 'target_name': 'Target3',
+ 'alias_name': 'Target3_alias',
+ 'disable_chap': True,
+ 'mutual_chap': False,
+ 'require_chap': False,
+ 'chap_group': 0,
+ 'header_digest': False,
+ 'data_digest': False,
+ 'trace_flag': 'rpc',
+ 'cpumask': 0x1
+}
+
+
+class RpcException(Exception):
+
+ def __init__(self, retval, *args):
+ super(RpcException, self).__init__(*args)
+ self.retval = retval
+
+
+class spdk_rpc(object):
+
+ def __init__(self, rpc_py):
+ self.rpc_py = rpc_py
+
+ def __getattr__(self, name):
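+        # Turn any attribute access into an rpc.py subcommand, e.g.
+        # rpc.get_bdevs() runs "<rpc_py> get_bdevs" and returns its stdout.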
+ def call(*args):
+ cmd = "{} {}".format(self.rpc_py, name)
+ for arg in args:
+ cmd += " {}".format(arg)
+ return check_output(cmd, shell=True)
+ return call
+
+
+def verify(expr, retcode, msg):
+ if not expr:
+ raise RpcException(retcode, msg)
+
+
+def verify_trace_flag_rpc_methods(rpc_py, rpc_param):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.get_trace_flags()
+ jsonvalue = json.loads(output)
+ verify(not jsonvalue[rpc_param['trace_flag']], 1,
+ "get_trace_flags returned {}, expected false".format(jsonvalue))
+ rpc.set_trace_flag(rpc_param['trace_flag'])
+ output = rpc.get_trace_flags()
+ jsonvalue = json.loads(output)
+ verify(jsonvalue[rpc_param['trace_flag']], 1,
+ "get_trace_flags returned {}, expected true".format(jsonvalue))
+ rpc.clear_trace_flag(rpc_param['trace_flag'])
+ output = rpc.get_trace_flags()
+ jsonvalue = json.loads(output)
+ verify(not jsonvalue[rpc_param['trace_flag']], 1,
+ "get_trace_flags returned {}, expected false".format(jsonvalue))
+
+ print("verify_trace_flag_rpc_methods passed")
+
+
+def verify_iscsi_connection_rpc_methods(rpc_py):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.get_iscsi_connections()
+ jsonvalue = json.loads(output)
+ verify(not jsonvalue, 1,
+ "get_iscsi_connections returned {}, expected empty".format(jsonvalue))
+
+ rpc.construct_malloc_bdev(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
+ rpc.add_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port'])))
+ rpc.add_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'])
+
+ lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0"
+ net_mapping = portal_tag + ":" + initiator_tag
+ rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'], '-d')
+ check_output('iscsiadm -m discovery -t st -p {}'.format(rpc_param['target_ip']), shell=True)
+ check_output('iscsiadm -m node --login', shell=True)
+ name = json.loads(rpc.get_target_nodes())[0]['name']
+ output = rpc.get_iscsi_connections()
+ jsonvalues = json.loads(output)
+    verify(jsonvalues[0]['target_node_name'] == rpc_param['target_name'], 1,
+           "target node name value is {}, expected {}".format(jsonvalues[0]['target_node_name'], rpc_param['target_name']))
+    verify(jsonvalues[0]['id'] == 0, 1,
+           "device id value is {}, expected 0".format(jsonvalues[0]['id']))
+    verify(jsonvalues[0]['initiator_addr'] == rpc_param['initiator_ip'], 1,
+           "initiator address value is {}, expected {}".format(jsonvalues[0]['initiator_addr'], rpc_param['initiator_ip']))
+    verify(jsonvalues[0]['target_addr'] == rpc_param['target_ip'], 1,
+           "target address value is {}, expected {}".format(jsonvalues[0]['target_addr'], rpc_param['target_ip']))
+
+ check_output('iscsiadm -m node --logout', shell=True)
+ check_output('iscsiadm -m node -o delete', shell=True)
+ rpc.delete_initiator_group(initiator_tag)
+ rpc.delete_portal_group(portal_tag)
+ rpc.delete_target_node(name)
+ output = rpc.get_iscsi_connections()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "get_iscsi_connections returned {}, expected empty".format(jsonvalues))
+
+ print("verify_iscsi_connection_rpc_methods passed")
+
+
+def verify_scsi_devices_rpc_methods(rpc_py):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.get_scsi_devices()
+ jsonvalue = json.loads(output)
+ verify(not jsonvalue, 1,
+ "get_scsi_devices returned {}, expected empty".format(jsonvalue))
+
+ rpc.construct_malloc_bdev(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
+ rpc.add_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port'])))
+ rpc.add_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'])
+
+ lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0"
+ net_mapping = portal_tag + ":" + initiator_tag
+ rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'], '-d')
+ check_output('iscsiadm -m discovery -t st -p {}'.format(rpc_param['target_ip']), shell=True)
+ check_output('iscsiadm -m node --login', shell=True)
+ name = json.loads(rpc.get_target_nodes())[0]['name']
+ output = rpc.get_iscsi_global_params()
+ jsonvalues = json.loads(output)
+ nodebase = jsonvalues['node_base']
+ output = rpc.get_scsi_devices()
+ jsonvalues = json.loads(output)
+    verify(jsonvalues[0]['device_name'] == nodebase + ":" + rpc_param['target_name'], 1,
+           "device name value is {}, expected {}".format(jsonvalues[0]['device_name'], rpc_param['target_name']))
+ verify(jsonvalues[0]['id'] == 0, 1,
+ "device id value is {}, expected 0".format(jsonvalues[0]['id']))
+
+ check_output('iscsiadm -m node --logout', shell=True)
+ check_output('iscsiadm -m node -o delete', shell=True)
+ rpc.delete_initiator_group(initiator_tag)
+ rpc.delete_portal_group(portal_tag)
+ rpc.delete_target_node(name)
+ output = rpc.get_scsi_devices()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "get_scsi_devices returned {}, expected empty".format(jsonvalues))
+
+ print("verify_scsi_devices_rpc_methods passed")
+
+
+def create_malloc_bdevs_rpc_methods(rpc_py, rpc_param):
+ rpc = spdk_rpc(rpc_py)
+
+ for i in range(1, rpc_param['lun_total'] + 1):
+ rpc.construct_malloc_bdev(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
+
+ print("create_malloc_bdevs_rpc_methods passed")
+
+
+def verify_portal_groups_rpc_methods(rpc_py, rpc_param):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.get_portal_groups()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "get_portal_groups returned {} groups, expected empty".format(jsonvalues))
+
+ lo_ip = (target_ip, other_ip)
+ nics = json.loads(rpc.get_interfaces())
+ for x in nics:
+ if x["ifc_index"] == 'lo':
+ rpc.add_ip_address(x["ifc_index"], lo_ip[1])
+ for idx, value in enumerate(lo_ip):
+ # The portal group tag must start at 1
+ tag = idx + 1
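+        # Portal specification format: <host>:<port>@<cpumask>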
+ rpc.add_portal_group(tag, "{}:{}@{}".format(value, rpc_param['port'], rpc_param['cpumask']))
+ output = rpc.get_portal_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == tag, 1,
+ "get_portal_groups returned {} groups, expected {}".format(len(jsonvalues), tag))
+
+ tag_list = []
+ for idx, value in enumerate(jsonvalues):
+ verify(value['portals'][0]['host'] == lo_ip[idx], 1,
+               "host value is {}, expected {}".format(value['portals'][0]['host'], lo_ip[idx]))
+ verify(value['portals'][0]['port'] == str(rpc_param['port']), 1,
+ "port value is {}, expected {}".format(value['portals'][0]['port'], str(rpc_param['port'])))
+ verify(value['portals'][0]['cpumask'] == format(rpc_param['cpumask'], '#x'), 1,
+ "cpumask value is {}, expected {}".format(value['portals'][0]['cpumask'], format(rpc_param['cpumask'], '#x')))
+ tag_list.append(value['tag'])
+ verify(value['tag'] == idx + 1, 1,
+ "tag value is {}, expected {}".format(value['tag'], idx + 1))
+
+ for idx, value in enumerate(tag_list):
+ rpc.delete_portal_group(value)
+ output = rpc.get_portal_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == (len(tag_list) - (idx + 1)), 1,
+               "get_portal_groups returned {} groups, expected {}".format(len(jsonvalues), (len(tag_list) - (idx + 1))))
+ if not jsonvalues:
+ break
+
+ for jidx, jvalue in enumerate(jsonvalues):
+ verify(jvalue['portals'][0]['host'] == lo_ip[idx + jidx + 1], 1,
+ "host value is {}, expected {}".format(jvalue['portals'][0]['host'], lo_ip[idx + jidx + 1]))
+ verify(jvalue['portals'][0]['port'] == str(rpc_param['port']), 1,
+ "port value is {}, expected {}".format(jvalue['portals'][0]['port'], str(rpc_param['port'])))
+ verify(jvalue['portals'][0]['cpumask'] == format(rpc_param['cpumask'], '#x'), 1,
+ "cpumask value is {}, expected {}".format(jvalue['portals'][0]['cpumask'], format(rpc_param['cpumask'], '#x')))
+ verify(jvalue['tag'] != value or jvalue['tag'] == tag_list[idx + jidx + 1], 1,
+ "tag value is {}, expected {} and not {}".format(jvalue['tag'], tag_list[idx + jidx + 1], value))
+
+ for x in nics:
+ if x["ifc_index"] == 'lo':
+ rpc.delete_ip_address(x["ifc_index"], lo_ip[1])
+
+ print("verify_portal_groups_rpc_methods passed")
+
+
+def verify_initiator_groups_rpc_methods(rpc_py, rpc_param):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.get_initiator_groups()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "get_initiator_groups returned {}, expected empty".format(jsonvalues))
+ for idx, value in enumerate(rpc_param['netmask']):
+ # The initiator group tag must start at 1
+ tag = idx + 1
+ rpc.add_initiator_group(tag, rpc_param['initiator_name'], value)
+ output = rpc.get_initiator_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == tag, 1,
+ "get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), tag))
+
+ tag_list = []
+ for idx, value in enumerate(jsonvalues):
+ verify(value['initiators'][0] == rpc_param['initiator_name'], 1,
+ "initiator value is {}, expected {}".format(value['initiators'][0], rpc_param['initiator_name']))
+ tag_list.append(value['tag'])
+ verify(value['tag'] == idx + 1, 1,
+ "tag value is {}, expected {}".format(value['tag'], idx + 1))
+ verify(value['netmasks'][0] == rpc_param['netmask'][idx], 1,
+ "netmasks value is {}, expected {}".format(value['netmasks'][0], rpc_param['netmask'][idx]))
+
+ for idx, value in enumerate(rpc_param['netmask']):
+ tag = idx + 1
+ rpc.delete_initiators_from_initiator_group(tag, '-n', rpc_param['initiator_name'], '-m', value)
+
+ output = rpc.get_initiator_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == tag, 1,
+ "get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), tag))
+
+ for idx, value in enumerate(jsonvalues):
+ verify(value['tag'] == idx + 1, 1,
+ "tag value is {}, expected {}".format(value['tag'], idx + 1))
+ initiators = value.get('initiators')
+ verify(len(initiators) == 0, 1,
+ "length of initiator list is {}, expected 0".format(len(initiators)))
+ netmasks = value.get('netmasks')
+ verify(len(netmasks) == 0, 1,
+ "length of netmask list is {}, expected 0".format(len(netmasks)))
+
+ for idx, value in enumerate(rpc_param['netmask']):
+ tag = idx + 1
+ rpc.add_initiators_to_initiator_group(tag, '-n', rpc_param['initiator_name'], '-m', value)
+ output = rpc.get_initiator_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == tag, 1,
+ "get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), tag))
+
+ tag_list = []
+ for idx, value in enumerate(jsonvalues):
+ verify(value['initiators'][0] == rpc_param['initiator_name'], 1,
+ "initiator value is {}, expected {}".format(value['initiators'][0], rpc_param['initiator_name']))
+ tag_list.append(value['tag'])
+ verify(value['tag'] == idx + 1, 1,
+ "tag value is {}, expected {}".format(value['tag'], idx + 1))
+ verify(value['netmasks'][0] == rpc_param['netmask'][idx], 1,
+ "netmasks value is {}, expected {}".format(value['netmasks'][0], rpc_param['netmask'][idx]))
+
+ for idx, value in enumerate(tag_list):
+ rpc.delete_initiator_group(value)
+ output = rpc.get_initiator_groups()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == (len(tag_list) - (idx + 1)), 1,
+ "get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), (len(tag_list) - (idx + 1))))
+ if not jsonvalues:
+ break
+ for jidx, jvalue in enumerate(jsonvalues):
+ verify(jvalue['initiators'][0] == rpc_param['initiator_name'], 1,
+ "initiator value is {}, expected {}".format(jvalue['initiators'][0], rpc_param['initiator_name']))
+ verify(jvalue['tag'] != value or jvalue['tag'] == tag_list[idx + jidx + 1], 1,
+ "tag value is {}, expected {} and not {}".format(jvalue['tag'], tag_list[idx + jidx + 1], value))
+ verify(jvalue['netmasks'][0] == rpc_param['netmask'][idx + jidx + 1], 1,
+ "netmasks value is {}, expected {}".format(jvalue['netmasks'][0], rpc_param['netmask'][idx + jidx + 1]))
+
+    print("verify_initiator_groups_rpc_methods passed.")
+
+
+def verify_target_nodes_rpc_methods(rpc_py, rpc_param):
+ rpc = spdk_rpc(rpc_py)
+ output = rpc.get_iscsi_global_params()
+ jsonvalues = json.loads(output)
+ nodebase = jsonvalues['node_base']
+ output = rpc.get_target_nodes()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "get_target_nodes returned {}, expected empty".format(jsonvalues))
+
+ rpc.construct_malloc_bdev(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
+ rpc.add_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port'])))
+ rpc.add_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'])
+
+ lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0"
+ net_mapping = portal_tag + ":" + initiator_tag
+ rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'], '-d')
+ output = rpc.get_target_nodes()
+ jsonvalues = json.loads(output)
+ verify(len(jsonvalues) == 1, 1,
+ "get_target_nodes returned {} nodes, expected 1".format(len(jsonvalues)))
+ bdev_name = jsonvalues[0]['luns'][0]['bdev_name']
+ verify(bdev_name == "Malloc" + str(rpc_param['lun_total']), 1,
+ "bdev_name value is {}, expected Malloc{}".format(jsonvalues[0]['luns'][0]['bdev_name'], str(rpc_param['lun_total'])))
+ name = jsonvalues[0]['name']
+ verify(name == nodebase + ":" + rpc_param['target_name'], 1,
+ "target name value is {}, expected {}".format(name, nodebase + ":" + rpc_param['target_name']))
+ verify(jsonvalues[0]['alias_name'] == rpc_param['alias_name'], 1,
+ "target alias_name value is {}, expected {}".format(jsonvalues[0]['alias_name'], rpc_param['alias_name']))
+ verify(jsonvalues[0]['luns'][0]['lun_id'] == 0, 1,
+ "lun id value is {}, expected 0".format(jsonvalues[0]['luns'][0]['lun_id']))
+ verify(jsonvalues[0]['pg_ig_maps'][0]['ig_tag'] == int(initiator_tag), 1,
+ "initiator group tag value is {}, expected {}".format(jsonvalues[0]['pg_ig_maps'][0]['ig_tag'], initiator_tag))
+ verify(jsonvalues[0]['queue_depth'] == rpc_param['queue_depth'], 1,
+ "queue depth value is {}, expected {}".format(jsonvalues[0]['queue_depth'], rpc_param['queue_depth']))
+ verify(jsonvalues[0]['pg_ig_maps'][0]['pg_tag'] == int(portal_tag), 1,
+ "portal group tag value is {}, expected {}".format(jsonvalues[0]['pg_ig_maps'][0]['pg_tag'], portal_tag))
+ verify(jsonvalues[0]['disable_chap'] == rpc_param['disable_chap'], 1,
+ "disable chap value is {}, expected {}".format(jsonvalues[0]['disable_chap'], rpc_param['disable_chap']))
+ verify(jsonvalues[0]['mutual_chap'] == rpc_param['mutual_chap'], 1,
+ "chap mutual value is {}, expected {}".format(jsonvalues[0]['mutual_chap'], rpc_param['mutual_chap']))
+ verify(jsonvalues[0]['require_chap'] == rpc_param['require_chap'], 1,
+ "chap required value is {}, expected {}".format(jsonvalues[0]['require_chap'], rpc_param['require_chap']))
+ verify(jsonvalues[0]['chap_group'] == rpc_param['chap_group'], 1,
+ "chap auth group value is {}, expected {}".format(jsonvalues[0]['chap_group'], rpc_param['chap_group']))
+ verify(jsonvalues[0]['header_digest'] == rpc_param['header_digest'], 1,
+ "header digest value is {}, expected {}".format(jsonvalues[0]['header_digest'], rpc_param['header_digest']))
+ verify(jsonvalues[0]['data_digest'] == rpc_param['data_digest'], 1,
+ "data digest value is {}, expected {}".format(jsonvalues[0]['data_digest'], rpc_param['data_digest']))
+ lun_id = '1'
+ rpc.target_node_add_lun(name, bdev_name, "-i", lun_id)
+ output = rpc.get_target_nodes()
+ jsonvalues = json.loads(output)
+    verify(jsonvalues[0]['luns'][1]['bdev_name'] == "Malloc" + str(rpc_param['lun_total']), 1,
+           "bdev_name value is {}, expected Malloc{}".format(jsonvalues[0]['luns'][1]['bdev_name'], str(rpc_param['lun_total'])))
+ verify(jsonvalues[0]['luns'][1]['lun_id'] == 1, 1,
+ "lun id value is {}, expected 1".format(jsonvalues[0]['luns'][1]['lun_id']))
+
+ rpc.delete_target_node(name)
+ output = rpc.get_target_nodes()
+ jsonvalues = json.loads(output)
+ verify(not jsonvalues, 1,
+ "get_target_nodes returned {}, expected empty".format(jsonvalues))
+
+ rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'], '-d')
+
+ rpc.delete_portal_group(portal_tag)
+ rpc.delete_initiator_group(initiator_tag)
+ rpc.delete_target_node(name)
+ output = rpc.get_target_nodes()
+ jsonvalues = json.loads(output)
+ if not jsonvalues:
+ print("This issue will be fixed later.")
+
+ print("verify_target_nodes_rpc_methods passed.")
+
+
+def verify_get_interfaces(rpc_py):
+ rpc = spdk_rpc(rpc_py)
+ nics = json.loads(rpc.get_interfaces())
+ nics_names = set(x["name"] for x in nics)
+ # parse ip link show to verify the get_interfaces result
+ ip_show = ns_cmd + " ip link show"
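+    # "ip link show" lines look like "2: eth0@if3: <BROADCAST,...>"; capture the
+    # interface name and drop any "@peer" suffix.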
+    ifcfg_nics = set(re.findall(r"\S+:\s(\S+?)(?:@\S+){0,1}:\s<.*", check_output(ip_show.split()).decode()))
+ verify(nics_names == ifcfg_nics, 1, "get_interfaces returned {}".format(nics))
+ print("verify_get_interfaces passed.")
+
+
+def help_get_interface_ip_list(rpc_py, nic_name):
+ rpc = spdk_rpc(rpc_py)
+ nics = json.loads(rpc.get_interfaces())
+ nic = list([x for x in nics if x["name"] == nic_name])
+ verify(len(nic) != 0, 1,
+ "Nic name: {} is not found in {}".format(nic_name, [x["name"] for x in nics]))
+ return nic[0]["ip_addr"]
+
+
+def verify_add_delete_ip_address(rpc_py):
+ rpc = spdk_rpc(rpc_py)
+ nics = json.loads(rpc.get_interfaces())
+ # add ip on up to first 2 nics
+ for x in nics[:2]:
+ faked_ip = "123.123.{}.{}".format(random.randint(1, 254), random.randint(1, 254))
+ ping_cmd = ns_cmd + " ping -c 1 -W 1 " + faked_ip
+ rpc.add_ip_address(x["ifc_index"], faked_ip)
+ verify(faked_ip in help_get_interface_ip_list(rpc_py, x["name"]), 1,
+ "add ip {} to nic {} failed.".format(faked_ip, x["name"]))
+ try:
+ check_call(ping_cmd.split())
+ except BaseException:
+ verify(False, 1,
+                   "ping of ip {} on {} failed (adding the address was successful)".format
+ (faked_ip, x["name"]))
+ rpc.delete_ip_address(x["ifc_index"], faked_ip)
+ verify(faked_ip not in help_get_interface_ip_list(rpc_py, x["name"]), 1,
+               "delete ip {} from nic {} failed (adding and ping were successful)".format
+ (faked_ip, x["name"]))
+        # ping should now fail and raise a CalledProcessError
+ try:
+ check_call(ping_cmd.split())
+ except CalledProcessError as _:
+ pass
+ except Exception as e:
+ verify(False, 1,
+                   "unexpected exception {} was caught (adding/ping/delete were successful)".format
+ (str(e)))
+ else:
+ verify(False, 1,
+                   "ip {} on {} could still be pinged after the address was deleted (adding/ping/delete were successful)".format
+ (faked_ip, x["name"]))
+ print("verify_add_delete_ip_address passed.")
+
+
+def verify_add_nvme_bdev_rpc_methods(rpc_py):
+ rpc = spdk_rpc(rpc_py)
+ test_pass = 0
+ output = check_output(["lspci", "-mm", "-nn"])
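+    # Match PCI addresses of NVMe controllers: class "Non-Volatile memory
+    # controller [0108]" with the NVM Express programming interface (-p02)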
+    addrs = re.findall(r'^([0-9]{2}:[0-9]{2}.[0-9]) "Non-Volatile memory controller \[0108\]".*-p02', output.decode(), re.MULTILINE)
+ for addr in addrs:
+ ctrlr_address = "-b Nvme{} -t pcie -a 0000:{}".format(addrs.index(addr), addr)
+ rpc.construct_nvme_bdev(ctrlr_address)
+ print("add nvme device passed first time")
+        test_pass = 0
+        try:
+            rpc.construct_nvme_bdev(ctrlr_address)
+        except Exception:
+            print("adding the same nvme device a second time failed as expected")
+            test_pass = 1
+        verify(test_pass == 1, 1,
+               "construct_nvme_bdev did not fail when the same device was added a second time")
+ print("verify_add_nvme_bdev_rpc_methods passed.")
+
+
+if __name__ == "__main__":
+
+ rpc_py = sys.argv[1]
+
+ try:
+ verify_trace_flag_rpc_methods(rpc_py, rpc_param)
+ verify_get_interfaces(rpc_py)
+ verify_add_delete_ip_address(rpc_py)
+ create_malloc_bdevs_rpc_methods(rpc_py, rpc_param)
+ verify_portal_groups_rpc_methods(rpc_py, rpc_param)
+ verify_initiator_groups_rpc_methods(rpc_py, rpc_param)
+ verify_target_nodes_rpc_methods(rpc_py, rpc_param)
+ verify_scsi_devices_rpc_methods(rpc_py)
+ verify_iscsi_connection_rpc_methods(rpc_py)
+ verify_add_nvme_bdev_rpc_methods(rpc_py)
+ except RpcException as e:
+        print("{}. Exiting with status {}".format(e, e.retval))
+ raise e
+ except Exception as e:
+ raise e
+
+ sys.exit(0)
diff --git a/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.sh b/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.sh
new file mode 100755
index 00000000..ac5c4647
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/rpc_config/rpc_config.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/iscsi_tgt/common.sh
+
+timing_enter rpc_config
+
+# $1 = test type (posix/vpp)
+if [ "$1" == "posix" ] || [ "$1" == "vpp" ]; then
+ TEST_TYPE=$1
+else
+	echo "Invalid or missing iSCSI test type (expected 'posix' or 'vpp')"
+ exit 1
+fi
+
+MALLOC_BDEV_SIZE=64
+
+rpc_py=$rootdir/scripts/rpc.py
+rpc_config_py="$testdir/rpc_config.py"
+
+timing_enter start_iscsi_tgt
+
+$ISCSI_APP --wait-for-rpc &
+pid=$!
+echo "Process pid: $pid"
+
+trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid
+$rpc_py set_iscsi_options -o 30 -a 16
+$rpc_py start_subsystem_init
+echo "iscsi_tgt is listening. Running tests..."
+
+timing_exit start_iscsi_tgt
+
+$rpc_config_py $rpc_py $TARGET_IP $INITIATOR_IP $ISCSI_PORT $NETMASK $TARGET_NAMESPACE $TEST_TYPE
+
+$rpc_py get_bdevs
+
+trap - SIGINT SIGTERM EXIT
+
+iscsicleanup
+killprocess $pid
+timing_exit rpc_config
diff --git a/src/spdk/test/iscsi_tgt/test_plan.md b/src/spdk/test/iscsi_tgt/test_plan.md
new file mode 100644
index 00000000..4afad162
--- /dev/null
+++ b/src/spdk/test/iscsi_tgt/test_plan.md
@@ -0,0 +1,41 @@
+# SPDK iscsi_tgt test plan
+
+## Objective
+The purpose of these tests is to verify correct behavior of the SPDK iSCSI
+target feature.
+These tests are run either per-commit or as nightly tests.
+
+## Configuration
+All tests share the same basic configuration file for SPDK iscsi_tgt to run.
+Static configuration from the config file consists of setting the number of
+per-session queues and enabling RPC for further configuration via RPC calls.
+RPC calls used for dynamic configuration consist of (a sketch follows this list):
+- creating Malloc backend devices
+- creating Null Block backend devices
+- creating Pmem backend devices
+- constructing iSCSI subsystems
+- deleting iSCSI subsystems
+
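+A minimal sketch of that dynamic configuration flow, assuming the standard
+`scripts/rpc.py` helper and using illustrative bdev names, tags and addresses
+(the individual tests pick their own values):
+
+```bash
+# Backend device (Malloc shown here; Null Block and Pmem variants are analogous)
+./scripts/rpc.py construct_malloc_bdev 64 512
+
+# Portal group 1 and initiator group 2, then a target node mapping them
+./scripts/rpc.py add_portal_group 1 10.0.0.1:3260
+./scripts/rpc.py add_initiator_group 2 ANY 10.0.0.0/24
+./scripts/rpc.py construct_target_node Target1 Target1_alias 'Malloc0:0' '1:2' 64 -d
+
+# Teardown: the full node name can be read back with get_target_nodes
+./scripts/rpc.py get_target_nodes
+./scripts/rpc.py delete_target_node iqn.2016-06.io.spdk:Target1
+```
+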
+### Tests
+
+#### Test 1: iSCSI namespace on a Pmem device
+This test configures SPDK iSCSI subsystems backed by pmem
+devices and uses FIO to generate I/O targeting those subsystems.
+Test steps:
+- Step 1: Start SPDK iscsi_tgt application.
+- Step 2: Create 10 pmem pools.
+- Step 3: Create pmem bdevs on pmem pools.
+- Step 4: Create iSCSI subsystems with 10 pmem bdevs namespaces.
+- Step 5: Connect to iSCSI subsystems with kernel initiator.
+- Step 6: Run FIO with workload parameters: blocksize=4096, iodepth=64,
+  workload=randwrite; the verify flag is enabled so that
+  FIO reads back and verifies the data written to the pmem device.
+  The run time is 10 seconds for a quick test and 10 minutes
+  for the longer nightly test. (A roughly equivalent fio command is
+  sketched after this list.)
+- Step 7: Run FIO with workload parameters: blocksize=128kB, iodepth=4,
+  workload=randwrite; the verify flag is enabled so that
+  FIO reads back and verifies the data written to the pmem device.
+  The run time is 10 seconds for a quick test and 10 minutes
+  for the longer nightly test.
+- Step 8: Disconnect kernel initiator from iSCSI subsystems.
+- Step 9: Delete iSCSI subsystems from configuration.
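+
+For reference, a roughly equivalent standalone fio invocation for the Step 6
+workload (the tests drive fio through `scripts/fio.py`; the device path and
+verify method here are illustrative, and the nightly run uses a 600 second
+runtime instead of 10):
+
+```bash
+fio --name=iscsi_pmem_verify --filename=/dev/sdb --ioengine=libaio --direct=1 \
+    --rw=randwrite --bs=4096 --iodepth=64 --verify=md5 \
+    --time_based --runtime=10
+```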