summaryrefslogtreecommitdiffstats
path: root/src/spdk/scripts
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--src/spdk/scripts/ceph/ceph.conf64
-rwxr-xr-xsrc/spdk/scripts/ceph/start.sh102
-rwxr-xr-xsrc/spdk/scripts/ceph/stop.sh13
-rwxr-xr-xsrc/spdk/scripts/check_format.sh234
-rw-r--r--src/spdk/scripts/common.sh44
-rwxr-xr-xsrc/spdk/scripts/config_converter.py701
-rwxr-xr-xsrc/spdk/scripts/detect_cc.sh80
-rwxr-xr-xsrc/spdk/scripts/eofnl38
-rwxr-xr-xsrc/spdk/scripts/fio.py144
-rwxr-xr-xsrc/spdk/scripts/gen_crypto.sh18
-rwxr-xr-xsrc/spdk/scripts/gen_nvme.sh46
-rwxr-xr-xsrc/spdk/scripts/gen_rbd.sh11
-rwxr-xr-xsrc/spdk/scripts/genconfig.py46
-rw-r--r--src/spdk/scripts/perf/nvme/README12
-rw-r--r--src/spdk/scripts/perf/nvme/fio_test.conf20
-rwxr-xr-xsrc/spdk/scripts/perf/nvme/run_fio_test.py166
-rwxr-xr-xsrc/spdk/scripts/perf/nvme/run_fio_test.sh19
-rw-r--r--src/spdk/scripts/perf/vhost/fio_test.conf21
-rw-r--r--src/spdk/scripts/perf/vhost/run_vhost_test.py208
-rwxr-xr-xsrc/spdk/scripts/pkgdep.sh101
-rw-r--r--src/spdk/scripts/posix.txt82
-rwxr-xr-xsrc/spdk/scripts/prep_benchmarks.sh73
-rwxr-xr-xsrc/spdk/scripts/qat_setup.sh111
-rwxr-xr-xsrc/spdk/scripts/rpc.py1827
-rw-r--r--src/spdk/scripts/rpc/__init__.py157
-rw-r--r--src/spdk/scripts/rpc/app.py23
-rw-r--r--src/spdk/scripts/rpc/bdev.py531
-rw-r--r--src/spdk/scripts/rpc/client.py100
-rw-r--r--src/spdk/scripts/rpc/ioat.py12
-rw-r--r--src/spdk/scripts/rpc/iscsi.py502
-rw-r--r--src/spdk/scripts/rpc/log.py65
-rw-r--r--src/spdk/scripts/rpc/lvol.py195
-rw-r--r--src/spdk/scripts/rpc/nbd.py18
-rw-r--r--src/spdk/scripts/rpc/net.py29
-rw-r--r--src/spdk/scripts/rpc/nvme.py54
-rw-r--r--src/spdk/scripts/rpc/nvmf.py352
-rw-r--r--src/spdk/scripts/rpc/pmem.py29
-rw-r--r--src/spdk/scripts/rpc/subsystem.py7
-rw-r--r--src/spdk/scripts/rpc/vhost.py248
-rwxr-xr-xsrc/spdk/scripts/setup.sh604
-rwxr-xr-xsrc/spdk/scripts/spdkcli.py61
-rw-r--r--src/spdk/scripts/spdkcli/__init__.py1
-rw-r--r--src/spdk/scripts/spdkcli/ui_node.py929
-rw-r--r--src/spdk/scripts/spdkcli/ui_node_iscsi.py635
-rw-r--r--src/spdk/scripts/spdkcli/ui_node_nvmf.py302
-rw-r--r--src/spdk/scripts/spdkcli/ui_root.py482
-rw-r--r--src/spdk/scripts/vagrant/README.md219
-rw-r--r--src/spdk/scripts/vagrant/Vagrantfile158
-rw-r--r--src/spdk/scripts/vagrant/Vagrantfile_vhost_vm111
-rw-r--r--src/spdk/scripts/vagrant/autorun-spdk.conf28
-rwxr-xr-xsrc/spdk/scripts/vagrant/create_nvme_img.sh22
-rwxr-xr-xsrc/spdk/scripts/vagrant/create_vbox.sh220
-rwxr-xr-xsrc/spdk/scripts/vagrant/create_vhost_vm.sh129
-rwxr-xr-xsrc/spdk/scripts/vagrant/run-autorun.sh235
-rwxr-xr-xsrc/spdk/scripts/vagrant/update.sh99
55 files changed, 10738 insertions, 0 deletions
diff --git a/src/spdk/scripts/ceph/ceph.conf b/src/spdk/scripts/ceph/ceph.conf
new file mode 100644
index 00000000..9cf918e5
--- /dev/null
+++ b/src/spdk/scripts/ceph/ceph.conf
@@ -0,0 +1,64 @@
+[global]
+ debug_lockdep = 0/0
+ debug_context = 0/0
+ debug_crush = 0/0
+ debug_buffer = 0/0
+ debug_timer = 0/0
+ debug_filer = 0/0
+ debug_objecter = 0/0
+ debug_rados = 0/0
+ debug_rbd = 0/0
+ debug_ms = 0/0
+ debug_monc = 0/0
+ debug_tp = 0/0
+ debug_auth = 0/0
+ debug_finisher = 0/0
+ debug_heartbeatmap = 0/0
+ debug_perfcounter = 0/0
+ debug_asok = 0/0
+ debug_throttle = 0/0
+ debug_mon = 0/0
+ debug_paxos = 0/0
+ debug_rgw = 0/0
+
+ perf = true
+ mutex_perf_counter = false
+ throttler_perf_counter = false
+ rbd cache = false
+ mon_allow_pool_delete = true
+
+ osd_pool_default_size = 1
+
+[mon]
+ mon_max_pool_pg_num=166496
+ mon_osd_max_split_count = 10000
+ mon_pg_warn_max_per_osd = 10000
+
+[osd]
+ osd_op_threads = 64
+ filestore_queue_max_ops=5000
+ filestore_queue_committing_max_ops=5000
+ journal_max_write_entries=1000
+ journal_queue_max_ops=3000
+ objecter_inflight_ops=102400
+ filestore_wbthrottle_enable=false
+ filestore_queue_max_bytes=1048576000
+ filestore_queue_committing_max_bytes=1048576000
+ journal_max_write_bytes=1048576000
+ journal_queue_max_bytes=1048576000
+ ms_dispatch_throttle_bytes=1048576000
+    objecter_inflight_op_bytes=1048576000
+ filestore_max_sync_interval=10
+ osd_client_message_size_cap = 0
+ osd_client_message_cap = 0
+ osd_enable_op_tracker = false
+ filestore_fd_cache_size = 10240
+ filestore_fd_cache_shards = 64
+ filestore_op_threads = 16
+ osd_op_num_shards = 48
+ osd_op_num_threads_per_shard = 2
+ osd_pg_object_context_cache_count = 10240
+ filestore_odsync_write = True
+ journal_dynamic_throttle = True
+
+[osd.0]
diff --git a/src/spdk/scripts/ceph/start.sh b/src/spdk/scripts/ceph/start.sh
new file mode 100755
index 00000000..fbb2e83d
--- /dev/null
+++ b/src/spdk/scripts/ceph/start.sh
@@ -0,0 +1,102 @@
#!/usr/bin/env bash
# Bootstrap a throw-away single-node Ceph cluster (one monitor, one OSD) for
# SPDK RBD testing.
#
# Usage: start.sh <mon_ip>
#   mon_ip - IP address the monitor listens on (port 12046).
#
# Cluster state lives under /var/tmp/ceph, backed by a 10G raw image attached
# to loop device /dev/loop200. Must run as root.

set -x
set -e

script_dir=$(readlink -f "$(dirname "$0")")

base_dir=/var/tmp/ceph
mon_ip=$1
mon_dir=${base_dir}/mon.a/
pid_dir=${base_dir}/pid
ceph_conf=${base_dir}/ceph.conf
mnt_dir=${base_dir}/mnt
dev_backend=/dev/ceph
image=/var/tmp/ceph_raw.img
dev=/dev/loop200

# Clean up leftovers from a previous run; failures are expected on a clean host.
umount ${dev} || true
losetup -d ${dev_backend} || true

# partition osd
if [ -d ${base_dir} ]; then
	rm -rf ${base_dir}
fi
mkdir ${base_dir}
cp "${script_dir}/ceph.conf" ${ceph_conf}

# Reuse the raw backing image across runs if it already exists.
if [ ! -e ${image} ]; then
	fallocate -l 10G ${image}
fi

# Major 7 = loop devices; tie the /dev/ceph alias to loop200 and attach image.
mknod ${dev_backend} b 7 200 || true
losetup ${dev_backend} ${image} || true

PARTED="parted -s"
SGDISK="sgdisk"

echo "Partitioning ${dev}"
${PARTED} ${dev} mktable gpt
sleep 2
${PARTED} ${dev} mkpart primary 0% 5GiB
${PARTED} ${dev} mkpart primary 5GiB 100%

partno=0
echo "Setting name on ${dev}"
${SGDISK} -c 1:osd-device-${partno}-journal ${dev}
${SGDISK} -c 2:osd-device-${partno}-data ${dev}
kpartx ${dev}

# prep osds

mnt_pt=${mnt_dir}/osd-device-0-data/
mkdir -p ${mnt_pt}
mkfs.xfs -f /dev/disk/by-partlabel/osd-device-0-data
mount /dev/disk/by-partlabel/osd-device-0-data ${mnt_pt}
echo -e "\tosd data = ${mnt_pt}" >> "$ceph_conf"
echo -e "\tosd journal = /dev/disk/by-partlabel/osd-device-0-journal" >> "$ceph_conf"

# add mon address
echo -e "\t[mon.a]" >> "$ceph_conf"
echo -e "\tmon addr = ${mon_ip}:12046" >> "$ceph_conf"

# create mon
rm -rf ${mon_dir}/*
mkdir -p ${mon_dir}
mkdir -p ${pid_dir}

ceph-authtool --create-keyring --gen-key --name=mon. ${base_dir}/keyring --cap mon 'allow *'
ceph-authtool --gen-key --name=client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *' ${base_dir}/keyring

monmaptool --create --clobber --add a ${mon_ip}:12046 --print ${base_dir}/monmap

# Core dumps enabled for easier triage of daemon crashes.
sh -c "ulimit -c unlimited && exec ceph-mon --mkfs -c ${ceph_conf} -i a --monmap=${base_dir}/monmap --keyring=${base_dir}/keyring --mon-data=${mon_dir}"

cp ${base_dir}/keyring ${mon_dir}/keyring

cp ${ceph_conf} /etc/ceph/ceph.conf

cp ${base_dir}/keyring /etc/ceph/keyring

ceph-run sh -c "ulimit -n 16384 && ulimit -c unlimited && exec ceph-mon -c ${ceph_conf} -i a --keyring=${base_dir}/keyring --pid-file=${base_dir}/pid/root@$(hostname).pid --mon-data=${mon_dir}" || true

# create osd

i=0

mkdir -p ${mnt_dir}

uuid=$(uuidgen)
ceph -c ${ceph_conf} osd create ${uuid} ${i}
ceph-osd -c ${ceph_conf} -i ${i} --mkfs --mkkey --osd-uuid ${uuid}
ceph -c ${ceph_conf} osd crush add osd.${i} 1.0 host=$(hostname) root=default
ceph -c ${ceph_conf} -i ${mnt_dir}/osd-device-${i}-data/keyring auth add osd.${i} osd "allow *" mon "allow profile osd" mgr "allow"

# start osd
pkill -9 ceph-osd || true
sleep 2

mkdir -p ${pid_dir}
# Cap tcmalloc's thread cache so the OSD does not balloon on small test boxes.
env -i TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 ceph-osd -c ${ceph_conf} -i 0 --pid-file=${pid_dir}/ceph-osd.0.pid
diff --git a/src/spdk/scripts/ceph/stop.sh b/src/spdk/scripts/ceph/stop.sh
new file mode 100755
index 00000000..5c1c4a95
--- /dev/null
+++ b/src/spdk/scripts/ceph/stop.sh
@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Tear down the single-node Ceph cluster created by start.sh: kill the
# daemons, unmount the OSD data partition, detach the loop device and remove
# the cluster state directory.  The raw backing image (/var/tmp/ceph_raw.img)
# is intentionally left behind so start.sh can reuse it on the next run.

set -x

base_dir=/var/tmp/ceph
dev_backend=/dev/ceph

# SIGKILL is acceptable here: the cluster is throw-away test state.
pkill -9 ceph
sleep 3
umount /dev/loop200p2
losetup -d ${dev_backend}
rm -rf ${base_dir}
diff --git a/src/spdk/scripts/check_format.sh b/src/spdk/scripts/check_format.sh
new file mode 100755
index 00000000..f7e718d8
--- /dev/null
+++ b/src/spdk/scripts/check_format.sh
@@ -0,0 +1,234 @@
+#!/usr/bin/env bash
# Repo-wide format/lint gate run in CI: file permissions, C coding style
# (astyle), comment style, whitespace, forbidden library functions and CUnit
# macros, end-of-file newlines, POSIX includes, Python style
# (pycodestyle/pep8), and a CHANGELOG.md reminder.  Exits non-zero if any
# check fails.
+
+readonly BASEDIR=$(readlink -f $(dirname $0))/..
+cd $BASEDIR
+
+# exit on errors
+set -e
+
+rc=0
+
+echo -n "Checking file permissions..."
+
+while read -r perm _res0 _res1 path; do
+	if [ ! -f "$path" ]; then
+		continue
+	fi
+
+	fname=$(basename -- "$path")
+
+	case ${fname##*.} in
+		c|h|cpp|cc|cxx|hh|hpp|md|html|js|json|svg|Doxyfile|yml|LICENSE|README|conf|in|Makefile|mk|gitignore|go|txt)
+			# These file types should never be executable
+			if [ "$perm" -eq 100755 ]; then
+				echo "ERROR: $path is marked executable but is a code file."
+				rc=1
+			fi
+		;;
+		*)
+			shebang=$(head -n 1 $path | cut -c1-3)
+
+			# git only tracks the execute bit, so will only ever return 755 or 644 as the permission.
+			if [ "$perm" -eq 100755 ]; then
+				# If the file has execute permission, it should start with a shebang.
+				if [ "$shebang" != "#!/" ]; then
+					echo "ERROR: $path is marked executable but does not start with a shebang."
+					rc=1
+				fi
+			else
+				# If the file does not have execute permissions, it should not start with a shebang.
+				if [ "$shebang" = "#!/" ]; then
+					echo "ERROR: $path is not marked executable but starts with a shebang."
+					rc=1
+				fi
+			fi
+		;;
+	esac
+
+done <<< "$(git grep -I --name-only --untracked -e . | git ls-files -s)"
+
+if [ $rc -eq 0 ]; then
+	echo " OK"
+fi
+
+if hash astyle; then
+	echo -n "Checking coding style..."
+	if [ "$(astyle -V)" \< "Artistic Style Version 3" ]
+	then
+		echo -n " Your astyle version is too old. This may cause failure on patch verification performed by CI. Please update astyle to at least 3.0.1 version..."
+	fi
+	rm -f astyle.log
+	touch astyle.log
+	# Exclude rte_vhost code imported from DPDK - we want to keep the original code
+	# as-is to enable ongoing work to synch with a generic upstream DPDK vhost library,
+	# rather than making diffs more complicated by a lot of changes to follow SPDK
+	# coding standards.
+	git ls-files '*.[ch]' '*.cpp' '*.cc' '*.cxx' '*.hh' '*.hpp' | \
+		grep -v rte_vhost | grep -v cpp_headers | \
+		xargs astyle --options=.astylerc >> astyle.log
+	if grep -q "^Formatted" astyle.log; then
+		echo " errors detected"
+		git diff
+		sed -i -e 's/	/ /g' astyle.log
+		grep --color=auto "^Formatted.*" astyle.log
+		echo "Incorrect code style detected in one or more files."
+		echo "The files have been automatically formatted."
+		echo "Remember to add the files to your commit."
+		rc=1
+	else
+		echo " OK"
+	fi
+	rm -f astyle.log
+else
+	echo "You do not have astyle installed so your code style is not being checked!"
+fi
+
+echo -n "Checking comment style..."
+
+git grep --line-number -e '/[*][^ *-]' -- '*.[ch]' > comment.log || true
+git grep --line-number -e '[^ ][*]/' -- '*.[ch]' ':!lib/vhost/rte_vhost*/*' >> comment.log || true
+git grep --line-number -e '^[*]' -- '*.[ch]' >> comment.log || true
+
+if [ -s comment.log ]; then
+	echo " Incorrect comment formatting detected"
+	cat comment.log
+	rc=1
+else
+	echo " OK"
+fi
+rm -f comment.log
+
+echo -n "Checking for spaces before tabs..."
+git grep --line-number $' \t' -- > whitespace.log || true
+if [ -s whitespace.log ]; then
+	echo " Spaces before tabs detected"
+	cat whitespace.log
+	rc=1
+else
+	echo " OK"
+fi
+rm -f whitespace.log
+
+echo -n "Checking trailing whitespace in output strings..."
+
+git grep --line-number -e ' \\n"' -- '*.[ch]' > whitespace.log || true
+
+if [ -s whitespace.log ]; then
+	echo " Incorrect trailing whitespace detected"
+	cat whitespace.log
+	rc=1
+else
+	echo " OK"
+fi
+rm -f whitespace.log
+
+echo -n "Checking for use of forbidden library functions..."
+
+git grep --line-number -w '\(strncpy\|strcpy\|strcat\|sprintf\|vsprintf\)' -- './*.c' ':!lib/vhost/rte_vhost*/**' > badfunc.log || true
+if [ -s badfunc.log ]; then
+	echo " Forbidden library functions detected"
+	cat badfunc.log
+	rc=1
+else
+	echo " OK"
+fi
+rm -f badfunc.log
+
+echo -n "Checking for use of forbidden CUnit macros..."
+
+git grep --line-number -w 'CU_ASSERT_FATAL' -- 'test/*' ':!test/spdk_cunit.h' > badcunit.log || true
+if [ -s badcunit.log ]; then
+	echo " Forbidden CU_ASSERT_FATAL usage detected - use SPDK_CU_ASSERT_FATAL instead"
+	cat badcunit.log
+	rc=1
+else
+	echo " OK"
+fi
+rm -f badcunit.log
+
+echo -n "Checking blank lines at end of file..."
+
+if ! git grep -I -l -e . -z | \
+	xargs -0 -P8 -n1 scripts/eofnl > eofnl.log; then
+	echo " Incorrect end-of-file formatting detected"
+	cat eofnl.log
+	rc=1
+else
+	echo " OK"
+fi
+rm -f eofnl.log
+
+echo -n "Checking for POSIX includes..."
+git grep -I -i -f scripts/posix.txt -- './*' ':!include/spdk/stdinc.h' ':!include/linux/**' ':!lib/vhost/rte_vhost*/**' ':!scripts/posix.txt' > scripts/posix.log || true
+if [ -s scripts/posix.log ]; then
+	echo "POSIX includes detected. Please include spdk/stdinc.h instead."
+	cat scripts/posix.log
+	rc=1
+else
+	echo " OK"
+fi
+rm -f scripts/posix.log
+
+if hash pycodestyle 2>/dev/null; then
+	PEP8=pycodestyle
+elif hash pep8 2>/dev/null; then
+	PEP8=pep8
+fi
+
+if [ ! -z ${PEP8} ]; then
+	echo -n "Checking Python style..."
+
+	PEP8_ARGS+=" --max-line-length=140"
+
+	error=0
+	git ls-files '*.py' | xargs -n1 $PEP8 $PEP8_ARGS > pep8.log || error=1
+	if [ $error -ne 0 ]; then
+		echo " Python formatting errors detected"
+		cat pep8.log
+		rc=1
+	else
+		echo " OK"
+	fi
+	rm -f pep8.log
+else
+	echo "You do not have pycodestyle or pep8 installed so your Python style is not being checked!"
+fi
+
+# Check if any of the public interfaces were modified by this patch.
+# Warn the user to consider updating the changelog if any changes
+# are detected.
+echo -n "Checking whether CHANGELOG.md should be updated..."
+staged=$(git diff --name-only --cached .)
+working=$(git status -s --porcelain | grep -iv "??" | awk '{print $2}')
+files="$staged $working"
# When both lists are empty, "$staged $working" collapses to a single space,
# so fall back to the files touched by the HEAD commit.
+if [[ "$files" = " " ]]; then
+	files=$(git diff-tree --no-commit-id --name-only -r HEAD)
+fi
+
+has_changelog=0
+for f in $files; do
+	if [[ $f == CHANGELOG.md ]]; then
+		# The user has a changelog entry, so exit.
+		has_changelog=1
+		break
+	fi
+done
+
+needs_changelog=0
+if [ $has_changelog -eq 0 ]; then
+	for f in $files; do
+		if [[ $f == include/spdk/* ]] || [[ $f == scripts/rpc.py ]] || [[ $f == etc/* ]]; then
+			echo ""
+			echo -n "$f was modified. Consider updating CHANGELOG.md."
+			needs_changelog=1
+		fi
+	done
+fi
+
+if [ $needs_changelog -eq 0 ]; then
+	echo " OK"
+else
+	echo ""
+fi
+
+exit $rc
diff --git a/src/spdk/scripts/common.sh b/src/spdk/scripts/common.sh
new file mode 100644
index 00000000..d85fdb8b
--- /dev/null
+++ b/src/spdk/scripts/common.sh
@@ -0,0 +1,44 @@
+# Common shell utility functions
+
# Print the PCI address (DDDD:BB:DD.F) of every device whose class, subclass
# and programming interface match the three hex arguments.  Uses lspci on
# Linux and falls back to pciconf on FreeBSD; exits 1 if neither exists.
function iter_pci_class_code() {
	local class subclass progif
	class=$(printf %02x $((0x$1)))
	subclass=$(printf %02x $((0x$2)))
	progif=$(printf %02x $((0x$3)))

	if hash lspci &>/dev/null; then
		# lspci -mm prints the quoted class code in column 2; a progif of
		# "00" means "match any", so the grep filter is skipped for it.
		if [ "$progif" != "00" ]; then
			lspci -mm -n -D \
				| grep -i -- "-p${progif}" \
				| awk -v cc="\"${class}${subclass}\"" -F " " '{if (cc ~ $2) print $1}' \
				| tr -d '"'
		else
			lspci -mm -n -D \
				| awk -v cc="\"${class}${subclass}\"" -F " " '{if (cc ~ $2) print $1}' \
				| tr -d '"'
		fi
	elif hash pciconf &>/dev/null; then
		# FreeBSD: parse "selector@pci0:bus:dev:func: class=0x..." lines.
		addr=($(pciconf -l | grep -i "class=0x${class}${subclass}${progif}" \
			| cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
		printf "%04x:%02x:%02x:%x\n" ${addr[0]} ${addr[1]} ${addr[2]} ${addr[3]}
	else
		echo "Missing PCI enumeration utility"
		exit 1
	fi
}
+
# Print the PCI address (DDDD:BB:DD.F) of every device matching the given
# vendor id ($1) and device id ($2), both hex.  Uses lspci on Linux and
# pciconf on FreeBSD; exits 1 if neither utility is available.
function iter_pci_dev_id() {
	local ven_id dev_id
	ven_id=$(printf %04x $((0x$1)))
	dev_id=$(printf %04x $((0x$2)))

	if hash lspci &>/dev/null; then
		# Columns 3/4 of lspci -mm are the quoted vendor and device ids.
		lspci -mm -n -D \
			| awk -v ven="\"$ven_id\"" -v dev="\"${dev_id}\"" -F " " '{if (ven ~ $3 && dev ~ $4) print $1}' \
			| tr -d '"'
	elif hash pciconf &>/dev/null; then
		# pciconf encodes the pair as chip=0x<device><vendor>.
		addr=($(pciconf -l | grep -i "chip=0x${dev_id}${ven_id}" \
			| cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
		printf "%04x:%02x:%02x:%x\n" ${addr[0]} ${addr[1]} ${addr[2]} ${addr[3]}
	else
		echo "Missing PCI enumeration utility"
		exit 1
	fi
}
diff --git a/src/spdk/scripts/config_converter.py b/src/spdk/scripts/config_converter.py
new file mode 100755
index 00000000..d78541a7
--- /dev/null
+++ b/src/spdk/scripts/config_converter.py
@@ -0,0 +1,701 @@
+#!/usr/bin/python
+
+import configparser
+import re
+import sys
+import json
+from collections import OrderedDict
+
# Per-subsystem RPC accumulators.  Each OrderedDict maps an SPDK JSON-RPC
# method name to the list of config entries generated for it; insertion order
# is preserved because option-setting RPCs must be emitted before the
# construct_* RPCs that depend on them.
+bdev_dict = OrderedDict()
+bdev_dict["set_bdev_options"] = []
+bdev_dict["construct_split_vbdev"] = []
+bdev_dict["set_bdev_nvme_options"] = []
+bdev_dict["construct_nvme_bdev"] = []
+bdev_dict["set_bdev_nvme_hotplug"] = []
+bdev_dict["construct_malloc_bdev"] = []
+bdev_dict["construct_aio_bdev"] = []
+bdev_dict["construct_pmem_bdev"] = []
+bdev_dict["construct_virtio_dev"] = []
+
+vhost_dict = OrderedDict()
+vhost_dict["construct_vhost_scsi_controller"] = []
+vhost_dict["construct_vhost_blk_controller"] = []
+vhost_dict["construct_vhost_nvme_controller"] = []
+
+iscsi_dict = OrderedDict()
+iscsi_dict["set_iscsi_options"] = []
+iscsi_dict["add_portal_group"] = []
+iscsi_dict["add_initiator_group"] = []
+iscsi_dict["construct_target_node"] = []
+
+nvmf_dict = OrderedDict()
+nvmf_dict["set_nvmf_target_config"] = []
+nvmf_dict["set_nvmf_target_options"] = []
+nvmf_dict["construct_nvmf_subsystem"] = []
+
+
+# dictionary with new config that will be written to new json config file
# Subsystems mapped to None are emitted as "config": null in the JSON output.
+subsystem = {
+    "copy": None,
+    "interface": None,
+    "net_framework": None,
+    "bdev": bdev_dict,
+    "scsi": [],
+    "nvmf": nvmf_dict,
+    "nbd": [],
+    "vhost": vhost_dict,
+    "iscsi": iscsi_dict
+}
+
+
class OptionOrderedDict(OrderedDict):
    """OrderedDict used by the ini parser: assigning a list to an existing
    key extends the stored list instead of replacing it, so repeated ini
    options (e.g. multiple "Split" lines) accumulate in file order."""

    def __setitem__(self, option, value):
        # Merge list values into an existing entry; anything else overwrites.
        if isinstance(value, list) and option in self:
            self[option].extend(value)
        else:
            super(OptionOrderedDict, self).__setitem__(option, value)
+
+
# Map ini yes/no strings to booleans.
# NOTE(review): appears unused within this chunk — confirm callers before removing.
+no_yes_map = {"no": False, "No": False, "Yes": True, "yes": True}
+
+
def generate_new_json_config():
    """Assemble the final {"subsystems": [...]} document from the module-level
    ``subsystem`` accumulator, preserving the fixed subsystem order SPDK
    expects (copy, interface, net_framework, bdev, scsi, nvmf, nbd, vhost,
    iscsi)."""
    json_subsystem = [
        {'subsystem': "copy", 'config': None},
        {"subsystem": "interface", "config": None},
        {"subsystem": "net_framework", "config": None},
        {"subsystem": "bdev", "config": []},
        {"subsystem": "scsi", "config": None},
        {"subsystem": "nvmf", "config": []},
        {"subsystem": "nbd", "config": []},
        {"subsystem": "vhost", "config": []},
        {"subsystem": "iscsi", "config": []}
    ]
    by_name = {entry["subsystem"]: entry for entry in json_subsystem}

    # Method-keyed subsystems: flatten every per-method list, in order.
    for name in ("bdev", "nvmf", "vhost", "iscsi"):
        for method in subsystem[name]:
            by_name[name]["config"].extend(subsystem[name][method])

    # scsi holds a flat list and only materializes a config list when
    # non-empty (it stays null otherwise).
    for item in subsystem['scsi']:
        if by_name["scsi"]["config"] is None:
            by_name["scsi"]["config"] = []
        by_name["scsi"]["config"].append(item)

    return {"subsystems": json_subsystem}
+
+
# Route each legacy ini section name to the accumulator of the subsystem that
# owns it (numbered sections such as "TargetNode1" are matched by prefix by
# the caller).
+section_to_subsystem = {
+    "Bdev": subsystem['bdev'],
+    "AIO": subsystem['bdev'],
+    "Malloc": subsystem['bdev'],
+    "Nvme": subsystem['bdev'],
+    "Pmem": subsystem['bdev'],
+    "Split": subsystem['bdev'],
+    "Nvmf": subsystem['nvmf'],
+    "Subsystem": subsystem['nvmf'],
+    "VhostScsi": subsystem['vhost'],
+    "VhostBlk": subsystem['vhost'],
+    "VhostNvme": subsystem['vhost'],
+    "VirtioUser": subsystem['bdev'],
+    "iSCSI": subsystem['iscsi'],
+    "PortalGroup": subsystem['iscsi'],
+    "InitiatorGroup": subsystem['iscsi'],
+    "TargetNode": subsystem['iscsi']
+}
+
+
def set_param(params, cfg_name, value):
    """Store an ini option value into every matching row of ``params``.

    Each row is ``[ini_name, rpc_name, type, current_value]``; the row is
    updated in place (index 3).  A handful of rpc names get special CHAP
    handling, otherwise conversion is driven by the row's type field.
    """
    for entry in params:
        if entry[0] != cfg_name:
            continue
        rpc_name, kind = entry[1], entry[2]
        if rpc_name == "disable_chap":
            entry[3] = value == "None"
        elif rpc_name == "require_chap":
            entry[3] = value in ["CHAP", "Mutual"]
        elif rpc_name == "mutual_chap":
            entry[3] = value == "Mutual"
        elif rpc_name == "chap_group":
            entry[3] = int(value.replace("AuthGroup", ""))
        elif kind == bool:
            entry[3] = value in ("yes", "true", "Yes")
        elif kind == "hex":
            entry[3] = str(int(value, 16))
        elif kind == int:
            entry[3] = int(value)
        elif kind == list:
            entry[3].append(value)
        elif kind == "dev_type":
            # Anything other than "blk" keeps the default ("scsi").
            if value.lower() == "blk":
                entry[3] = "blk"
        else:
            # Plain converter (e.g. str), with surrounding quotes stripped.
            entry[3] = kind(value.replace("\"", ""))
+
+
def to_json_params(params):
    """Collect ``rpc_name -> value`` pairs from the param rows, skipping any
    row whose value is still None (i.e. unset and without a default)."""
    return {entry[1]: entry[3] for entry in params if entry[3] is not None}
+
+
def get_bdev_options_json(config, section):
    """Translate the [Bdev] ini section into a single set_bdev_options RPC
    entry.  The ``section`` argument is unused; the section name is fixed,
    matching the original converter."""
    defaults = [
        ["BdevIoPoolSize", "bdev_io_pool_size", int, 65536],
        ["BdevIoCacheSize", "bdev_io_cache_size", int, 256]
    ]
    for opt in config.options("Bdev"):
        set_param(defaults, opt, config.get("Bdev", opt))
    return [{"params": to_json_params(defaults), "method": "set_bdev_options"}]
+
+
def get_aio_bdev_json(config, section):
    """Convert the [AIO] ini section into construct_aio_bdev RPC entries.

    Each line of the "AIO" option is "<filename> <name> [block_size]".
    The ``section`` argument is unused (the section name is fixed), matching
    the original converter.

    Fix vs. original: lines are only processed under the "AIO" option.  The
    original iterated ``value`` for *every* option in the section, which
    raised TypeError (``value`` still None) when another option preceded
    "AIO" and duplicated entries when one followed it.
    """
    aio_json = []
    for option in config.options("AIO"):
        if option != "AIO":
            continue
        for item in config.get("AIO", option).split("\n"):
            items = re.findall("\S+", item)
            params = {"filename": items[0], "name": items[1]}
            if len(items) == 3:
                # Optional third field: explicit block size in bytes.
                params["block_size"] = int(items[2])
            aio_json.append({
                "params": params,
                "method": "construct_aio_bdev"
            })

    return aio_json
+
+
def get_malloc_bdev_json(config, section):
    """Convert the [Malloc] ini section into construct_malloc_bdev RPC
    entries, one per LUN (NumberOfLuns), each LunSizeInMB big with the given
    BlockSize.  The ``section`` argument is unused, matching the original
    converter.

    Fix vs. original: ``num_blocks`` now uses integer division.  Under
    Python 3, ``/`` produced a float (e.g. 40960.0) in the emitted JSON,
    which is not a valid value for the RPC's integer parameter.
    """
    malloc_json = []
    params = [
        ['NumberOfLuns', '', int, -1],
        ['LunSizeInMB', '', int, 20],
        ['BlockSize', '', int, 512]
    ]
    for option in config.options("Malloc"):
        set_param(params, option, config.get("Malloc", option))
    for lun in range(0, params[0][3]):
        malloc_json.append({
            "params": {
                "block_size": params[2][3],
                "num_blocks": params[1][3] * 1024 * 1024 // params[2][3],
                "name": "Malloc%s" % lun
            },
            "method": "construct_malloc_bdev"
        })

    return malloc_json
+
+
+def get_nvme_bdev_json(config, section):
    # Convert the [Nvme] ini section: every "TransportID" line becomes one
    # construct_nvme_bdev entry; the remaining options configure the NVMe
    # driver (set_bdev_nvme_options) and hotplug (set_bdev_nvme_hotplug).
    # NOTE(review): "AdminPollRate" is listed twice on purpose — set_param
    # updates every matching row, so one ini value feeds both
    # nvme_adminq_poll_period_us and the hotplug period_us (scaled x100
    # below); confirm that scaling is intended.
+    params = [
+        ["RetryCount", "retry_count", int, 4],
+        ["TimeoutuSec", "timeout_us", int, 0],
+        ["AdminPollRate", "nvme_adminq_poll_period_us", int, 1000000],
+        ["ActionOnTimeout", "action_on_timeout", str, "none"],
+        ["HotplugEnable", "enable", bool, False],
+        ["AdminPollRate", "period_us", int, 1000]
+    ]
+    nvme_json = []
+    for option in config.options("Nvme"):
+        value = config.get("Nvme", option)
+        if "TransportID" == option:
            # Last whitespace-separated token is the bdev name; trtype/traddr
            # are extracted from "key:value" tokens with quotes stripped.
+            entry = re.findall("\S+", value)
+            nvme_name = entry[-1]
+            trtype = re.findall("trtype:\S+", value)
+            if trtype:
+                trtype = trtype[0].replace("trtype:", "").replace("\"", "")
+            traddr = re.findall("traddr:\S+", value)
+            if traddr:
+                traddr = traddr[0].replace("traddr:", "").replace("\"", "")
+            nvme_json.append({
+                "params": {
+                    "trtype": trtype,
+                    "name": nvme_name,
+                    "traddr": traddr
+                },
+                "method": "construct_nvme_bdev"
+            })
+        else:
+            set_param(params, option, value)
    # ActionOnTimeout is case-insensitive in the ini file; the RPC wants
    # lowercase.
+    params[3][3] = params[3][3].lower()
+    params[5][3] = params[5][3] * 100
+    nvme_json.append({
+        "params": to_json_params(params[4:6]),
+        "method": "set_bdev_nvme_hotplug"
+    })
+    nvme_json.append({
+        "params": to_json_params(params[0:4]),
+        "method": "set_bdev_nvme_options"
+    })
+    return nvme_json
+
+
def get_pmem_bdev_json(config, section):
    """Convert a [Pmem] ini section into construct_pmem_bdev RPC entries.

    Each line of the "Blk" option is "<pmem_file> <name>".
    """
    pmem_json = []
    for option in config.options(section):
        if option != "Blk":
            continue
        for line in config.get(section, option).split("\n"):
            fields = re.findall("\S+", line)
            pmem_json.append({
                "params": {
                    "name": fields[1],
                    "pmem_file": fields[0]
                },
                "method": "construct_pmem_bdev"
            })

    return pmem_json
+
+
def get_split_bdev_json(config, section):
    """Convert the [Split] ini section into construct_split_vbdev RPC
    entries.  Each "Split" line is "<base_bdev> <split_count> [size_mb]";
    note size_mb is passed through as a string, matching the original
    converter.  The ``section`` argument is unused.

    Fix vs. original: the split lines are processed once, after scanning the
    options, instead of re-processing ``value`` on every loop iteration —
    the original duplicated output when the section contained any option
    besides "Split".
    """
    value = []
    for option in config.options("Split"):
        if "Split" == option:
            value = config.get("Split", option)
    # OptionOrderedDict merges repeated "Split" options into a list; a plain
    # string means there was a single line.
    if value and not isinstance(value, list):
        value = [value]
    split_json = []
    for split in value:
        items = re.findall("\S+", split)
        split_size_mb = 0
        base_bdev = items[0]
        split_count = int(items[1])
        if len(items) == 3:
            split_size_mb = items[2]
        split_json.append({
            "params": {
                "base_bdev": base_bdev,
                "split_size_mb": split_size_mb,
                "split_count": split_count
            },
            "method": "construct_split_vbdev"
        })

    return split_json
+
+
def get_nvmf_options_json(config, section):
    """Build the set_nvmf_target_config and set_nvmf_target_options RPC
    entries from the [Nvmf] ini section, falling back to SPDK defaults.
    The ``section`` argument is unused."""
    params = [
        ["AcceptorPollRate", "acceptor_poll_rate", int, 10000],
        ["MaxQueuesPerSession", "max_qpairs_per_ctrlr", int, 64],
        ["MaxQueueDepth", "max_queue_depth", int, 128],
        ["IncapsuleDataSize", "in_capsule_data_size", int, 4096],
        ["MaxIOSize", "max_io_size", int, 131072],
        ["IOUnitSize", "io_unit_size", int, 131072],
        ["MaxSubsystems", "max_subsystems", int, 1024]
    ]
    for opt in config.options("Nvmf"):
        set_param(params, opt, config.get("Nvmf", opt))
    # Row 0 (acceptor poll rate) goes to target_config, the rest to
    # target_options.
    return [
        {"params": to_json_params([params[0]]),
         "method": "set_nvmf_target_config"},
        {"params": to_json_params(params[1:7]),
         "method": "set_nvmf_target_options"},
    ]
+
+
+def get_nvmf_subsystem_json(config, section):
    # Build one construct_nvmf_subsystem entry from a [Subsystem*] section.
+    params = [
+        ["NQN", "nqn", str, ""],
+        ["Host", "hosts", list, []],
+        ["AllowAnyHost", "allow_any_host", bool, True],
+        ["SN", "serial_number", str, ""],
+        ["MaxNamespaces", "max_namespaces", str, ""],
+    ]
+    listen_address = []
+    namespaces = []
+    nsid = 0
+    searched_items = [param[0] for param in params]
+    for option in config.options(section):
+        value = config.get(section, option)
+        if option in searched_items:
+            set_param(params, option, value)
+            continue
+        if "Listen" == option:
            # "Listen <trtype> <addr>:<svcid>": more than two ':' in the
            # address means IPv6; [] brackets are stripped from traddr.
+            items = re.findall("\S+", value)
+            adrfam = "IPv4"
+            if len(items[1].split(":")) > 2:
+                adrfam = "IPv6"
+            listen_address.append({
+                "trtype": items[0],
+                "adrfam": adrfam,
+                "trsvcid": items[1].rsplit(":", 1)[-1],
+                "traddr": items[1].rsplit(":", 1)[0].replace(
+                    "]", "").replace("[", "")
+            })
+        if "Namespace" == option:
            # "Namespace <bdev> [nsid]": explicit nsid when given, otherwise
            # previous nsid + 1.
+            for item in value.split("\n"):
+                items = re.findall("\S+", item)
+                if len(items) == 2:
+                    nsid = items[1]
+                else:
+                    nsid += 1
+                namespaces.append({
+                    "nsid": int(nsid),
+                    "bdev_name": items[0],
+                })
    # max_namespaces (row 4) is stored as a string and only emitted, as an
    # int, when the option was actually set.
+    parameters = to_json_params(params[0:4])
+    parameters['listen_addresses'] = listen_address
+    parameters['namespaces'] = namespaces
+    nvmf_subsystem = {
+        "params": parameters,
+        "method": "construct_nvmf_subsystem"
+    }
+
+    if params[4][3]:
+        nvmf_subsystem['params']['max_namespaces'] = int(params[4][3])
+
+    return [nvmf_subsystem]
+
+
def get_vhost_scsi_json(config, section):
    """Convert a [VhostScsi*] ini section into one
    construct_vhost_scsi_controller entry followed by an add_vhost_scsi_lun
    entry per "Target <num> <bdev>" line."""
    params = [
        ["Name", "ctrlr", str, None],
        ["Cpumask", "cpumask", "hex", "1"],
    ]
    targets = []
    for option in config.options(section):
        value = config.get(section, option)
        if option in ("Name", "Cpumask"):
            set_param(params, option, value)
        elif option == "Target":
            for line in value.split("\n"):
                fields = re.findall("\S+", line)
                targets.append({
                    "scsi_target_num": int(fields[0]),
                    "ctrlr": params[0][3],
                    "bdev_name": fields[1]
                })
    result = [{
        "params": to_json_params(params),
        "method": "construct_vhost_scsi_controller"
    }]
    for target in targets:
        result.append({"params": target, "method": "add_vhost_scsi_lun"})

    return result
+
+
def get_vhost_blk_json(config, section):
    """Convert a [VhostBlk*] ini section into a single
    construct_vhost_blk_controller RPC entry."""
    params = [
        ["ReadOnly", "readonly", bool, False],
        ["Dev", "dev_name", str, ""],
        ["Name", "ctrlr", str, ""],
        ["Cpumask", "cpumask", "hex", ""]
    ]
    for opt in config.options(section):
        set_param(params, opt, config.get(section, opt))
    return [{
        "method": "construct_vhost_blk_controller",
        "params": to_json_params(params)
    }]
+
+
def get_vhost_nvme_json(config, section):
    """Convert a [VhostNvme*] ini section into one
    construct_vhost_nvme_controller entry plus an add_vhost_nvme_ns entry
    per "Namespace" bdev."""
    params = [
        ["Name", "ctrlr", str, ""],
        ["NumberOfQueues", "io_queues", int, -1],
        ["Cpumask", "cpumask", "hex", 0x1],
        ["Namespace", "bdev_name", list, []]
    ]
    for opt in config.options(section):
        # Multi-line options (several Namespace lines) are fed one by one.
        for value in config.get(section, opt).split("\n"):
            set_param(params, opt, value)
    result = [{
        "params": to_json_params(params[:3]),
        "method": "construct_vhost_nvme_controller"
    }]
    for namespace in params[3][3]:
        result.append({
            "params": {
                "ctrlr": params[0][3],
                "bdev_name": namespace,
            },
            "method": "add_vhost_nvme_ns"
        })

    return result
+
+
def get_virtio_user_json(config, section):
    """Convert a [VirtioUser*] ini section into one construct_virtio_dev RPC
    entry.  The bdev name defaults to the section name with "User" replaced
    by "Scsi" or "Blk" depending on the device type."""
    params = [
        ["Path", "traddr", str, ""],
        ["Queues", "vq_count", int, 1],
        ["Type", "dev_type", "dev_type", "scsi"],
        ["Name", "name", str, section],
        # The following have no ini counterpart: they are rpc-only parameters
        # that always appear in the JSON output with their defaults.
        [None, "trtype", str, "user"],
        [None, "vq_size", int, 512]
    ]
    for opt in config.options(section):
        set_param(params, opt, config.get(section, opt))
    suffix = "Blk" if params[2][3] == "blk" else "Scsi"
    params[3][3] = params[3][3].replace("User", suffix)

    return [{
        "params": to_json_params(params),
        "method": "construct_virtio_dev"
    }]
+
+
def get_iscsi_options_json(config, section):
    """Translate the [iSCSI] ini section into a single set_iscsi_options RPC
    entry.  Row order is preserved because it fixes the key order in the
    emitted JSON object."""
    params = [
        ['AllowDuplicateIsid', 'allow_duplicated_isid', bool, False],
        ['DefaultTime2Retain', 'default_time2retain', int, 20],
        ['DiscoveryAuthMethod', 'mutual_chap', bool, False],
        ['MaxConnectionsPerSession', 'max_connections_per_session', int, 2],
        ['Timeout', 'nop_timeout', int, 60],
        ['DiscoveryAuthMethod', 'disable_chap', bool, False],
        ['DiscoveryAuthMethod', 'require_chap', bool, False],
        ['NodeBase', 'node_base', str, "iqn.2016-06.io.spdk"],
        ['AuthFile', 'auth_file', str, None],
        ['DiscoveryAuthGroup', 'chap_group', int, 0],
        ['MaxSessions', 'max_sessions', int, 128],
        ['ImmediateData', 'immediate_data', bool, True],
        ['ErrorRecoveryLevel', 'error_recovery_level', int, 0],
        ['NopInInterval', 'nop_in_interval', int, 30],
        ['MinConnectionsPerCore', 'min_connections_per_core', int, 4],
        ['DefaultTime2Wait', 'default_time2wait', int, 2],
        ['QueueDepth', 'max_queue_depth', int, 64],
        ['', 'first_burst_length', int, 8192]
    ]
    for opt in config.options(section):
        set_param(params, opt, config.get(section, opt))
    return [{
        "method": "set_iscsi_options",
        "params": to_json_params(params)
    }]
+
+
def get_iscsi_portal_group_json(config, name):
    """Convert a [PortalGroup<N>] ini section into one add_portal_group RPC
    entry.  Each "Portal" line is "<label> <host>:<port>[@<cpumask>]"; the
    group tag is the number embedded in the section name."""
    portals = []
    for option in config.options(name):
        if option != "Portal":
            continue
        for line in config.get(name, option).split("\n"):
            fields = re.findall("\S+", line)
            head = fields[1].rsplit(":", 1)
            portal = {'host': head[0]}
            if "@" in fields[1]:
                # Optional "@cpumask" suffix after the port.
                pieces = head[1].split("@")
                portal['port'] = pieces[0]
                portal['cpumask'] = pieces[1]
            else:
                portal['port'] = head[1]
            portals.append(portal)

    return [{
        "params": {
            "portals": portals,
            "tag": int(re.findall('\d+', name)[0])
        },
        "method": "add_portal_group"
    }]
+
+
def get_iscsi_initiator_group_json(config, name):
    """Convert an [InitiatorGroup<N>] ini section into one
    add_initiator_group RPC entry; the tag is the number embedded in the
    section name."""
    initiators = []
    netmasks = []

    for option in config.options(name):
        value = config.get(name, option)
        if option == "InitiatorName":
            initiators.append(value)
        elif option == "Netmask":
            netmasks.append(value)

    return [{
        "params": {
            "initiators": initiators,
            "tag": int(re.findall('\d+', name)[0]),
            "netmasks": netmasks
        },
        "method": "add_initiator_group"
    }]
+
+
+def get_iscsi_target_node_json(config, section):
    # Build one construct_target_node entry from a [TargetNode*] section.
    # CHAP flag interplay: "CHAP"/"Mutual" enable require/mutual chap,
    # "Auto" resets all flags, "None" disables CHAP entirely.
+    luns = []
+    mutual_chap = False
+    name = ""
+    alias_name = ""
+    require_chap = False
    # NOTE(review): chap_group defaults to 1 here, while the [iSCSI] options
    # default is 0 — confirm this asymmetry is intended.
+    chap_group = 1
+    pg_ig_maps = []
+    data_digest = False
+    disable_chap = False
+    header_digest = False
+    queue_depth = 64
+
+    for option in config.options(section):
+        value = config.get(section, option)
+        if "TargetName" == option:
+            name = value
+        if "TargetAlias" == option:
+            alias_name = value.replace("\"", "")
+        if "Mapping" == option:
            # "Mapping PortalGroupX InitiatorGroupY" — tags are the digits
            # embedded in the group names.
+            items = re.findall("\S+", value)
+            pg_ig_maps.append({
+                "ig_tag": int(re.findall('\d+', items[1])[0]),
+                "pg_tag": int(re.findall('\d+', items[0])[0])
+            })
+        if "AuthMethod" == option:
+            items = re.findall("\S+", value)
+            for item in items:
+                if "CHAP" == item:
+                    require_chap = True
+                elif "Mutual" == item:
+                    mutual_chap = True
+                elif "Auto" == item:
+                    disable_chap = False
+                    require_chap = False
+                    mutual_chap = False
+                elif "None" == item:
+                    disable_chap = True
+                    require_chap = False
+                    mutual_chap = False
+        if "AuthGroup" == option:  # AuthGroup1
+            items = re.findall("\S+", value)
+            chap_group = int(re.findall('\d+', items[0])[0])
+        if "UseDigest" == option:
+            items = re.findall("\S+", value)
+            for item in items:
+                if "Header" == item:
+                    header_digest = True
+                elif "Data" == item:
+                    data_digest = True
+                elif "Auto" == item:
+                    header_digest = False
+                    data_digest = False
+
        # NOTE(review): lun_id is assigned sequentially in option order; the
        # digits in the "LUN<N>" option name are ignored — confirm this
        # matches the legacy parser's behavior.
+        if re.match("LUN\d+", option):
+            luns.append({"lun_id": len(luns),
+                         "bdev_name": value})
+        if "QueueDepth" == option:
+            queue_depth = int(value)
+
+    params = {"alias_name": alias_name}
    # The node base prefix is fixed here, matching the set_iscsi_options
    # default node_base.
+    params["name"] = "iqn.2016-06.io.spdk:%s" % name
+    params["luns"] = luns
+    params["pg_ig_maps"] = pg_ig_maps
+    params["queue_depth"] = queue_depth
+    params["chap_group"] = chap_group
+    params["header_digest"] = header_digest
+    params["mutual_chap"] = mutual_chap
+    params["require_chap"] = require_chap
+    params["data_digest"] = data_digest
+    params["disable_chap"] = disable_chap
+
+    target_json = {
+        "params": params,
+        "method": "construct_target_node"
+    }
+
+    return [target_json]
+
+
+if __name__ == "__main__":
+    # Read a legacy INI-style SPDK config from stdin and emit the equivalent
+    # JSON-RPC configuration on stdout.
+    try:
+        # Single-space delimiter; strict=False tolerates the duplicate
+        # sections/options that legacy configs may contain.
+        config = configparser.ConfigParser(strict=False, delimiters=(' '),
+                                           dict_type=OptionOrderedDict,
+                                           allow_no_value=True)
+        # Do not parse options and values. Capital letters are relevant.
+        config.optionxform = str
+        config.read_file(sys.stdin)
+    except Exception as e:
+        print("Exception while parsing config: %s" % e)
+        exit(1)
+    # Add missing sections to generate default configuration
+    for section in ['Nvme', 'Nvmf', 'Bdev', 'iSCSI']:
+        if section not in config.sections():
+            config.add_section(section)
+
+    for section in config.sections():
+        match = re.match("(Bdev|Nvme|Malloc|VirtioUser\d+|Split|Pmem|AIO|"
+                         "iSCSI|PortalGroup\d+|InitiatorGroup\d+|"
+                         "TargetNode\d+|Nvmf|Subsystem\d+|VhostScsi\d+|"
+                         "VhostBlk\d+|VhostNvme\d+)", section)
+        if match:
+            # Strip the numeric suffix (e.g. "TargetNode12" -> "TargetNode")
+            # to select the per-section converter.
+            match_section = ''.join(letter for letter in match.group(0)
+                                    if not letter.isdigit())
+            if match_section == "Bdev":
+                items = get_bdev_options_json(config, section)
+            elif match_section == "AIO":
+                items = get_aio_bdev_json(config, section)
+            elif match_section == "Malloc":
+                items = get_malloc_bdev_json(config, section)
+            elif match_section == "Nvme":
+                items = get_nvme_bdev_json(config, section)
+            elif match_section == "Pmem":
+                items = get_pmem_bdev_json(config, section)
+            elif match_section == "Split":
+                items = get_split_bdev_json(config, section)
+            elif match_section == "Nvmf":
+                items = get_nvmf_options_json(config, section)
+            elif match_section == "Subsystem":
+                items = get_nvmf_subsystem_json(config, section)
+            elif match_section == "VhostScsi":
+                items = get_vhost_scsi_json(config, section)
+            elif match_section == "VhostBlk":
+                items = get_vhost_blk_json(config, section)
+            elif match_section == "VhostNvme":
+                items = get_vhost_nvme_json(config, section)
+            elif match_section == "VirtioUser":
+                items = get_virtio_user_json(config, section)
+            elif match_section == "iSCSI":
+                items = get_iscsi_options_json(config, section)
+            elif match_section == "PortalGroup":
+                items = get_iscsi_portal_group_json(config, section)
+            elif match_section == "InitiatorGroup":
+                items = get_iscsi_initiator_group_json(config, section)
+            elif match_section == "TargetNode":
+                items = get_iscsi_target_node_json(config, section)
+            for item in items:
+                # Vhost scsi/nvme items are grouped under their controller
+                # creation method; everything else is keyed by its own method.
+                if match_section == "VhostScsi":
+                    section_to_subsystem[match_section][
+                        "construct_vhost_scsi_controller"].append(item)
+                elif match_section == "VhostNvme":
+                    section_to_subsystem[match_section][
+                        "construct_vhost_nvme_controller"].append(item)
+                else:
+                    section_to_subsystem[match_section][
+                        item['method']].append(item)
+        elif section == "Global":
+            pass
+        elif section == "VirtioPci":
+            print("Please use spdk target flags.")
+            exit(1)
+        else:
+            print("An invalid section detected: %s.\n"
+                  "Please revise your config file." % section)
+            exit(1)
+    json.dump(generate_new_json_config(), sys.stdout, indent=2)
+    print("")
diff --git a/src/spdk/scripts/detect_cc.sh b/src/spdk/scripts/detect_cc.sh
new file mode 100755
index 00000000..936520e3
--- /dev/null
+++ b/src/spdk/scripts/detect_cc.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+# Detect the C/C++ compiler family and linker flavor, validate LTO support,
+# and print mk/cc.mk variable assignments (CC, CXX, CCAR, CC_TYPE) on stdout.
+
+set -e
+
+# Print all arguments to stderr.
+function err()
+{
+	echo "$@" >&2
+}
+
+# Print usage/help text to stderr.
+function usage()
+{
+	err "Detect compiler and linker versions, generate mk/cc.mk"
+	err ""
+	err "Usage: ./detect_cc.sh [OPTION]..."
+	err ""
+	err "Defaults for the options are specified in brackets."
+	err ""
+	err "General:"
+	err " -h, --help Display this help and exit"
+	err " --cc=path C compiler to use"
+	err " --cxx=path C++ compiler to use"
+	err " --lto=[y|n] Attempt to configure for LTO"
+
+}
+
+# Defaults: system cc/c++, LTO off.
+CC=cc
+CXX=c++
+LTO=n
+
+for i in "$@"; do
+	case "$i" in
+		-h|--help)
+			usage
+			exit 0
+			;;
+		--cc=*)
+			CC="${i#*=}"
+			;;
+		--cxx=*)
+			CXX="${i#*=}"
+			;;
+		--lto=*)
+			LTO="${i#*=}"
+			;;
+		--)
+			break
+			;;
+		*)
+			err "Unrecognized option $i"
+			usage
+			exit 1
+	esac
+done
+
+# "<name> version" in `-v` output identifies the toolchain (gcc vs clang).
+CC_TYPE=$($CC -v 2>&1 | grep -o -E '\w+ version' | head -1 | awk '{ print $1 }')
+CXX_TYPE=$($CXX -v 2>&1 | grep -o -E '\w+ version' | head -1 | awk '{ print $1 }')
+LD_TYPE=$(ld -v 2>&1 | awk '{print $2}')
+
+if [ "$CC_TYPE" != "$CXX_TYPE" ]; then
+	err "C compiler is $CC_TYPE but C++ compiler is $CXX_TYPE"
+	err "This may result in errors"
+fi
+
+# LTO needs the toolchain-specific ar wrapper so the archive keeps IR objects.
+CCAR="ar"
+if [ "$LTO" = "y" ]; then
+	if [ "$CC_TYPE" = "clang" ]; then
+		if [ "$LD_TYPE" != "gold" ]; then
+			err "Using LTO with clang requires the gold linker."
+			exit 1
+		fi
+		CCAR="llvm-ar"
+	else
+		CCAR="gcc-ar"
+	fi
+fi
+
+# "CC?=" / "CXX?=" allow the user's environment to override the detection.
+echo "CC?=$CC"
+echo "CXX?=$CXX"
+echo "CCAR=$CCAR"
+echo "CC_TYPE=$CC_TYPE"
diff --git a/src/spdk/scripts/eofnl b/src/spdk/scripts/eofnl
new file mode 100755
index 00000000..59544561
--- /dev/null
+++ b/src/spdk/scripts/eofnl
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Make sure file has a trailing newline
+# Also checks for (and fixes in place) extra trailing newlines, DOS line
+# endings and trailing whitespace.  Exits non-zero whenever a problem was
+# found so CI can flag the file, even after it was auto-fixed.
+
+f="$1"
+
+if [ -z "$f" ]; then
+	echo "usage: $0 <file>"
+	exit 1
+fi
+
+# Silently ignore paths that are not regular files (e.g. deleted files).
+if [ ! -f "$f" ]; then
+	exit 0
+fi
+
+# Non-empty last byte means the file does not end with a newline; append one.
+if [[ $(tail -c1 "$f") ]]; then
+	echo "$f: No newline at end of file"
+	echo '' >> "$f"
+	exit 1
+fi
+
+# Last two bytes both newlines -> blank line at EOF (reported, not fixed).
+if [[ ! $(tail -c2 "$f") ]]; then
+	echo "$f: Extra trailing newline"
+	exit 1
+fi
+
+if grep -q $'\r' "$f"; then
+	echo "$f: DOS-style newlines"
+	dos2unix "$f" &> /dev/null
+	exit 1
+fi
+
+if grep -q $'[\t ]$' "$f"; then
+	echo "$f: Trailing whitespace"
+	sed -i $'s/[ \t]*$//' "$f"
+	exit 1
+fi
+
+exit 0
diff --git a/src/spdk/scripts/fio.py b/src/spdk/scripts/fio.py
new file mode 100755
index 00000000..0868633e
--- /dev/null
+++ b/src/spdk/scripts/fio.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+
+from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError
+import re
+import sys
+import signal
+
+# fio job file skeleton; filled in by create_fio_config().
+fio_template = """
+[global]
+thread=1
+invalidate=1
+rw=%(testtype)s
+time_based=1
+runtime=%(runtime)s
+ioengine=libaio
+direct=1
+bs=%(blocksize)d
+iodepth=%(iodepth)d
+norandommap=%(norandommap)d
+%(verify)s
+verify_dump=1
+
+"""
+
+# Extra [global] lines appended when data verification is requested.
+verify_template = """
+do_verify=1
+verify=crc32c-intel
+"""
+
+
+# One [jobN] stanza per target block device.
+fio_job_template = """
+[job%(jobnumber)d]
+filename=%(device)s
+
+"""
+
+
+def interrupt_handler(signum, frame):
+    # SIGINT/SIGTERM handler: stop the fio child (set in main()) and exit.
+    fio.terminate()
+    print("FIO terminated")
+    sys.exit(0)
+
+
+def main():
+    """Run fio over all iSCSI-attached scsi disks with CLI-given parameters.
+
+    argv: <io_size> <queue_depth> <test_type> <runtime> [verify]
+    Exits with fio's return code.
+    """
+    global fio
+    if (len(sys.argv) < 5):
+        print("usage:")
+        print(" " + sys.argv[0] + " <io_size> <queue_depth> <test_type> <runtime>")
+        print("advanced usage:")
+        print("If you want to run fio with verify, please add verify string after runtime.")
+        print("Currently fio.py only support write rw randwrite randrw with verify enabled.")
+        sys.exit(1)
+
+    io_size = int(sys.argv[1])
+    queue_depth = int(sys.argv[2])
+    test_type = sys.argv[3]
+    runtime = sys.argv[4]
+    # Any 5th argument (regardless of value) enables verification.
+    if len(sys.argv) > 5:
+        verify = True
+    else:
+        verify = False
+
+    devices = get_target_devices()
+    print(("Found devices: ", devices))
+
+    configure_devices(devices)
+    try:
+        fio_executable = check_output("which fio", shell=True).split()[0]
+    except CalledProcessError as e:
+        sys.stderr.write(str(e))
+        sys.stderr.write("\nCan't find the fio binary, please install it.\n")
+        sys.exit(1)
+
+    device_paths = ['/dev/' + dev for dev in devices]
+    sys.stdout.flush()
+    signal.signal(signal.SIGTERM, interrupt_handler)
+    signal.signal(signal.SIGINT, interrupt_handler)
+    # Feed the generated job file to fio on stdin.
+    fio = Popen([fio_executable, '-'], stdin=PIPE)
+    fio.communicate(create_fio_config(io_size, queue_depth, device_paths, test_type, runtime, verify).encode())
+    # NOTE(review): communicate() already closes stdin and waits for the
+    # child, so the two calls below are redundant but harmless.
+    fio.stdin.close()
+    rc = fio.wait()
+    print("FIO completed with code %d\n" % rc)
+    sys.stdout.flush()
+    sys.exit(rc)
+
+
+def get_target_devices():
+    # Scsi disk names (e.g. "sda") attached via the current iSCSI sessions.
+    output = check_output('iscsiadm -m session -P 3', shell=True)
+    return re.findall("Attached scsi disk (sd[a-z]+)", output.decode("ascii"))
+
+
+def create_fio_config(size, q_depth, devices, test, run_time, verify):
+    # Render the full fio job file text for the given parameters and devices.
+    # norandommap is disabled when verifying so every block gets checked.
+    norandommap = 0
+    if not verify:
+        verifyfio = ""
+        norandommap = 1
+    else:
+        verifyfio = verify_template
+    fiofile = fio_template % {"blocksize": size, "iodepth": q_depth,
+                              "testtype": test, "runtime": run_time,
+                              "norandommap": norandommap, "verify": verifyfio}
+    for (i, dev) in enumerate(devices):
+        fiofile += fio_job_template % {"jobnumber": i, "device": dev}
+    return fiofile
+
+
+def set_device_parameter(devices, filename_template, value):
+    # Write `value` into the per-device sysfs file; returns False if any
+    # write was rejected.  NOTE(review): only write errors are swallowed --
+    # a failing open() still propagates to the caller; confirm this is the
+    # intent before relying on the return value.
+    valid_value = True
+
+    for dev in devices:
+        filename = filename_template % dev
+        f = open(filename, 'r+b')
+        try:
+            f.write(value.encode())
+            f.close()
+        except OSError:
+            valid_value = False
+            continue
+
+    return valid_value
+
+
+def configure_devices(devices):
+    # Tune block-layer settings for benchmarking: no merges, deep request
+    # queue, and the noop/none elevator.
+    set_device_parameter(devices, "/sys/block/%s/queue/nomerges", "2")
+    set_device_parameter(devices, "/sys/block/%s/queue/nr_requests", "128")
+    requested_qd = 128
+    qd = requested_qd
+    # Walk the queue depth down until the device accepts it.
+    while qd > 0:
+        try:
+            set_device_parameter(devices, "/sys/block/%s/device/queue_depth", str(qd))
+            break
+        except IOError:
+            qd = qd - 1
+    if qd == 0:
+        print("Could not set block device queue depths.")
+    else:
+        print("Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd)))
+    # Newer kernels call the no-op multiqueue scheduler "none".
+    if not set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "noop"):
+        set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "none")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/spdk/scripts/gen_crypto.sh b/src/spdk/scripts/gen_crypto.sh
new file mode 100755
index 00000000..f92ea163
--- /dev/null
+++ b/src/spdk/scripts/gen_crypto.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -e
+
+rootdir=$(readlink -f $(dirname $0))/..
+source "$rootdir/scripts/common.sh"
+
+base_bdev=$1
+
+if [ -n $base_bdev ]; then
+ echo
+ echo "[crypto]"
+ if [ $(lspci -d:37c8 | wc -l) -eq 0 ]; then
+ echo " CRY $base_bdev crypto_ram 0123456789123456 crypto_aesni_mb"
+ else
+ echo " CRY $base_bdev crypto_ram 0123456789123456 crypto_qat"
+ fi
+fi
diff --git a/src/spdk/scripts/gen_nvme.sh b/src/spdk/scripts/gen_nvme.sh
new file mode 100755
index 00000000..fa8a9242
--- /dev/null
+++ b/src/spdk/scripts/gen_nvme.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+# Generate an SPDK NVMe bdev configuration for every NVMe controller in the
+# system: legacy INI format by default, JSON-RPC format with --json.
+
+set -e
+
+rootdir=$(readlink -f $(dirname $0))/..
+source "$rootdir/scripts/common.sh"
+
+# PCI class 01/08/02 = NVMe controllers.
+bdfs=($(iter_pci_class_code 01 08 02))
+
+# Emit a legacy [Nvme] section, one TransportID line per controller.
+function create_classic_config()
+{
+	echo "[Nvme]"
+	for (( i=0; i < ${#bdfs[@]}; i++))
+	do
+		echo " TransportID \"trtype:PCIe traddr:${bdfs[i]}\" Nvme$i"
+	done
+}
+
+# Emit the equivalent JSON: a "bdev" subsystem with one construct_nvme_bdev
+# call per controller.
+function create_json_config()
+{
+	echo "{"
+	echo '"subsystem": "bdev",'
+	echo '"config": ['
+	for (( i=0; i < ${#bdfs[@]}; i++))
+	do
+		echo '{'
+		echo '"params": {'
+		echo '"trtype": "PCIe",'
+		echo "\"name\": \"Nvme$i\","
+		echo "\"traddr\": \"${bdfs[i]}\""
+		echo '},'
+		echo '"method": "construct_nvme_bdev"'
+		# BUGFIX: quote the expansion; unquoted, an unset element reduces
+		# the command to `[ -z ]`, which is only true by accident of the
+		# one-operand behavior of test.  Last element gets no comma.
+		if [ -z "${bdfs[i+1]}" ]; then
+			echo '}'
+		else
+			echo '},'
+		fi
+	done
+	echo ']'
+	echo '}'
+}
+
+if [ "$1" = "--json" ]; then
+	create_json_config
+else
+	create_classic_config
+fi
diff --git a/src/spdk/scripts/gen_rbd.sh b/src/spdk/scripts/gen_rbd.sh
new file mode 100755
index 00000000..07fdbd1e
--- /dev/null
+++ b/src/spdk/scripts/gen_rbd.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+# Emit a [Ceph] config section on stdout when the ceph CLI is installed.
+# Expects RBD_POOL and RBD_NAME in the environment; 512 is the block size.
+
+set -e
+
+# No ceph on this host -> nothing to generate, succeed silently.
+if ! hash ceph &> /dev/null; then
+	exit 0
+fi
+
+echo
+echo "[Ceph]"
+echo " Ceph $RBD_POOL $RBD_NAME 512"
diff --git a/src/spdk/scripts/genconfig.py b/src/spdk/scripts/genconfig.py
new file mode 100755
index 00000000..550f0e9b
--- /dev/null
+++ b/src/spdk/scripts/genconfig.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+
+import os
+import re
+import sys
+
+comment = re.compile('^\s*#')
+assign = re.compile('^\s*([a-zA-Z_]+)\s*(\?)?=\s*([^#]*)')
+
+args = os.environ.copy()
+for arg in sys.argv:
+ m = assign.match(arg)
+ if m:
+ var = m.group(1).strip()
+ val = m.group(3).strip()
+ args[var] = val
+
+defs = {}
+try:
+ with open("mk/config.mk") as f:
+ for line in f:
+ line = line.strip()
+ if not comment.match(line):
+ m = assign.match(line)
+ if m:
+ var = m.group(1).strip()
+ default = m.group(3).strip()
+ val = default
+ if var in args:
+ val = args[var]
+ if default.lower() == 'y' or default.lower() == 'n':
+ if val.lower() == 'y':
+ defs["SPDK_{0}".format(var)] = 1
+ else:
+ defs["SPDK_{0}".format(var)] = 0
+ else:
+ strval = val.replace('"', '\"')
+ defs["SPDK_{0}".format(var)] = strval
+except IOError:
+ print("mk/config.mk not found")
+
+for key, value in sorted(defs.items()):
+ if value == 0:
+ print("#undef {0}".format(key))
+ else:
+ print("#define {0} {1}".format(key, value))
diff --git a/src/spdk/scripts/perf/nvme/README b/src/spdk/scripts/perf/nvme/README
new file mode 100644
index 00000000..6468399a
--- /dev/null
+++ b/src/spdk/scripts/perf/nvme/README
@@ -0,0 +1,12 @@
+These scripts are used to perform benchmark testing with fio.
+The run_fio_test.py is the main script that runs the performance test and parses the test results.
+Users can populate test parameters for different fio workloads in the lists (q_depth, io_size, workload_type, mix, core_mask and run_time) at the top of the run_fio_test.py script.
+The run_fio_test.py puts the test results in a csv file named <hostname>_<num ssds>ssds_perf_output.csv.
+The run_fio_test.sh script demonstrates how to invoke the run_fio_test.py script with the
+input parameters: path_to_fio_conf, path_to_ioengine and num_ssds. The run_fio_test.sh script will
+call the SPDK setup.sh script to unbind NVMe devices from the kernel driver and bind them to the uio driver.
+We include a sample fio configuration file that includes the parameters we use in our test environment.
+The run_fio_test.py will append the NVMe devices to the end of the configuration file. The number of
+NVMe devices used is specified using the num_ssds parameter.
+
+Usage: ./run_fio_test.sh
diff --git a/src/spdk/scripts/perf/nvme/fio_test.conf b/src/spdk/scripts/perf/nvme/fio_test.conf
new file mode 100644
index 00000000..a03c6a1e
--- /dev/null
+++ b/src/spdk/scripts/perf/nvme/fio_test.conf
@@ -0,0 +1,20 @@
+[global]
+ioengine=${IOENGINE}
+thread=1
+group_reporting=1
+direct=1
+verify=0
+norandommap=1
+cpumask=1
+percentile_list=50:90:99:99.5:99.9:99.99:99.999
+
+[perf_test]
+stonewall
+description="Run NVMe driver performance test for a given workload"
+bs=${BLK_SIZE}
+rw=${RW}
+rwmixread=${MIX}
+iodepth=${IODEPTH}
+time_based=1
+ramp_time=10s
+runtime=${RUNTIME}
diff --git a/src/spdk/scripts/perf/nvme/run_fio_test.py b/src/spdk/scripts/perf/nvme/run_fio_test.py
new file mode 100755
index 00000000..79d9e566
--- /dev/null
+++ b/src/spdk/scripts/perf/nvme/run_fio_test.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python3
+
+# This script runs fio benchmark test on the local nvme device using the SPDK NVMe driver.
+# Prework: Run script/setup.sh to bind SSDs to SPDK driver.
+# Prework: Change any fio configurations in the template fio config file fio_test.conf
+# Output: A csv file <hostname>_<num ssds>_perf_output.csv
+
+import subprocess
+from subprocess import check_call, call, check_output, Popen, PIPE
+import random
+import os
+import sys
+import re
+import signal
+import getopt
+from datetime import datetime
+from itertools import *
+import csv
+import itertools
+from shutil import copyfile
+import json
+
+# Populate test parameters into these lists to run different workloads
+# The configuration below runs QD 1 & 128. To add QD 32 set q_depth=['1', '32', '128']
+q_depth = ['1', '128']
+# io_size specifies the size in bytes of the IO workload.
+# To add 64K IOs set io_size = ['4096', '65536']
+io_size = ['4096']
+workload_type = ['randrw']
+mix = ['100']
+core_mask = ['0x1']
+# run_time parameter specifies how long to run each test.
+# Set run_time = ['600'] to run the test for 10 minutes
+run_time = ['60']
+# iter_num parameter is used to run the test multiple times.
+# set iter_num = ['1', '2', '3'] to repeat each test 3 times
+iter_num = ['1']
+
+
+def run_fio(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec):
+    # Run one fio job with the given parameters; fio writes its JSON report
+    # to a file whose name encodes the parameters (parse_results reads it).
+    print("Running Test: IO Size={} QD={} Mix={} CPU Mask={}".format(io_size_bytes, qd, rw_mix, cpu_mask))
+    string = "s_" + str(io_size_bytes) + "_q_" + str(qd) + "_m_" + str(rw_mix) + "_c_" + str(cpu_mask) + "_run_" + str(run_num)
+
+    # Call fio
+    # Parameters are passed to the fio config template via environment vars.
+    path_to_fio_conf = config_file_for_test
+    path_to_ioengine = sys.argv[2]
+    command = "BLK_SIZE=" + str(io_size_bytes) + " RW=" + str(workload) + " MIX=" + str(rw_mix) \
+        + " IODEPTH=" + str(qd) + " RUNTIME=" + str(run_time_sec) + " IOENGINE=" + path_to_ioengine \
+        + " fio " + str(path_to_fio_conf) + " -output=" + string + " -output-format=json"
+    output = subprocess.check_output(command, shell=True)
+
+    print("Finished Test: IO Size={} QD={} Mix={} CPU Mask={}".format(io_size_bytes, qd, rw_mix, cpu_mask))
+    return
+
+
+def parse_results(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec):
+    # Read the JSON report written by run_fio() for the same parameter set,
+    # print a human-readable summary and append one CSV row to
+    # result_file_name (module-level global).
+    results_array = []
+
+    # If json file has results for multiple fio jobs pick the results from the right job
+    job_pos = 0
+
+    # generate the next result line that will be added to the output csv file
+    results = str(io_size_bytes) + "," + str(qd) + "," + str(rw_mix) + "," \
+        + str(workload) + "," + str(cpu_mask) + "," + str(run_time_sec) + "," + str(run_num)
+
+    # Read the results of this run from the test result file
+    string = "s_" + str(io_size_bytes) + "_q_" + str(qd) + "_m_" + str(rw_mix) + "_c_" + str(cpu_mask) + "_run_" + str(run_num)
+    with open(string) as json_file:
+        data = json.load(json_file)
+        job_name = data['jobs'][job_pos]['jobname']
+        # print "FIO job name: ", job_name
+        # fio >= 2.99 reports latency in nanoseconds ('lat_ns'); older
+        # versions use microseconds ('lat').
+        if 'lat_ns' in data['jobs'][job_pos]['read']:
+            lat = 'lat_ns'
+            lat_units = 'ns'
+        else:
+            lat = 'lat'
+            lat_units = 'us'
+        read_iops = float(data['jobs'][job_pos]['read']['iops'])
+        read_bw = float(data['jobs'][job_pos]['read']['bw'])
+        read_avg_lat = float(data['jobs'][job_pos]['read'][lat]['mean'])
+        read_min_lat = float(data['jobs'][job_pos]['read'][lat]['min'])
+        read_max_lat = float(data['jobs'][job_pos]['read'][lat]['max'])
+        write_iops = float(data['jobs'][job_pos]['write']['iops'])
+        write_bw = float(data['jobs'][job_pos]['write']['bw'])
+        write_avg_lat = float(data['jobs'][job_pos]['write'][lat]['mean'])
+        write_min_lat = float(data['jobs'][job_pos]['write'][lat]['min'])
+        write_max_lat = float(data['jobs'][job_pos]['write'][lat]['max'])
+        print("%-10s" % "IO Size", "%-10s" % "QD", "%-10s" % "Mix",
+              "%-10s" % "Workload Type", "%-10s" % "CPU Mask",
+              "%-10s" % "Run Time", "%-10s" % "Run Num",
+              "%-15s" % "Read IOps",
+              "%-10s" % "Read MBps", "%-15s" % "Read Avg. Lat(" + lat_units + ")",
+              "%-15s" % "Read Min. Lat(" + lat_units + ")", "%-15s" % "Read Max. Lat(" + lat_units + ")",
+              "%-15s" % "Write IOps",
+              "%-10s" % "Write MBps", "%-15s" % "Write Avg. Lat(" + lat_units + ")",
+              "%-15s" % "Write Min. Lat(" + lat_units + ")", "%-15s" % "Write Max. Lat(" + lat_units + ")")
+        print("%-10s" % io_size_bytes, "%-10s" % qd, "%-10s" % rw_mix,
+              "%-10s" % workload, "%-10s" % cpu_mask, "%-10s" % run_time_sec,
+              "%-10s" % run_num, "%-15s" % read_iops, "%-10s" % read_bw,
+              "%-15s" % read_avg_lat, "%-15s" % read_min_lat, "%-15s" % read_max_lat,
+              "%-15s" % write_iops, "%-10s" % write_bw, "%-15s" % write_avg_lat,
+              "%-15s" % write_min_lat, "%-15s" % write_max_lat)
+        results = results + "," + str(read_iops) + "," + str(read_bw) + "," \
+            + str(read_avg_lat) + "," + str(read_min_lat) + "," + str(read_max_lat) \
+            + "," + str(write_iops) + "," + str(write_bw) + "," + str(write_avg_lat) \
+            + "," + str(write_min_lat) + "," + str(write_max_lat)
+        with open(result_file_name, "a") as result_file:
+            result_file.write(results + "\n")
+        results_array = []
+    return
+
+
+def get_nvme_devices_count():
+    # Number of NVMe controllers ("Non-Volatile memory controller" in lspci).
+    output = check_output('lspci | grep -i Non | wc -l', shell=True)
+    return int(output)
+
+
+def get_nvme_devices_bdf():
+    # PCI bus:device.function addresses of all NVMe controllers.
+    output = check_output('lspci | grep -i Non | awk \'{print $1}\'', shell=True).decode("utf-8")
+    output = output.split()
+    return output
+
+
+def add_filename_to_conf(conf_file_name, bdf):
+    # Append one SPDK fio-plugin filename= line for the given device BDF.
+    filestring = "filename=trtype=PCIe traddr=0000." + bdf.replace(":", ".") + " ns=1"
+    with open(conf_file_name, "a") as conf_file:
+        conf_file.write(filestring + "\n")
+
+
+if len(sys.argv) != 4:
+ print("usage: " % sys.argv[0] % " path_to_fio_conf path_to_ioengine num_ssds")
+ sys.exit()
+
+num_ssds = int(sys.argv[3])
+if num_ssds > get_nvme_devices_count():
+ print("System does not have {} NVMe SSDs.".format(num_ssds))
+ sys.exit()
+
+host_name = os.uname()[1]
+result_file_name = host_name + "_" + sys.argv[3] + "ssds_perf_output.csv"
+
+bdf = get_nvme_devices_bdf()
+config_file_for_test = sys.argv[1] + "_" + sys.argv[3] + "ssds"
+copyfile(sys.argv[1], config_file_for_test)
+
+# Add the number of threads to the fio config file
+with open(config_file_for_test, "a") as conf_file:
+ conf_file.write("numjobs=" + str(1) + "\n")
+
+# Add the NVMe bdf to the fio config file
+for i in range(0, num_ssds):
+ add_filename_to_conf(config_file_for_test, bdf[i])
+
+# Set up for output
+columns = "IO_Size,Q_Depth,Workload_Mix,Workload_Type,Core_Mask,Run_Time,Run,Read_IOPS,Read_bw(KiB/s), \
+ Read_Avg_lat(us),Read_Min_Lat(us),Read_Max_Lat(us),Write_IOPS,Write_bw(KiB/s),Write_Avg_lat(us), \
+ Write_Min_Lat(us),Write_Max_Lat(us)"
+
+with open(result_file_name, "w+") as result_file:
+ result_file.write(columns + "\n")
+
+for i, (s, q, m, w, c, t) in enumerate(itertools.product(io_size, q_depth, mix, workload_type, core_mask, run_time)):
+ run_fio(s, q, m, c, i, w, t)
+ parse_results(s, q, m, c, i, w, t)
+
+result_file.close()
diff --git a/src/spdk/scripts/perf/nvme/run_fio_test.sh b/src/spdk/scripts/perf/nvme/run_fio_test.sh
new file mode 100755
index 00000000..454ea555
--- /dev/null
+++ b/src/spdk/scripts/perf/nvme/run_fio_test.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+
+# Bind devices to NVMe driver
+$rootdir/scripts/setup.sh
+
+# Run Performance Test with 1 SSD
+$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 1
+
+# 2 SSDs test run
+$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 2
+
+# 4 SSDs test run
+$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 4
+
+# 8 SSDs test run
+$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 8
diff --git a/src/spdk/scripts/perf/vhost/fio_test.conf b/src/spdk/scripts/perf/vhost/fio_test.conf
new file mode 100644
index 00000000..e1054e07
--- /dev/null
+++ b/src/spdk/scripts/perf/vhost/fio_test.conf
@@ -0,0 +1,21 @@
+[global]
+ioengine=libaio
+thread=1
+group_reporting=1
+direct=1
+verify=0
+norandommap=1
+cpumask=1
+percentile_list=50:90:99:99.5:99.9:99.99:99.999
+
+[perf_test]
+stonewall
+description="Run NVMe driver performance test for a given workload"
+bs={blksize}
+rw={rw}
+rwmixread={rwmixread}
+iodepth={iodepth}
+time_based=1
+ramp_time={ramptime}
+runtime={runtime}
+filename=
diff --git a/src/spdk/scripts/perf/vhost/run_vhost_test.py b/src/spdk/scripts/perf/vhost/run_vhost_test.py
new file mode 100644
index 00000000..bb1f9985
--- /dev/null
+++ b/src/spdk/scripts/perf/vhost/run_vhost_test.py
@@ -0,0 +1,208 @@
+import os
+import sys
+import argparse
+import multiprocessing
+import subprocess
+from subprocess import check_call, call, check_output, Popen, PIPE
+
+
+def range_incl(a, b):
+    # Inclusive integer range [a, b] as a list.
+    return list(range(a, b + 1))
+
+
+def list_spdk_used_cpus(cpus):
+    # Expand a CPU list string like "0,2-5,9" into a flat list of ints.
+    cpu_list = []
+    for chunk in cpus.split(","):
+        if "-" in chunk:
+            _ = chunk.split("-")
+            _ = list(map(int, _))
+            cpu_list.extend(list(range_incl(*_)))
+        else:
+            cpu_list.append(int(chunk))
+    return cpu_list
+
+
+def gen_cpu_mask_config(output_dir, spdk_cpu_list, vm_count, vm_cpu_num):
+    # Write the combined SPDK + QEMU CPU mask config file; return its path.
+    spdk = gen_spdk_cpu_mask_config(spdk_cpu_list)
+    qemu = gen_qemu_cpu_mask_config(spdk_cpu_list, vm_count, vm_cpu_num)
+    file_path = os.path.join(output_dir, "mask_config")
+    with open(file_path, "w") as fh:
+        fh.write("".join([spdk, qemu]))
+    return file_path
+
+
+def gen_spdk_cpu_mask_config(spdk_cpu_list):
+    # Build the vhost reactor-mask and master-core lines for the config file.
+    cpus = "vhost_0_reactor_mask=[%s]" % (spdk_cpu_list)
+
+    # Go through assigned CPUs and use the lowest CPU index as
+    # default primary core
+    cpu_indexes = list_spdk_used_cpus(spdk_cpu_list)
+    cpu_indexes.sort()
+    print(cpu_indexes)
+
+    pr_core = "vhost_0_master_core=%s" % (cpu_indexes[0])
+    return "\n".join([cpus, pr_core, "\n"])
+
+
+def get_host_cpus():
+    # Partition host CPU indexes into two per-NUMA-node lists.
+    # NOTE(review): assumes a 2-socket host and infers hyper-threading from
+    # lscpu's "per core" line -- confirm on other topologies.
+    cpu_num = multiprocessing.cpu_count()
+    cpu_list = list(range(0, cpu_num))
+    output = check_output("lscpu | grep 'per core'", shell=True)
+
+    # Assuming 2-socket server
+    if "2" in str(output):
+        # HT on: each node owns one quarter of physical and one quarter of
+        # logical (sibling) CPU indexes.
+        ht_enabled = True
+        cpu_chunk = int(cpu_num/4)
+        numa0_cpus = cpu_list[0:cpu_chunk]
+        numa0_cpus.extend(cpu_list[2*cpu_chunk:3*cpu_chunk])
+        numa1_cpus = cpu_list[cpu_chunk:2*cpu_chunk]
+        numa1_cpus.extend(cpu_list[3*cpu_chunk:4*cpu_chunk])
+    else:
+        ht_enabled = False
+        cpu_chunk = int(cpu_num/2)
+        numa0_cpus = cpu_list[:cpu_chunk]
+        numa1_cpus = cpu_list[cpu_chunk:]
+    return [numa0_cpus, numa1_cpus]
+
+
+def gen_qemu_cpu_mask_config(spdk_cpu_list, vm_count, vm_cpu_num):
+    # Build VM_<i>_qemu_mask / VM_<i>_qemu_numa_node config lines, assigning
+    # each VM vm_cpu_num cores from whatever is left after SPDK's cores are
+    # excluded.  Returns the concatenated config text.
+    print("Creating masks for QEMU")
+    ret = ""
+
+    # Exclude SPDK cores from available CPU list
+    numa0_cpus, numa1_cpus = get_host_cpus()
+    spdk_cpus = list_spdk_used_cpus(spdk_cpu_list)
+    spdk_cpus.sort()
+
+    numa0_cpus = sorted(list(set(numa0_cpus) - set(spdk_cpus)))
+    numa1_cpus = sorted(list(set(numa1_cpus) - set(spdk_cpus)))
+
+    # Generate qemu cpu mask and numa param for VMs out of
+    # remaining free CPU cores.
+    # All CPUs assigned to a VM will come from the same NUMA node.
+    # Assuming 2 socket server.
+    used_numa = 0
+    available = numa0_cpus
+    for i in range(0, vm_count):
+        cpus = [str(x) for x in available[0:vm_cpu_num]]
+
+        # If there is not enough cores on first numa node for a VM
+        # then switch to next numa node
+        if len(cpus) < vm_cpu_num and used_numa == 0:
+            available = numa1_cpus
+            used_numa = 1
+            cpus = [str(x) for x in available[0:vm_cpu_num]]
+
+        # If not enough cores on second numa node - break and exit
+        if len(cpus) < vm_cpu_num and used_numa == 1:
+            print("There is not enough CPU Cores available on \
+                Numa node1 to create VM %s" % i)
+            break
+
+        cpus = ",".join(cpus)
+        cpus = "VM_%s_qemu_mask=%s" % (i, cpus)
+        numa = "VM_%s_qemu_numa_node=%s\n" % (i, used_numa)
+
+        # Remove used CPU cores from available list
+        available = available[vm_cpu_num:]
+        ret = "\n".join([ret, cpus, numa])
+
+    return ret
+
+
+def create_fio_cfg(template_dir, output_dir, **kwargs):
+ fio_tempalte = os.path.join(template_dir, "fio_test.conf")
+ with open("scripts/perf/vhost/fio_test.conf", "r") as fh:
+ cfg = fh.read()
+ cfg = cfg.format(**kwargs)
+
+ file_path = os.path.join(output_dir, "fio_job.cfg")
+ with open(file_path, "w") as fh:
+ fh.write(cfg)
+ return file_path
+
+
+# Parse CLI arguments, generate the fio job and CPU mask configs, and launch
+# the vhost_perf.sh benchmark with the assembled command line.
+script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
+parser = argparse.ArgumentParser()
+
+parser.add_argument('blksize', default="4k", type=str,
+                    help="Block size param for FIO. Default: 4k")
+parser.add_argument('iodepth', default="128", type=str,
+                    help="Iodepth param for FIO. Default: 128")
+parser.add_argument('rw', default="randread", type=str,
+                    help="RW param for FIO. Default: randread")
+parser.add_argument('-m', '--rwmixread', default="70", type=str,
+                    help="Percentage of reads in read-write mode. Default: 70")
+parser.add_argument('-r', '--runtime', default="10", type=str,
+                    help="Run time param for FIO (in seconds). Default: 10")
+parser.add_argument('-R', '--ramptime', default="10", type=str,
+                    help="Ramp time param for FIO (in seconds). Default: 10")
+parser.add_argument('-c', '--ctrl-type', default="spdk_vhost_scsi", type=str,
+                    help="Type of vhost controller to use in test.\
+                    Possible options: spdk_vhost_scsi, spdk_vhost_blk.\
+                    Default: spdk_vhost_scsi")
+parser.add_argument('-s', '--split', default=False, type=bool,
+                    help="Use split vbdevs instead of logical volumes. Default: false")
+parser.add_argument('-d', '--max-disks', default=0, type=int,
+                    help="How many physical disks to use in test. Default: all disks.\
+                    Depending on the number of --vm-count disks may be split into\
+                    smaller logical bdevs (splits or logical volumes) so that\
+                    each virtual machine gets it's own bdev to work on.")
+parser.add_argument('-v', '--vm-count', default=1, type=int,
+                    help="How many VMs to run in test. Default: 1")
+parser.add_argument('-i', '--vm-image', default="/home/sys_sgsw/vhost_vm_image.qcow2",
+                    type=str, help="VM image to use for running VMs.")
+
+# Two mutually-exclusive subcommands: auto-generate a CPU config or load one.
+subparsers = parser.add_subparsers()
+cpu_cfg_create = subparsers.add_parser('create_cpu_cfg',
+                                       help="Generate a CPU config file for test.\
+                                       This option will attempt to automatically\
+                                       generate config file with SPDK/QEMU cpu lists.\
+                                       CPU cores on NUMA Node 0 will be used first\
+                                       (including logical cores when HT is enabled)\
+                                       and NUMA Node 1 will be used last.")
+cpu_cfg_create.add_argument('spdk_cpu_list', default=None,
+                            help="List of CPU cores to be used by SPDK vhost app.\
+                            Accepted format examples:\
+                            single cpus: 0,2,4\
+                            ranges (inclusive!): 0-2\
+                            mixed: 0,2-5,9")
+cpu_cfg_create.add_argument('vm_cpu_num', default=None, type=int)
+
+cpu_cfg_load = subparsers.add_parser('load_cpu_cfg',
+                                     help="Load and use a CPU config file for test\
+                                     Example configuration files can be found in:\
+                                     test/vhost/common/autotest.config")
+cpu_cfg_load.add_argument('custom_mask_file', default=None,
+                          help="Path to file with custom values for vhost's\
+                          reactor mask and master core, and each VM's qemu mask\
+                          and qemu numa node")
+
+args = parser.parse_args()
+fio_cfg_path = create_fio_cfg(script_dir, script_dir, **vars(args))
+
+cpu_cfg_arg = ""
+disk_arg = ""
+split_arg = ""
+# "name in args" tests which subcommand was chosen (Namespace membership).
+if "spdk_cpu_list" in args:
+    cfg_path = gen_cpu_mask_config(script_dir, args.spdk_cpu_list, args.vm_count, args.vm_cpu_num)
+    cpu_cfg_arg = "--custom-cpu-cfg=%s" % cfg_path
+if "custom_mask_file" in args:
+    cpu_cfg_arg = "--custom-cpu-cfg=%s" % args.custom_mask_file
+if args.split is True:
+    split_arg = "--use-split"
+if args.max_disks > 0:
+    disk_arg = "--max-disks=%s" % args.max_disks
+
+
+command = " ".join(["test/vhost/perf_bench/vhost_perf.sh",
+                    "--vm-image=%s" % args.vm_image,
+                    "--vm-count=%s" % args.vm_count,
+                    "--ctrl-type=%s" % args.ctrl_type,
+                    "%s" % split_arg,
+                    "%s" % disk_arg,
+                    "--fio-job=%s" % fio_cfg_path,
+                    "%s" % cpu_cfg_arg])
+print("INFO: Running perf test with command:")
+print(command)
+pr = check_output(command, shell=True)
diff --git a/src/spdk/scripts/pkgdep.sh b/src/spdk/scripts/pkgdep.sh
new file mode 100755
index 00000000..2ab610b0
--- /dev/null
+++ b/src/spdk/scripts/pkgdep.sh
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+# Please run this script as root.
+
+set -e
+trap 'set +e; trap - ERR; echo "Error!"; exit 1;' ERR
+
+scriptsdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $scriptsdir/..)
+
+if [ -s /etc/redhat-release ]; then
+ . /etc/os-release
+
+ # Includes Fedora, CentOS 7, RHEL 7
+ # Add EPEL repository for CUnit-devel and libunwind-devel
+ if echo "$ID $VERSION_ID" | egrep -q 'rhel 7|centos 7'; then
+ if ! rpm --quiet -q epel-release; then
+ yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+ fi
+
+ if [ $ID = 'rhel' ]; then
+ subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms"
+ elif [ $ID = 'centos' ]; then
+ yum --enablerepo=extras install -y epel-release
+ fi
+ fi
+
+ yum install -y gcc gcc-c++ make CUnit-devel libaio-devel openssl-devel \
+ git astyle python-pep8 lcov python clang-analyzer libuuid-devel \
+ sg3_utils libiscsi-devel pciutils
+ # Additional (optional) dependencies for showing backtrace in logs
+ yum install -y libunwind-devel || true
+ # Additional dependencies for NVMe over Fabrics
+ yum install -y libibverbs-devel librdmacm-devel
+ # Additional dependencies for DPDK
+ yum install -y numactl-devel nasm
+ # Additional dependencies for building docs
+ yum install -y doxygen mscgen graphviz
+ # Additional dependencies for building pmem based backends
+ yum install -y libpmemblk-devel || true
+ # Additional dependencies for SPDK CLI - not available in rhel and centos
+ if ! echo "$ID $VERSION_ID" | egrep -q 'rhel 7|centos 7'; then
+ yum install -y python3-configshell python3-pexpect
+ fi
+elif [ -f /etc/debian_version ]; then
+ # Includes Ubuntu, Debian
+ apt-get install -y gcc g++ make libcunit1-dev libaio-dev libssl-dev \
+ git astyle pep8 lcov clang uuid-dev sg3-utils libiscsi-dev pciutils
+ # Additional (optional) dependencies for showing backtrace in logs
+ apt-get install -y libunwind-dev || true
+ # Additional dependencies for NVMe over Fabrics
+ apt-get install -y libibverbs-dev librdmacm-dev
+ # Additional dependencies for DPDK
+ apt-get install -y libnuma-dev nasm
+ # Additional dependencies for building docs
+ apt-get install -y doxygen mscgen graphviz
+ # Additional dependencies for SPDK CLI
+ apt-get install -y python-pip python3-pip
+ pip install configshell_fb pexpect
+ pip3 install configshell_fb pexpect
+elif [ -f /etc/SuSE-release ]; then
+ zypper install -y gcc gcc-c++ make cunit-devel libaio-devel libopenssl-devel \
+ git-core lcov python-base python-pep8 libuuid-devel sg3_utils pciutils
+ # Additional (optional) dependencies for showing backtrace in logs
+ zypper install libunwind-devel || true
+ # Additional dependencies for NVMe over Fabrics
+ zypper install -y rdma-core-devel
+ # Additional dependencies for DPDK
+ zypper install -y libnuma-devel nasm
+ # Additional dependencies for building pmem based backends
+ zypper install -y libpmemblk-devel
+ # Additional dependencies for building docs
+ zypper install -y doxygen mscgen graphviz
+elif [ $(uname -s) = "FreeBSD" ] ; then
+ pkg install -y gmake cunit openssl git devel/astyle bash py27-pycodestyle \
+ python misc/e2fsprogs-libuuid sysutils/sg3_utils nasm
+ # Additional dependencies for building docs
+ pkg install -y doxygen mscgen graphviz
+else
+ echo "pkgdep: unknown system type."
+ exit 1
+fi
+
+# Only crypto needs nasm and this lib but because the lib requires root to
+# install we do it here.
+nasm_ver=$(nasm -v | sed 's/[^0-9]*//g' | awk '{print substr ($0, 0, 5)}')
+if [ $nasm_ver -lt "21202" ]; then
+ echo Crypto requires NASM version 2.12.02 or newer. Please install
+ echo or upgrade and re-run this script if you are going to use Crypto.
+else
+ ipsec="$(find /usr -name intel-ipsec-mb.h 2>/dev/null)"
+ if [ "$ipsec" == "" ]; then
+ ipsec_submodule_cloned="$(find $rootdir/intel-ipsec-mb -name intel-ipsec-mb.h 2>/dev/null)"
+ if [ "$ipsec_submodule_cloned" != "" ]; then
+ su - $SUDO_USER -c "make -C $rootdir/intel-ipsec-mb"
+ make -C $rootdir/intel-ipsec-mb install
+ else
+ echo "The intel-ipsec-mb submodule has not been cloned and will not be installed."
+ echo "To enable crypto, run 'git submodule update --init' and then run this script again."
+ fi
+ fi
+fi
diff --git a/src/spdk/scripts/posix.txt b/src/spdk/scripts/posix.txt
new file mode 100644
index 00000000..2d07f23d
--- /dev/null
+++ b/src/spdk/scripts/posix.txt
@@ -0,0 +1,82 @@
+<aio.h>
+<arpa/inet.h>
+<assert.h>
+<complex.h>
+<cpio.h>
+<ctype.h>
+<dirent.h>
+<dlfcn.h>
+<errno.h>
+<fcntl.h>
+<fenv.h>
+<float.h>
+<fmtmsg.h>
+<fnmatch.h>
+<ftw.h>
+<glob.h>
+<grp.h>
+<iconv.h>
+<inttypes.h>
+<iso646.h>
+<langinfo.h>
+<libgen.h>
+<limits.h>
+<locale.h>
+<math.h>
+<monetary.h>
+<mqueue.h>
+<ndbm.h>
+<net/if.h>
+<netdb.h>
+<netinet/in.h>
+<netinet/tcp.h>
+<nl_types.h>
+<poll.h>
+<pthread.h>
+<pwd.h>
+<regex.h>
+<sched.h>
+<search.h>
+<semaphore.h>
+<setjmp.h>
+<signal.h>
+<spawn.h>
+<stdarg.h>
+<stdbool.h>
+<stddef.h>
+<stdint.h>
+<stdio.h>
+<stdlib.h>
+<string.h>
+<strings.h>
+<stropts.h>
+<sys/ipc.h>
+<sys/mman.h>
+<sys/msg.h>
+<sys/resource.h>
+<sys/select.h>
+<sys/sem.h>
+<sys/shm.h>
+<sys/socket.h>
+<sys/stat.h>
+<sys/statvfs.h>
+<sys/time.h>
+<sys/times.h>
+<sys/types.h>
+<sys/uio.h>
+<sys/un.h>
+<sys/utsname.h>
+<sys/wait.h>
+<syslog.h>
+<tar.h>
+<termios.h>
+<tgmath.h>
+<time.h>
+<trace.h>
+<ulimit.h>
+<unistd.h>
+<utime.h>
+<utmpx.h>
+<wchar.h>
+<wctype.h>
+<wordexp.h>
diff --git a/src/spdk/scripts/prep_benchmarks.sh b/src/spdk/scripts/prep_benchmarks.sh
new file mode 100755
index 00000000..40830114
--- /dev/null
+++ b/src/spdk/scripts/prep_benchmarks.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+
+# Tune the host for benchmarking: performance governor on all CPUs, Turbo
+# Boost off, irqbalance stopped, IRQs steered away from core 0, and blk-mq
+# polling enabled on NVMe queues.
+function configure_performance() {
+    echo -n "Placing all CPUs in performance mode..."
+    for governor in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
+        echo -n performance > "$governor"
+    done
+    echo "Done"
+
+    if [ -f "/sys/devices/system/cpu/intel_pstate/no_turbo" ]; then
+        echo -n "Disabling Turbo Boost..."
+        echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
+        echo "Done"
+    fi
+
+    echo -n "Disabling irqbalance service..."
+    service irqbalance stop 2> /dev/null
+    echo "Done"
+
+    echo -n "Moving all interrupts off of core 0..."
+    # Build a hex affinity mask excluding the first CPUs: the low nibble "e"
+    # covers cores 1-3, each further "f" adds 4 cores, and a "," separator is
+    # inserted every 8 hex digits (32 bits) as smp_affinity expects.
+    # (expr/backticks replaced with builtin arithmetic.)
+    count=$(( $(nproc) / 4 ))
+    cpumask="e"
+    for (( i = 1; i < count; i++ )); do
+        if (( i % 8 == 0 )); then
+            cpumask=",$cpumask"
+        fi
+        cpumask="f$cpumask"
+    done
+    for file in /proc/irq/*/smp_affinity; do
+        # Some IRQs refuse re-routing; ignore those write errors.
+        echo "$cpumask" > "$file" 2> /dev/null
+    done
+    echo "Done"
+
+    echo -n "Configuring kernel blk-mq for NVMe SSDs..."
+    for queue in /sys/block/nvme*n*/queue; do
+        if [ -f "$queue/nomerges" ]; then
+            echo "1" > "$queue/nomerges"
+        fi
+
+        if [ -f "$queue/io_poll" ]; then
+            echo "1" > "$queue/io_poll"
+        fi
+
+        if [ -f "$queue/io_poll_delay" ]; then
+            echo "-1" > "$queue/io_poll_delay"
+        fi
+    done
+    echo "Done"
+}
+
+# Undo configure_performance: powersave governor everywhere, Turbo Boost
+# re-enabled, irqbalance restarted.
+function reset_performance() {
+    echo -n "Placing all CPUs in powersave mode..."
+    for gov_file in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
+        echo -n powersave > "$gov_file"
+    done
+    echo "Done"
+
+    if [ -f "/sys/devices/system/cpu/intel_pstate/no_turbo" ]; then
+        echo -n "Enabling Turbo Boost..."
+        echo -n 0 > /sys/devices/system/cpu/intel_pstate/no_turbo
+        echo "Done"
+    fi
+
+    echo -n "Enabling irqbalance service..."
+    service irqbalance start 2> /dev/null
+    echo "Done"
+}
+
+if [ "$1" = "reset" ]; then
+ reset_performance
+else
+ configure_performance
+fi
diff --git a/src/spdk/scripts/qat_setup.sh b/src/spdk/scripts/qat_setup.sh
new file mode 100755
index 00000000..b46351d7
--- /dev/null
+++ b/src/spdk/scripts/qat_setup.sh
@@ -0,0 +1,111 @@
+#!/usr/bin/env bash
+
+rootdir=$(readlink -f "$(dirname "$0")")/..
+igb_driverdir=$rootdir/dpdk/build/build/kernel/igb_uio/
+allowed_drivers=("igb_uio" "uio_pci_generic")
+
+# This script requires an igb_uio kernel module binary located at $igb_driverdir/igb_uio.ko
+# Please also note that this script is not intended to be comprehensive or production quality.
+# It supports configuring a single card (the Intel QAT 8970) for use with the SPDK
+
+bad_driver=true
+driver_to_bind=uio_pci_generic
+num_vfs=16
+firmware_download_url=http://git.kernel.org/cgit/linux/kernel/git/firmware/linux-firmware.git/tree
+qat_binary=qat_895xcc.bin
+qat_mmp_binary=qat_895xcc_mmp.bin
+
+# Physical functions of the QAT card (PCI device ID 37c8).
+qat_pci_bdfs=( $(lspci -Dd:37c8 | awk '{print $1}') )
+if [ ${#qat_pci_bdfs[@]} -eq 0 ]; then
+    echo "No QAT devices found. Exiting"
+    exit 0
+fi
+
+# Optional first argument overrides the uio driver to bind the VFs to.
+if [ -n "$1" ]; then
+    driver_to_bind=$1
+fi
+
+for driver in "${allowed_drivers[@]}"; do
+    if [ "$driver" == "$driver_to_bind" ]; then
+        bad_driver=false
+    fi
+done
+
+if $bad_driver; then
+    echo "Unrecognized driver. Please specify an accepted driver (listed below):"
+    echo "${allowed_drivers[@]}"
+    exit 1
+fi
+
+# Fetch firmware if needed.
+if [ ! -f /lib/firmware/$qat_binary ]; then
+    echo "installing qat firmware"
+    if ! wget "$firmware_download_url/$qat_binary" -O /lib/firmware/$qat_binary; then
+        echo "Cannot download the qat binary $qat_binary from <$firmware_download_url/$qat_binary>"
+        exit 1
+    fi
+fi
+
+if [ ! -f /lib/firmware/$qat_mmp_binary ]; then
+    echo "installing qat mmp firmware"
+    if ! wget "$firmware_download_url/$qat_mmp_binary" -O /lib/firmware/$qat_mmp_binary; then
+        echo "Cannot download the qat mmp binary $qat_mmp_binary from <$firmware_download_url/$qat_mmp_binary>"
+        exit 1
+    fi
+fi
+
+# configure virtual functions for the QAT cards.
+for qat_bdf in "${qat_pci_bdfs[@]}"; do
+    echo "$num_vfs" > /sys/bus/pci/drivers/c6xx/$qat_bdf/sriov_numvfs
+    # Read back how many VFs the kernel actually created.
+    num_vfs=$(cat /sys/bus/pci/drivers/c6xx/$qat_bdf/sriov_numvfs)
+    echo "$qat_bdf set to $num_vfs VFs"
+done
+
+# Confirm we have all of the virtual functions we asked for.
+
+qat_vf_bdfs=( $(lspci -Dd:37c9 | awk '{print $1}') )
+if (( ${#qat_vf_bdfs[@]} != ${#qat_pci_bdfs[@]}*num_vfs )); then
+    echo "Failed to prepare the VFs. Aborting"
+    exit 1
+fi
+
+# Unbind old driver if necessary.
+for vf in "${qat_vf_bdfs[@]}"; do
+    # If no driver is bound, readlink -f keeps the trailing "driver"
+    # component, so basename yields the literal string "driver".
+    old_driver=$(basename "$(readlink -f /sys/bus/pci/devices/${vf}/driver)")
+    if [ "$old_driver" != "driver" ]; then
+        echo "unbinding driver $old_driver from qat VF at BDF $vf"
+        echo -n "$vf" > /sys/bus/pci/drivers/$old_driver/unbind
+    fi
+done
+
+modprobe uio
+
+# Insert the dpdk uio kernel module.
+if [ "$driver_to_bind" == "igb_uio" ]; then
+    if ! lsmod | grep -q igb_uio; then
+        if ! insmod "$igb_driverdir/igb_uio.ko"; then
+            echo "Unable to insert the igb_uio kernel module. Aborting."
+            exit 1
+        fi
+    fi
+elif [ "$driver_to_bind" == "uio_pci_generic" ]; then
+    modprobe uio_pci_generic
+else
+    echo "Unsure how to work with driver $driver_to_bind. Please configure it in qat_setup.sh"
+    exit 1
+fi
+
+echo -n "8086 37c9" > /sys/bus/pci/drivers/$driver_to_bind/new_id
+for vf in "${qat_vf_bdfs[@]}"; do
+    if ! ls -l /sys/bus/pci/devices/$vf/driver | grep -q "$driver_to_bind"; then
+        echo "unable to bind the driver to the device at bdf $vf"
+        if [ "$driver_to_bind" == "uio_pci_generic" ]; then
+            echo "Your kernel's uio_pci_generic module does not support binding to virtual functions."
+            echo "It likely is missing Linux git commit ID acec09e67 which is needed to bind"
+            echo "uio_pci_generic to virtual functions which have no legacy interrupt vector."
+            echo "Please rebuild spdk with --with-igb-uio-driver and re-run this script specifying the igb_uio driver."
+        fi
+        exit 1
+    fi
+done
+echo "Properly configured the qat device with driver $driver_to_bind."
diff --git a/src/spdk/scripts/rpc.py b/src/spdk/scripts/rpc.py
new file mode 100755
index 00000000..d6ff9d26
--- /dev/null
+++ b/src/spdk/scripts/rpc.py
@@ -0,0 +1,1827 @@
+#!/usr/bin/env python3
+
+from rpc.client import print_dict, JSONRPCException
+
+import argparse
+import rpc
+import sys
+
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
+
+def print_array(a):
+ print(" ".join((quote(v) for v in a)))
+
+
+def call_cmd(func):
+ # Decorator for RPC command handlers: run the wrapped handler and, on a
+ # JSON-RPC error, print the server's error message and exit with status 1
+ # instead of dumping a traceback at the user.
+ def rpc_cmd(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except JSONRPCException as ex:
+ # NOTE(review): assumes JSONRPCException exposes a .message
+ # attribute (set in rpc/client.py) -- confirm.
+ print(ex.message)
+ exit(1)
+ return rpc_cmd
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description='SPDK RPC command line interface')
+ parser.add_argument('-s', dest='server_addr',
+ help='RPC server address', default='/var/tmp/spdk.sock')
+ parser.add_argument('-p', dest='port',
+ help='RPC port number (if server_addr is IP address)',
+ default=5260, type=int)
+ parser.add_argument('-t', dest='timeout',
+ help='Timout as a floating point number expressed in seconds waiting for reponse. Default: 60.0',
+ default=60.0, type=float)
+ parser.add_argument('-v', dest='verbose',
+ help='Verbose mode', action='store_true')
+ subparsers = parser.add_subparsers(help='RPC methods')
+
+ @call_cmd
+ def start_subsystem_init(args):
+ rpc.start_subsystem_init(args.client)
+
+ p = subparsers.add_parser('start_subsystem_init', help='Start initialization of subsystems')
+ p.set_defaults(func=start_subsystem_init)
+
+ @call_cmd
+ def get_rpc_methods(args):
+ print_dict(rpc.get_rpc_methods(args.client,
+ current=args.current))
+
+ p = subparsers.add_parser('get_rpc_methods', help='Get list of supported RPC methods')
+ p.add_argument('-c', '--current', help='Get list of RPC methods only callable in the current state.', action='store_true')
+ p.set_defaults(func=get_rpc_methods)
+
+ @call_cmd
+ def save_config(args):
+ rpc.save_config(args.client,
+ sys.stdout,
+ indent=args.indent)
+
+ p = subparsers.add_parser('save_config', help="""Write current (live) configuration of SPDK subsystems and targets to stdout.
+ """)
+ p.add_argument('-i', '--indent', help="""Indent level. Value less than 0 mean compact mode. Default indent level is 2.
+ """, type=int, default=2)
+ p.set_defaults(func=save_config)
+
+ @call_cmd
+ def load_config(args):
+ rpc.load_config(args.client, sys.stdin)
+
+ p = subparsers.add_parser('load_config', help="""Configure SPDK subsystems and targets using JSON RPC read from stdin.""")
+ p.set_defaults(func=load_config)
+
+ @call_cmd
+ def save_subsystem_config(args):
+ rpc.save_subsystem_config(args.client,
+ sys.stdout,
+ indent=args.indent,
+ name=args.name)
+
+ p = subparsers.add_parser('save_subsystem_config', help="""Write current (live) configuration of SPDK subsystem to stdout.
+ """)
+ p.add_argument('-i', '--indent', help="""Indent level. Value less than 0 mean compact mode. Default indent level is 2.
+ """, type=int, default=2)
+ p.add_argument('-n', '--name', help='Name of subsystem', required=True)
+ p.set_defaults(func=save_subsystem_config)
+
+ @call_cmd
+ def load_subsystem_config(args):
+ rpc.load_subsystem_config(args.client,
+ sys.stdin)
+
+ p = subparsers.add_parser('load_subsystem_config', help="""Configure SPDK subsystem using JSON RPC read from stdin.""")
+ p.set_defaults(func=load_subsystem_config)
+
+ # app
+ @call_cmd
+ def kill_instance(args):
+ rpc.app.kill_instance(args.client,
+ sig_name=args.sig_name)
+
+ p = subparsers.add_parser('kill_instance', help='Send signal to instance')
+ p.add_argument('sig_name', help='signal will be sent to server.')
+ p.set_defaults(func=kill_instance)
+
+ @call_cmd
+ def context_switch_monitor(args):
+ enabled = None
+ if args.enable:
+ enabled = True
+ if args.disable:
+ enabled = False
+ print_dict(rpc.app.context_switch_monitor(args.client,
+ enabled=enabled))
+
+ p = subparsers.add_parser('context_switch_monitor', help='Control whether the context switch monitor is enabled')
+ p.add_argument('-e', '--enable', action='store_true', help='Enable context switch monitoring')
+ p.add_argument('-d', '--disable', action='store_true', help='Disable context switch monitoring')
+ p.set_defaults(func=context_switch_monitor)
+
+ # bdev
+ @call_cmd
+ def set_bdev_options(args):
+ rpc.bdev.set_bdev_options(args.client,
+ bdev_io_pool_size=args.bdev_io_pool_size,
+ bdev_io_cache_size=args.bdev_io_cache_size)
+
+ p = subparsers.add_parser('set_bdev_options', help="""Set options of bdev subsystem""")
+ p.add_argument('-p', '--bdev-io-pool-size', help='Number of bdev_io structures in shared buffer pool', type=int)
+ p.add_argument('-c', '--bdev-io-cache-size', help='Maximum number of bdev_io structures cached per thread', type=int)
+ p.set_defaults(func=set_bdev_options)
+
+ @call_cmd
+ def construct_crypto_bdev(args):
+ print(rpc.bdev.construct_crypto_bdev(args.client,
+ base_bdev_name=args.base_bdev_name,
+ name=args.name,
+ crypto_pmd=args.crypto_pmd,
+ key=args.key))
+ p = subparsers.add_parser('construct_crypto_bdev',
+ help='Add a crypto vbdev')
+ p.add_argument('-b', '--base_bdev_name', help="Name of the base bdev")
+ p.add_argument('-c', '--name', help="Name of the crypto vbdev")
+ p.add_argument('-d', '--crypto_pmd', help="Name of the crypto device driver")
+ p.add_argument('-k', '--key', help="Key")
+ p.set_defaults(func=construct_crypto_bdev)
+
+ @call_cmd
+ def delete_crypto_bdev(args):
+ rpc.bdev.delete_crypto_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('delete_crypto_bdev', help='Delete a crypto disk')
+ p.add_argument('name', help='crypto bdev name')
+ p.set_defaults(func=delete_crypto_bdev)
+
+ @call_cmd
+ def construct_malloc_bdev(args):
+ num_blocks = (args.total_size * 1024 * 1024) // args.block_size
+ print(rpc.bdev.construct_malloc_bdev(args.client,
+ num_blocks=int(num_blocks),
+ block_size=args.block_size,
+ name=args.name,
+ uuid=args.uuid))
+ p = subparsers.add_parser('construct_malloc_bdev',
+ help='Add a bdev with malloc backend')
+ p.add_argument('-b', '--name', help="Name of the bdev")
+ p.add_argument('-u', '--uuid', help="UUID of the bdev")
+ p.add_argument(
+ 'total_size', help='Size of malloc bdev in MB (float > 0)', type=float)
+ p.add_argument('block_size', help='Block size for this bdev', type=int)
+ p.set_defaults(func=construct_malloc_bdev)
+
+ @call_cmd
+ def delete_malloc_bdev(args):
+ rpc.bdev.delete_malloc_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('delete_malloc_bdev', help='Delete a malloc disk')
+ p.add_argument('name', help='malloc bdev name')
+ p.set_defaults(func=delete_malloc_bdev)
+
+ @call_cmd
+ def construct_null_bdev(args):
+ num_blocks = (args.total_size * 1024 * 1024) // args.block_size
+ print(rpc.bdev.construct_null_bdev(args.client,
+ num_blocks=num_blocks,
+ block_size=args.block_size,
+ name=args.name,
+ uuid=args.uuid))
+
+ p = subparsers.add_parser('construct_null_bdev',
+ help='Add a bdev with null backend')
+ p.add_argument('name', help='Block device name')
+ p.add_argument('-u', '--uuid', help='UUID of the bdev')
+ p.add_argument(
+ 'total_size', help='Size of null bdev in MB (int > 0)', type=int)
+ p.add_argument('block_size', help='Block size for this bdev', type=int)
+ p.set_defaults(func=construct_null_bdev)
+
+ @call_cmd
+ def delete_null_bdev(args):
+ rpc.bdev.delete_null_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('delete_null_bdev', help='Delete a null bdev')
+ p.add_argument('name', help='null bdev name')
+ p.set_defaults(func=delete_null_bdev)
+
+ @call_cmd
+ def construct_aio_bdev(args):
+ print(rpc.bdev.construct_aio_bdev(args.client,
+ filename=args.filename,
+ name=args.name,
+ block_size=args.block_size))
+
+ p = subparsers.add_parser('construct_aio_bdev',
+ help='Add a bdev with aio backend')
+ p.add_argument('filename', help='Path to device or file (ex: /dev/sda)')
+ p.add_argument('name', help='Block device name')
+ p.add_argument('block_size', help='Block size for this bdev', type=int, nargs='?', default=0)
+ p.set_defaults(func=construct_aio_bdev)
+
+ @call_cmd
+ def delete_aio_bdev(args):
+ rpc.bdev.delete_aio_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('delete_aio_bdev', help='Delete an aio disk')
+ p.add_argument('name', help='aio bdev name')
+ p.set_defaults(func=delete_aio_bdev)
+
+ @call_cmd
+ def set_bdev_nvme_options(args):
+ rpc.bdev.set_bdev_nvme_options(args.client,
+ action_on_timeout=args.action_on_timeout,
+ timeout_us=args.timeout_us,
+ retry_count=args.retry_count,
+ nvme_adminq_poll_period_us=args.nvme_adminq_poll_period_us)
+
+ p = subparsers.add_parser('set_bdev_nvme_options',
+ help='Set options for the bdev nvme type. This is startup command.')
+ p.add_argument('-a', '--action-on-timeout',
+ help="Action to take on command time out. Valid valies are: none, reset, abort")
+ p.add_argument('-t', '--timeout-us',
+ help="Timeout for each command, in microseconds. If 0, don't track timeouts.", type=int)
+ p.add_argument('-n', '--retry-count',
+ help='the number of attempts per I/O when an I/O fails', type=int)
+ p.add_argument('-p', '--nvme-adminq-poll-period-us',
+ help='How often the admin queue is polled for asynchronous events', type=int)
+ p.set_defaults(func=set_bdev_nvme_options)
+
+ @call_cmd
+ def set_bdev_nvme_hotplug(args):
+ rpc.bdev.set_bdev_nvme_hotplug(args.client, enable=args.enable, period_us=args.period_us)
+
+ p = subparsers.add_parser('set_bdev_nvme_hotplug',
+ help='Set hotplug options for bdev nvme type.')
+ p.add_argument('-d', '--disable', dest='enable', default=False, action='store_false', help="Disable hotplug (default)")
+ p.add_argument('-e', '--enable', dest='enable', action='store_true', help="Enable hotplug")
+ p.add_argument('-r', '--period-us',
+ help='How often the hotplug is processed for insert and remove events', type=int)
+ p.set_defaults(func=set_bdev_nvme_hotplug)
+
+ @call_cmd
+ def construct_nvme_bdev(args):
+ print_array(rpc.bdev.construct_nvme_bdev(args.client,
+ name=args.name,
+ trtype=args.trtype,
+ traddr=args.traddr,
+ adrfam=args.adrfam,
+ trsvcid=args.trsvcid,
+ subnqn=args.subnqn))
+
+ p = subparsers.add_parser('construct_nvme_bdev',
+ help='Add bdev with nvme backend')
+ p.add_argument('-b', '--name', help="Name of the bdev", required=True)
+ p.add_argument('-t', '--trtype',
+ help='NVMe-oF target trtype: e.g., rdma, pcie', required=True)
+ p.add_argument('-a', '--traddr',
+ help='NVMe-oF target address: e.g., an ip address or BDF', required=True)
+ p.add_argument('-f', '--adrfam',
+ help='NVMe-oF target adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
+ p.add_argument('-s', '--trsvcid',
+ help='NVMe-oF target trsvcid: e.g., a port number')
+ p.add_argument('-n', '--subnqn', help='NVMe-oF target subnqn')
+ p.set_defaults(func=construct_nvme_bdev)
+
+ @call_cmd
+ def get_nvme_controllers(args):
+ print_dict(rpc.nvme.get_nvme_controllers(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser(
+ 'get_nvme_controllers', help='Display current NVMe controllers list or required NVMe controller')
+ p.add_argument('-n', '--name', help="Name of the NVMe controller. Example: Nvme0", required=False)
+ p.set_defaults(func=get_nvme_controllers)
+
+ @call_cmd
+ def delete_nvme_controller(args):
+ rpc.bdev.delete_nvme_controller(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('delete_nvme_controller',
+ help='Delete a NVMe controller using controller name')
+ p.add_argument('name', help="Name of the controller")
+ p.set_defaults(func=delete_nvme_controller)
+
+ @call_cmd
+ def construct_rbd_bdev(args):
+ print(rpc.bdev.construct_rbd_bdev(args.client,
+ name=args.name,
+ pool_name=args.pool_name,
+ rbd_name=args.rbd_name,
+ block_size=args.block_size))
+
+ p = subparsers.add_parser('construct_rbd_bdev',
+ help='Add a bdev with ceph rbd backend')
+ p.add_argument('-b', '--name', help="Name of the bdev", required=False)
+ p.add_argument('pool_name', help='rbd pool name')
+ p.add_argument('rbd_name', help='rbd image name')
+ p.add_argument('block_size', help='rbd block size', type=int)
+ p.set_defaults(func=construct_rbd_bdev)
+
+ @call_cmd
+ def delete_rbd_bdev(args):
+ rpc.bdev.delete_rbd_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('delete_rbd_bdev', help='Delete a rbd bdev')
+ p.add_argument('name', help='rbd bdev name')
+ p.set_defaults(func=delete_rbd_bdev)
+
+ @call_cmd
+ def construct_error_bdev(args):
+ print(rpc.bdev.construct_error_bdev(args.client,
+ base_name=args.base_name))
+
+ p = subparsers.add_parser('construct_error_bdev',
+ help='Add bdev with error injection backend')
+ p.add_argument('base_name', help='base bdev name')
+ p.set_defaults(func=construct_error_bdev)
+
+ @call_cmd
+ def delete_error_bdev(args):
+ rpc.bdev.delete_error_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('delete_error_bdev', help='Delete an error bdev')
+ p.add_argument('name', help='error bdev name')
+ p.set_defaults(func=delete_error_bdev)
+
+ @call_cmd
+ def construct_iscsi_bdev(args):
+ print(rpc.bdev.construct_iscsi_bdev(args.client,
+ name=args.name,
+ url=args.url,
+ initiator_iqn=args.initiator_iqn))
+
+ p = subparsers.add_parser('construct_iscsi_bdev',
+ help='Add bdev with iSCSI initiator backend')
+ p.add_argument('-b', '--name', help="Name of the bdev", required=True)
+ p.add_argument('-i', '--initiator-iqn', help="Initiator IQN", required=True)
+ p.add_argument('--url', help="iSCSI Lun URL", required=True)
+ p.set_defaults(func=construct_iscsi_bdev)
+
+ @call_cmd
+ def delete_iscsi_bdev(args):
+ rpc.bdev.delete_iscsi_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('delete_iscsi_bdev', help='Delete an iSCSI bdev')
+ p.add_argument('name', help='iSCSI bdev name')
+ p.set_defaults(func=delete_iscsi_bdev)
+
+ @call_cmd
+ def construct_pmem_bdev(args):
+ print(rpc.bdev.construct_pmem_bdev(args.client,
+ pmem_file=args.pmem_file,
+ name=args.name))
+
+ p = subparsers.add_parser('construct_pmem_bdev', help='Add a bdev with pmem backend')
+ p.add_argument('pmem_file', help='Path to pmemblk pool file')
+ p.add_argument('-n', '--name', help='Block device name', required=True)
+ p.set_defaults(func=construct_pmem_bdev)
+
+ @call_cmd
+ def delete_pmem_bdev(args):
+ rpc.bdev.delete_pmem_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('delete_pmem_bdev', help='Delete a pmem bdev')
+ p.add_argument('name', help='pmem bdev name')
+ p.set_defaults(func=delete_pmem_bdev)
+
+ @call_cmd
+ def construct_passthru_bdev(args):
+ print(rpc.bdev.construct_passthru_bdev(args.client,
+ base_bdev_name=args.base_bdev_name,
+ passthru_bdev_name=args.passthru_bdev_name))
+
+ p = subparsers.add_parser('construct_passthru_bdev',
+ help='Add a pass through bdev on existing bdev')
+ p.add_argument('-b', '--base-bdev-name', help="Name of the existing bdev", required=True)
+ p.add_argument('-p', '--passthru-bdev-name', help="Name of the pass through bdev", required=True)
+ p.set_defaults(func=construct_passthru_bdev)
+
+ @call_cmd
+ def delete_passthru_bdev(args):
+ rpc.bdev.delete_passthru_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('delete_passthru_bdev', help='Delete a pass through bdev')
+ p.add_argument('name', help='pass through bdev name')
+ p.set_defaults(func=delete_passthru_bdev)
+
+ @call_cmd
+ def get_bdevs(args):
+ print_dict(rpc.bdev.get_bdevs(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser(
+ 'get_bdevs', help='Display current blockdev list or required blockdev')
+ p.add_argument('-b', '--name', help="Name of the Blockdev. Example: Nvme0n1", required=False)
+ p.set_defaults(func=get_bdevs)
+
+ @call_cmd
+ def get_bdevs_iostat(args):
+ print_dict(rpc.bdev.get_bdevs_iostat(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser(
+ 'get_bdevs_iostat', help='Display current I/O statistics of all the blockdevs or required blockdev.')
+ p.add_argument('-b', '--name', help="Name of the Blockdev. Example: Nvme0n1", required=False)
+ p.set_defaults(func=get_bdevs_iostat)
+
+ @call_cmd
+ def delete_bdev(args):
+ rpc.bdev.delete_bdev(args.client,
+ bdev_name=args.bdev_name)
+
+ p = subparsers.add_parser('delete_bdev', help='Delete a blockdev')
+ p.add_argument(
+ 'bdev_name', help='Blockdev name to be deleted. Example: Malloc0.')
+ p.set_defaults(func=delete_bdev)
+
+ @call_cmd
+ def set_bdev_qd_sampling_period(args):
+ rpc.bdev.set_bdev_qd_sampling_period(args.client,
+ name=args.name,
+ period=args.period)
+
+ p = subparsers.add_parser('set_bdev_qd_sampling_period', help="Enable or disable tracking of a bdev's queue depth.")
+ p.add_argument('name', help='Blockdev name. Example: Malloc0')
+ p.add_argument('period', help='Period with which to poll the block device queue depth in microseconds.'
+ ' If set to 0, polling will be disabled.',
+ type=int)
+ p.set_defaults(func=set_bdev_qd_sampling_period)
+
+ @call_cmd
+ def set_bdev_qos_limit(args):
+ rpc.bdev.set_bdev_qos_limit(args.client,
+ name=args.name,
+ rw_ios_per_sec=args.rw_ios_per_sec,
+ rw_mbytes_per_sec=args.rw_mbytes_per_sec)
+
+ p = subparsers.add_parser('set_bdev_qos_limit', help='Set QoS rate limit on a blockdev')
+ p.add_argument('name', help='Blockdev name to set QoS. Example: Malloc0')
+ p.add_argument('--rw_ios_per_sec',
+ help='R/W IOs per second limit (>=10000, example: 20000). 0 means unlimited.',
+ type=int, required=False)
+ p.add_argument('--rw_mbytes_per_sec',
+ help="R/W megabytes per second limit (>=10, example: 100). 0 means unlimited.",
+ type=int, required=False)
+ p.set_defaults(func=set_bdev_qos_limit)
+
+ @call_cmd
+ def bdev_inject_error(args):
+ rpc.bdev.bdev_inject_error(args.client,
+ name=args.name,
+ io_type=args.io_type,
+ error_type=args.error_type,
+ num=args.num)
+
+ p = subparsers.add_parser('bdev_inject_error', help='bdev inject error')
+ p.add_argument('name', help="""the name of the error injection bdev""")
+ p.add_argument('io_type', help="""io_type: 'clear' 'read' 'write' 'unmap' 'flush' 'all'""")
+ p.add_argument('error_type', help="""error_type: 'failure' 'pending'""")
+ p.add_argument(
+ '-n', '--num', help='the number of commands you want to fail', type=int, default=1)
+ p.set_defaults(func=bdev_inject_error)
+
+ @call_cmd
+ def apply_firmware(args):
+ print_dict(rpc.bdev.apply_firmware(args.client,
+ bdev_name=args.bdev_name,
+ filename=args.filename))
+
+ p = subparsers.add_parser('apply_firmware', help='Download and commit firmware to NVMe device')
+ p.add_argument('filename', help='filename of the firmware to download')
+ p.add_argument('bdev_name', help='name of the NVMe device')
+ p.set_defaults(func=apply_firmware)
+
+ # iSCSI
+ @call_cmd
+ def set_iscsi_options(args):
+ # Forward every parsed CLI option straight through to the set_iscsi_options
+ # RPC; options the user did not supply pass through as their argparse
+ # defaults (None for typed options, False for store_true flags).
+ rpc.iscsi.set_iscsi_options(
+ args.client,
+ auth_file=args.auth_file,
+ node_base=args.node_base,
+ nop_timeout=args.nop_timeout,
+ nop_in_interval=args.nop_in_interval,
+ disable_chap=args.disable_chap,
+ require_chap=args.require_chap,
+ mutual_chap=args.mutual_chap,
+ chap_group=args.chap_group,
+ max_sessions=args.max_sessions,
+ max_queue_depth=args.max_queue_depth,
+ max_connections_per_session=args.max_connections_per_session,
+ default_time2wait=args.default_time2wait,
+ default_time2retain=args.default_time2retain,
+ first_burst_length=args.first_burst_length,
+ immediate_data=args.immediate_data,
+ error_recovery_level=args.error_recovery_level,
+ allow_duplicated_isid=args.allow_duplicated_isid,
+ min_connections_per_core=args.min_connections_per_core)
+
+ # CLI registration for 'set_iscsi_options'.
+ p = subparsers.add_parser('set_iscsi_options', help="""Set options of iSCSI subsystem""")
+ p.add_argument('-f', '--auth-file', help='Path to CHAP shared secret file')
+ p.add_argument('-b', '--node-base', help='Prefix of the name of iSCSI target node')
+ p.add_argument('-o', '--nop-timeout', help='Timeout in seconds to nop-in request to the initiator', type=int)
+ p.add_argument('-n', '--nop-in-interval', help='Time interval in secs between nop-in requests by the target', type=int)
+ p.add_argument('-d', '--disable-chap', help="""CHAP for discovery session should be disabled.
+ *** Mutually exclusive with --require-chap""", action='store_true')
+ p.add_argument('-r', '--require-chap', help="""CHAP for discovery session should be required.
+ *** Mutually exclusive with --disable-chap""", action='store_true')
+ p.add_argument('-m', '--mutual-chap', help='CHAP for discovery session should be mutual', action='store_true')
+ p.add_argument('-g', '--chap-group', help="""Authentication group ID for discovery session.
+ *** Authentication group must be precreated ***""", type=int)
+ p.add_argument('-a', '--max-sessions', help='Maximum number of sessions in the host.', type=int)
+ p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/Os per queue.', type=int)
+ p.add_argument('-c', '--max-connections-per-session', help='Negotiated parameter, MaxConnections.', type=int)
+ p.add_argument('-w', '--default-time2wait', help='Negotiated parameter, DefaultTime2Wait.', type=int)
+ p.add_argument('-v', '--default-time2retain', help='Negotiated parameter, DefaultTime2Retain.', type=int)
+ p.add_argument('-s', '--first-burst-length', help='Negotiated parameter, FirstBurstLength.', type=int)
+ p.add_argument('-i', '--immediate-data', help='Negotiated parameter, ImmediateData.', action='store_true')
+ p.add_argument('-l', '--error-recovery-level', help='Negotiated parameter, ErrorRecoveryLevel', type=int)
+ p.add_argument('-p', '--allow-duplicated-isid', help='Allow duplicated initiator session ID.', action='store_true')
+ p.add_argument('-u', '--min-connections-per-core', help='Allocation unit of connections per core', type=int)
+ p.set_defaults(func=set_iscsi_options)
+
+ @call_cmd
+ def set_iscsi_discovery_auth(args):
+ # Configure CHAP authentication used for iSCSI discovery sessions.
+ rpc.iscsi.set_iscsi_discovery_auth(
+ args.client,
+ disable_chap=args.disable_chap,
+ require_chap=args.require_chap,
+ mutual_chap=args.mutual_chap,
+ chap_group=args.chap_group)
+
+ p = subparsers.add_parser('set_iscsi_discovery_auth', help="""Set CHAP authentication for discovery session.""")
+ p.add_argument('-d', '--disable-chap', help="""CHAP for discovery session should be disabled.
+ *** Mutually exclusive with --require-chap""", action='store_true')
+ p.add_argument('-r', '--require-chap', help="""CHAP for discovery session should be required.
+ *** Mutually exclusive with --disable-chap""", action='store_true')
+ p.add_argument('-m', '--mutual-chap', help='CHAP for discovery session should be mutual', action='store_true')
+ p.add_argument('-g', '--chap-group', help="""Authentication group ID for discovery session.
+ *** Authentication group must be precreated ***""", type=int)
+ p.set_defaults(func=set_iscsi_discovery_auth)
+
+ def add_iscsi_auth_group(args):
+ secrets = None
+ if args.secrets:
+ secrets = [dict(u.split(":") for u in a.split(" ")) for a in args.secrets.split(",")]
+
+ rpc.iscsi.add_iscsi_auth_group(args.client, tag=args.tag, secrets=secrets)
+
+ p = subparsers.add_parser('add_iscsi_auth_group', help='Add authentication group for CHAP authentication.')
+ p.add_argument('tag', help='Authentication group tag (unique, integer > 0).', type=int)
+ p.add_argument('-c', '--secrets', help="""Comma-separated list of CHAP secrets
+<user:user_name secret:chap_secret muser:mutual_user_name msecret:mutual_chap_secret> enclosed in quotes.
+Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 msecret:ms2'""", required=False)
+ p.set_defaults(func=add_iscsi_auth_group)
+
+ @call_cmd
+ def delete_iscsi_auth_group(args):
+ # Remove an entire CHAP authentication group by tag.
+ rpc.iscsi.delete_iscsi_auth_group(args.client, tag=args.tag)
+
+ p = subparsers.add_parser('delete_iscsi_auth_group', help='Delete an authentication group.')
+ p.add_argument('tag', help='Authentication group tag', type=int)
+ p.set_defaults(func=delete_iscsi_auth_group)
+
+ @call_cmd
+ def add_secret_to_iscsi_auth_group(args):
+ # Append one CHAP secret (one-way, optionally mutual) to an existing group.
+ rpc.iscsi.add_secret_to_iscsi_auth_group(
+ args.client,
+ tag=args.tag,
+ user=args.user,
+ secret=args.secret,
+ muser=args.muser,
+ msecret=args.msecret)
+
+ p = subparsers.add_parser('add_secret_to_iscsi_auth_group', help='Add a secret to an authentication group.')
+ p.add_argument('tag', help='Authentication group tag', type=int)
+ p.add_argument('-u', '--user', help='User name for one-way CHAP authentication', required=True)
+ p.add_argument('-s', '--secret', help='Secret for one-way CHAP authentication', required=True)
+ p.add_argument('-m', '--muser', help='User name for mutual CHAP authentication')
+ p.add_argument('-r', '--msecret', help='Secret for mutual CHAP authentication')
+ p.set_defaults(func=add_secret_to_iscsi_auth_group)
+
+ @call_cmd
+ def delete_secret_from_iscsi_auth_group(args):
+ # Secrets are keyed by user name within a group.
+ rpc.iscsi.delete_secret_from_iscsi_auth_group(args.client, tag=args.tag, user=args.user)
+
+ p = subparsers.add_parser('delete_secret_from_iscsi_auth_group', help='Delete a secret from an authentication group.')
+ p.add_argument('tag', help='Authentication group tag', type=int)
+ p.add_argument('-u', '--user', help='User name for one-way CHAP authentication', required=True)
+ p.set_defaults(func=delete_secret_from_iscsi_auth_group)
+
+ @call_cmd
+ def get_iscsi_auth_groups(args):
+ # Read-only: pretty-print the current auth-group configuration.
+ print_dict(rpc.iscsi.get_iscsi_auth_groups(args.client))
+
+ p = subparsers.add_parser('get_iscsi_auth_groups',
+ help='Display current authentication group configuration')
+ p.set_defaults(func=get_iscsi_auth_groups)
+
+ @call_cmd
+ def get_portal_groups(args):
+ print_dict(rpc.iscsi.get_portal_groups(args.client))
+
+ p = subparsers.add_parser(
+ 'get_portal_groups', help='Display current portal group configuration')
+ p.set_defaults(func=get_portal_groups)
+
+ @call_cmd
+ def get_initiator_groups(args):
+ print_dict(rpc.iscsi.get_initiator_groups(args.client))
+
+ p = subparsers.add_parser('get_initiator_groups',
+ help='Display current initiator group configuration')
+ p.set_defaults(func=get_initiator_groups)
+
+ @call_cmd
+ def get_target_nodes(args):
+ print_dict(rpc.iscsi.get_target_nodes(args.client))
+
+ p = subparsers.add_parser('get_target_nodes', help='Display target nodes')
+ p.set_defaults(func=get_target_nodes)
+
+ @call_cmd
+ def construct_target_node(args):
+ # Parse whitespace-separated 'bdev_name:lun_id' pairs into LUN dicts.
+ luns = []
+ for u in args.bdev_name_id_pairs.strip().split(" "):
+ bdev_name, lun_id = u.split(":")
+ luns.append({"bdev_name": bdev_name, "lun_id": int(lun_id)})
+
+ # Parse whitespace-separated 'pg_tag:ig_tag' pairs into PG-IG map dicts.
+ pg_ig_maps = []
+ for u in args.pg_ig_mappings.strip().split(" "):
+ pg, ig = u.split(":")
+ pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+
+ rpc.iscsi.construct_target_node(
+ args.client,
+ luns=luns,
+ pg_ig_maps=pg_ig_maps,
+ name=args.name,
+ alias_name=args.alias_name,
+ queue_depth=args.queue_depth,
+ chap_group=args.chap_group,
+ disable_chap=args.disable_chap,
+ require_chap=args.require_chap,
+ mutual_chap=args.mutual_chap,
+ header_digest=args.header_digest,
+ data_digest=args.data_digest)
+
+ p = subparsers.add_parser('construct_target_node',
+ help='Add a target node')
+ p.add_argument('name', help='Target node name (ASCII)')
+ p.add_argument('alias_name', help='Target node alias name (ASCII)')
+ p.add_argument('bdev_name_id_pairs', help="""Whitespace-separated list of <bdev name:LUN ID> pairs enclosed
+ in quotes. Format: 'bdev_name0:id0 bdev_name1:id1' etc
+ Example: 'Malloc0:0 Malloc1:1 Malloc5:2'
+ *** The bdevs must pre-exist ***
+ *** LUN0 (id = 0) is required ***
+ *** bdevs names cannot contain space or colon characters ***""")
+ p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
+ Whitespace separated, quoted, mapping defined with colon
+ separated list of "tags" (int > 0)
+ Example: '1:1 2:2 2:1'
+ *** The Portal/Initiator Groups must be precreated ***""")
+ p.add_argument('queue_depth', help='Desired target queue depth', type=int)
+ p.add_argument('-g', '--chap-group', help="""Authentication group ID for this target node.
+ *** Authentication group must be precreated ***""", type=int, default=0)
+ p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this target node.
+ *** Mutually exclusive with --require-chap ***""", action='store_true')
+ p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this target node.
+ *** Mutually exclusive with --disable-chap ***""", action='store_true')
+ p.add_argument(
+ '-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.', action='store_true')
+ p.add_argument('-H', '--header-digest',
+ help='Header Digest should be required for this target node.', action='store_true')
+ p.add_argument('-D', '--data-digest',
+ help='Data Digest should be required for this target node.', action='store_true')
+ p.set_defaults(func=construct_target_node)
+
+ @call_cmd
+ def target_node_add_lun(args):
+ # Attach one more LUN to an existing target node.
+ rpc.iscsi.target_node_add_lun(
+ args.client,
+ name=args.name,
+ bdev_name=args.bdev_name,
+ lun_id=args.lun_id)
+
+ p = subparsers.add_parser('target_node_add_lun', help='Add LUN to the target node')
+ p.add_argument('name', help='Target node name (ASCII)')
+ p.add_argument('bdev_name', help="""bdev name enclosed in quotes.
+ *** bdev name cannot contain space or colon characters ***""")
+ p.add_argument('-i', dest='lun_id', help="""LUN ID (integer >= 0)
+ *** If LUN ID is omitted or -1, the lowest free one is assigned ***""", type=int, required=False)
+ p.set_defaults(func=target_node_add_lun)
+
+ @call_cmd
+ def set_iscsi_target_node_auth(args):
+ # Per-target-node CHAP settings (as opposed to discovery-session CHAP).
+ rpc.iscsi.set_iscsi_target_node_auth(
+ args.client,
+ name=args.name,
+ chap_group=args.chap_group,
+ disable_chap=args.disable_chap,
+ require_chap=args.require_chap,
+ mutual_chap=args.mutual_chap)
+
+ p = subparsers.add_parser('set_iscsi_target_node_auth', help='Set CHAP authentication for the target node')
+ p.add_argument('name', help='Target node name (ASCII)')
+ p.add_argument('-g', '--chap-group', help="""Authentication group ID for this target node.
+ *** Authentication group must be precreated ***""", type=int, default=0)
+ p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this target node.
+ *** Mutually exclusive with --require-chap ***""", action='store_true')
+ p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this target node.
+ *** Mutually exclusive with --disable-chap ***""", action='store_true')
+ p.add_argument('-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.',
+ action='store_true')
+ p.set_defaults(func=set_iscsi_target_node_auth)
+
+ @call_cmd
+ def add_pg_ig_maps(args):
+ # Parse whitespace-separated 'pg_tag:ig_tag' pairs, then add them to the node.
+ pg_ig_maps = []
+ for u in args.pg_ig_mappings.strip().split(" "):
+ pg, ig = u.split(":")
+ pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+ rpc.iscsi.add_pg_ig_maps(
+ args.client,
+ pg_ig_maps=pg_ig_maps,
+ name=args.name)
+
+ p = subparsers.add_parser('add_pg_ig_maps', help='Add PG-IG maps to the target node')
+ p.add_argument('name', help='Target node name (ASCII)')
+ p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
+ Whitespace separated, quoted, mapping defined with colon
+ separated list of "tags" (int > 0)
+ Example: '1:1 2:2 2:1'
+ *** The Portal/Initiator Groups must be precreated ***""")
+ p.set_defaults(func=add_pg_ig_maps)
+
+ @call_cmd
+ def delete_pg_ig_maps(args):
+ # Same pair grammar as add_pg_ig_maps, but removes the mappings.
+ pg_ig_maps = []
+ for u in args.pg_ig_mappings.strip().split(" "):
+ pg, ig = u.split(":")
+ pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+ rpc.iscsi.delete_pg_ig_maps(
+ args.client, pg_ig_maps=pg_ig_maps, name=args.name)
+
+ p = subparsers.add_parser('delete_pg_ig_maps', help='Delete PG-IG maps from the target node')
+ p.add_argument('name', help='Target node name (ASCII)')
+ p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
+ Whitespace separated, quoted, mapping defined with colon
+ separated list of "tags" (int > 0)
+ Example: '1:1 2:2 2:1'
+ *** The Portal/Initiator Groups must be precreated ***""")
+ p.set_defaults(func=delete_pg_ig_maps)
+
+ @call_cmd
+ def add_portal_group(args):
+ portals = []
+ for p in args.portal_list:
+ ip, separator, port_cpumask = p.rpartition(':')
+ split_port_cpumask = port_cpumask.split('@')
+ if len(split_port_cpumask) == 1:
+ port = port_cpumask
+ portals.append({'host': ip, 'port': port})
+ else:
+ port = split_port_cpumask[0]
+ cpumask = split_port_cpumask[1]
+ portals.append({'host': ip, 'port': port, 'cpumask': cpumask})
+ rpc.iscsi.add_portal_group(
+ args.client,
+ portals=portals,
+ tag=args.tag)
+
+ p = subparsers.add_parser('add_portal_group', help='Add a portal group')
+ p.add_argument(
+ 'tag', help='Portal group tag (unique, integer > 0)', type=int)
+ p.add_argument('portal_list', nargs=argparse.REMAINDER, help="""List of portals in 'host:port@cpumask' format, separated by whitespace
+ (cpumask is optional and can be skipped)
+ Example: '192.168.100.100:3260' '192.168.100.100:3261' '192.168.100.100:3262@0x1""")
+ p.set_defaults(func=add_portal_group)
+
+ @call_cmd
+ def add_initiator_group(args):
+ # Split the quoted, whitespace-separated initiator and netmask lists.
+ initiators = []
+ netmasks = []
+ for i in args.initiator_list.strip().split(' '):
+ initiators.append(i)
+ for n in args.netmask_list.strip().split(' '):
+ netmasks.append(n)
+ rpc.iscsi.add_initiator_group(
+ args.client,
+ tag=args.tag,
+ initiators=initiators,
+ netmasks=netmasks)
+
+ p = subparsers.add_parser('add_initiator_group',
+ help='Add an initiator group')
+ p.add_argument(
+ 'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+ p.add_argument('initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
+ enclosed in quotes. Example: 'ANY' or '127.0.0.1 192.168.200.100'""")
+ p.add_argument('netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
+ Example: '255.255.0.0 255.248.0.0' etc""")
+ p.set_defaults(func=add_initiator_group)
+
+ @call_cmd
+ def add_initiators_to_initiator_group(args):
+ # Lists stay None when the corresponding option was omitted, so the RPC
+ # receives only the lists the user actually provided.
+ initiators = None
+ netmasks = None
+ if args.initiator_list:
+ initiators = []
+ for i in args.initiator_list.strip().split(' '):
+ initiators.append(i)
+ if args.netmask_list:
+ netmasks = []
+ for n in args.netmask_list.strip().split(' '):
+ netmasks.append(n)
+ rpc.iscsi.add_initiators_to_initiator_group(
+ args.client,
+ tag=args.tag,
+ initiators=initiators,
+ netmasks=netmasks)
+
+ p = subparsers.add_parser('add_initiators_to_initiator_group',
+ help='Add initiators to an existing initiator group')
+ p.add_argument(
+ 'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+ p.add_argument('-n', dest='initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
+ enclosed in quotes. This parameter can be omitted. Example: 'ANY' or '127.0.0.1 192.168.200.100'""", required=False)
+ p.add_argument('-m', dest='netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
+ This parameter can be omitted. Example: '255.255.0.0 255.248.0.0' etc""", required=False)
+ p.set_defaults(func=add_initiators_to_initiator_group)
+
+ @call_cmd
+ def delete_initiators_from_initiator_group(args):
+ # Mirror of add_initiators_to_initiator_group: same option grammar,
+ # removes the listed entries instead of adding them.
+ initiators = None
+ netmasks = None
+ if args.initiator_list:
+ initiators = []
+ for i in args.initiator_list.strip().split(' '):
+ initiators.append(i)
+ if args.netmask_list:
+ netmasks = []
+ for n in args.netmask_list.strip().split(' '):
+ netmasks.append(n)
+ rpc.iscsi.delete_initiators_from_initiator_group(
+ args.client,
+ tag=args.tag,
+ initiators=initiators,
+ netmasks=netmasks)
+
+ p = subparsers.add_parser('delete_initiators_from_initiator_group',
+ help='Delete initiators from an existing initiator group')
+ p.add_argument(
+ 'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+ p.add_argument('-n', dest='initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
+ enclosed in quotes. This parameter can be omitted. Example: 'ANY' or '127.0.0.1 192.168.200.100'""", required=False)
+ p.add_argument('-m', dest='netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
+ This parameter can be omitted. Example: '255.255.0.0 255.248.0.0' etc""", required=False)
+ p.set_defaults(func=delete_initiators_from_initiator_group)
+
+ @call_cmd
+ def delete_target_node(args):
+ # Remove a target node identified by its full IQN.
+ rpc.iscsi.delete_target_node(
+ args.client, target_node_name=args.target_node_name)
+
+ p = subparsers.add_parser('delete_target_node',
+ help='Delete a target node')
+ p.add_argument('target_node_name',
+ help='Target node name to be deleted. Example: iqn.2016-06.io.spdk:disk1.')
+ p.set_defaults(func=delete_target_node)
+
+ @call_cmd
+ def delete_portal_group(args):
+ rpc.iscsi.delete_portal_group(args.client, tag=args.tag)
+
+ p = subparsers.add_parser('delete_portal_group',
+ help='Delete a portal group')
+ p.add_argument(
+ 'tag', help='Portal group tag (unique, integer > 0)', type=int)
+ p.set_defaults(func=delete_portal_group)
+
+ @call_cmd
+ def delete_initiator_group(args):
+ rpc.iscsi.delete_initiator_group(args.client, tag=args.tag)
+
+ p = subparsers.add_parser('delete_initiator_group',
+ help='Delete an initiator group')
+ p.add_argument(
+ 'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+ p.set_defaults(func=delete_initiator_group)
+
+ @call_cmd
+ def get_iscsi_connections(args):
+ # Read-only query handlers below: fetch via RPC and pretty-print.
+ print_dict(rpc.iscsi.get_iscsi_connections(args.client))
+
+ p = subparsers.add_parser('get_iscsi_connections',
+ help='Display iSCSI connections')
+ p.set_defaults(func=get_iscsi_connections)
+
+ @call_cmd
+ def get_iscsi_global_params(args):
+ print_dict(rpc.iscsi.get_iscsi_global_params(args.client))
+
+ p = subparsers.add_parser('get_iscsi_global_params', help='Display iSCSI global parameters')
+ p.set_defaults(func=get_iscsi_global_params)
+
+ @call_cmd
+ def get_scsi_devices(args):
+ print_dict(rpc.iscsi.get_scsi_devices(args.client))
+
+ p = subparsers.add_parser('get_scsi_devices', help='Display SCSI devices')
+ p.set_defaults(func=get_scsi_devices)
+
+ # log
+ @call_cmd
+ def set_trace_flag(args):
+ # Enable one trace mask component on the target (e.g. 'nvme').
+ rpc.log.set_trace_flag(args.client, flag=args.flag)
+
+ p = subparsers.add_parser('set_trace_flag', help='set trace flag')
+ p.add_argument(
+ 'flag', help='trace mask we want to set. (for example "nvme").')
+ p.set_defaults(func=set_trace_flag)
+
+ @call_cmd
+ def clear_trace_flag(args):
+ rpc.log.clear_trace_flag(args.client, flag=args.flag)
+
+ p = subparsers.add_parser('clear_trace_flag', help='clear trace flag')
+ p.add_argument(
+ 'flag', help='trace mask we want to clear. (for example "nvme").')
+ p.set_defaults(func=clear_trace_flag)
+
+ @call_cmd
+ def get_trace_flags(args):
+ print_dict(rpc.log.get_trace_flags(args.client))
+
+ p = subparsers.add_parser('get_trace_flags', help='get trace flags')
+ p.set_defaults(func=get_trace_flags)
+
+ @call_cmd
+ def set_log_level(args):
+ # Log level controls what is recorded; print level (below) controls
+ # what is echoed to the console.
+ rpc.log.set_log_level(args.client, level=args.level)
+
+ p = subparsers.add_parser('set_log_level', help='set log level')
+ p.add_argument('level', help='log level we want to set. (for example "DEBUG").')
+ p.set_defaults(func=set_log_level)
+
+ @call_cmd
+ def get_log_level(args):
+ print_dict(rpc.log.get_log_level(args.client))
+
+ p = subparsers.add_parser('get_log_level', help='get log level')
+ p.set_defaults(func=get_log_level)
+
+ @call_cmd
+ def set_log_print_level(args):
+ rpc.log.set_log_print_level(args.client, level=args.level)
+
+ p = subparsers.add_parser('set_log_print_level', help='set log print level')
+ p.add_argument('level', help='log print level we want to set. (for example "DEBUG").')
+ p.set_defaults(func=set_log_print_level)
+
+ @call_cmd
+ def get_log_print_level(args):
+ print_dict(rpc.log.get_log_print_level(args.client))
+
+ p = subparsers.add_parser('get_log_print_level', help='get log print level')
+ p.set_defaults(func=get_log_print_level)
+
+ # lvol
+ @call_cmd
+ def construct_lvol_store(args):
+ # The RPC returns the new lvol store identifier, which we print for the user.
+ print(rpc.lvol.construct_lvol_store(args.client,
+ bdev_name=args.bdev_name,
+ lvs_name=args.lvs_name,
+ cluster_sz=args.cluster_sz))
+
+ p = subparsers.add_parser('construct_lvol_store', help='Add logical volume store on base bdev')
+ p.add_argument('bdev_name', help='base bdev name')
+ p.add_argument('lvs_name', help='name for lvol store')
+ p.add_argument('-c', '--cluster-sz', help='size of cluster (in bytes)', type=int, required=False)
+ p.set_defaults(func=construct_lvol_store)
+
+ @call_cmd
+ def rename_lvol_store(args):
+ rpc.lvol.rename_lvol_store(args.client,
+ old_name=args.old_name,
+ new_name=args.new_name)
+
+ p = subparsers.add_parser('rename_lvol_store', help='Change logical volume store name')
+ p.add_argument('old_name', help='old name')
+ p.add_argument('new_name', help='new name')
+ p.set_defaults(func=rename_lvol_store)
+
+ @call_cmd
+ def construct_lvol_bdev(args):
+ # CLI takes the size in MiB; the RPC expects bytes, hence the conversion.
+ print(rpc.lvol.construct_lvol_bdev(args.client,
+ lvol_name=args.lvol_name,
+ size=args.size * 1024 * 1024,
+ thin_provision=args.thin_provision,
+ uuid=args.uuid,
+ lvs_name=args.lvs_name))
+
+ p = subparsers.add_parser('construct_lvol_bdev', help='Add a bdev with an logical volume backend')
+ p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
+ p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
+ p.add_argument('-t', '--thin-provision', action='store_true', help='create lvol bdev as thin provisioned')
+ p.add_argument('lvol_name', help='name for this lvol')
+ p.add_argument('size', help='size in MiB for this bdev', type=int)
+ p.set_defaults(func=construct_lvol_bdev)
+
+ @call_cmd
+ def snapshot_lvol_bdev(args):
+ print(rpc.lvol.snapshot_lvol_bdev(args.client,
+ lvol_name=args.lvol_name,
+ snapshot_name=args.snapshot_name))
+
+ p = subparsers.add_parser('snapshot_lvol_bdev', help='Create a snapshot of an lvol bdev')
+ p.add_argument('lvol_name', help='lvol bdev name')
+ p.add_argument('snapshot_name', help='lvol snapshot name')
+ p.set_defaults(func=snapshot_lvol_bdev)
+
+ @call_cmd
+ def clone_lvol_bdev(args):
+ # Clone from an existing snapshot; prints the new bdev identifier.
+ print(rpc.lvol.clone_lvol_bdev(args.client,
+ snapshot_name=args.snapshot_name,
+ clone_name=args.clone_name))
+
+ p = subparsers.add_parser('clone_lvol_bdev', help='Create a clone of an lvol snapshot')
+ p.add_argument('snapshot_name', help='lvol snapshot name')
+ p.add_argument('clone_name', help='lvol clone name')
+ p.set_defaults(func=clone_lvol_bdev)
+
+ @call_cmd
+ def rename_lvol_bdev(args):
+ rpc.lvol.rename_lvol_bdev(args.client,
+ old_name=args.old_name,
+ new_name=args.new_name)
+
+ p = subparsers.add_parser('rename_lvol_bdev', help='Change lvol bdev name')
+ p.add_argument('old_name', help='lvol bdev name')
+ p.add_argument('new_name', help='new lvol name')
+ p.set_defaults(func=rename_lvol_bdev)
+
+ @call_cmd
+ def inflate_lvol_bdev(args):
+ rpc.lvol.inflate_lvol_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('inflate_lvol_bdev', help='Make thin provisioned lvol a thick provisioned lvol')
+ p.add_argument('name', help='lvol bdev name')
+ p.set_defaults(func=inflate_lvol_bdev)
+
+ @call_cmd
+ def decouple_parent_lvol_bdev(args):
+ rpc.lvol.decouple_parent_lvol_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('decouple_parent_lvol_bdev', help='Decouple parent of lvol')
+ p.add_argument('name', help='lvol bdev name')
+ p.set_defaults(func=decouple_parent_lvol_bdev)
+
+ @call_cmd
+ def resize_lvol_bdev(args):
+ # CLI size is in MiB; RPC expects bytes.
+ rpc.lvol.resize_lvol_bdev(args.client,
+ name=args.name,
+ size=args.size * 1024 * 1024)
+
+ p = subparsers.add_parser('resize_lvol_bdev', help='Resize existing lvol bdev')
+ p.add_argument('name', help='lvol bdev name')
+ p.add_argument('size', help='new size in MiB for this bdev', type=int)
+ p.set_defaults(func=resize_lvol_bdev)
+
+ @call_cmd
+ def destroy_lvol_bdev(args):
+ rpc.lvol.destroy_lvol_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('destroy_lvol_bdev', help='Destroy a logical volume')
+ p.add_argument('name', help='lvol bdev name')
+ p.set_defaults(func=destroy_lvol_bdev)
+
+ @call_cmd
+ def destroy_lvol_store(args):
+ # The store may be addressed either by UUID or by name; both are optional
+ # flags here and presumably validated server-side — TODO confirm.
+ rpc.lvol.destroy_lvol_store(args.client,
+ uuid=args.uuid,
+ lvs_name=args.lvs_name)
+
+ p = subparsers.add_parser('destroy_lvol_store', help='Destroy an logical volume store')
+ p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
+ p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
+ p.set_defaults(func=destroy_lvol_store)
+
+ @call_cmd
+ def get_lvol_stores(args):
+ print_dict(rpc.lvol.get_lvol_stores(args.client,
+ uuid=args.uuid,
+ lvs_name=args.lvs_name))
+
+ p = subparsers.add_parser('get_lvol_stores', help='Display current logical volume store list')
+ p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
+ p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
+ p.set_defaults(func=get_lvol_stores)
+
+ @call_cmd
+ def get_raid_bdevs(args):
+ # Returns a list of names, so print_array rather than print_dict.
+ print_array(rpc.bdev.get_raid_bdevs(args.client,
+ category=args.category))
+
+ p = subparsers.add_parser('get_raid_bdevs', help="""This is used to list all the raid bdev names based on the input category
+ requested. Category should be one of 'all', 'online', 'configuring' or 'offline'. 'all' means all the raid bdevs whether
+ they are online or configuring or offline. 'online' is the raid bdev which is registered with bdev layer. 'configuring'
+ is the raid bdev which does not have full configuration discovered yet. 'offline' is the raid bdev which is not registered
+ with bdev as of now and it has encountered any error or user has requested to offline the raid bdev""")
+ p.add_argument('category', help='all or online or configuring or offline')
+ p.set_defaults(func=get_raid_bdevs)
+
+ @call_cmd
+ def construct_raid_bdev(args):
+ # Base bdev names come in as one quoted, whitespace-separated string.
+ base_bdevs = []
+ for u in args.base_bdevs.strip().split(" "):
+ base_bdevs.append(u)
+
+ rpc.bdev.construct_raid_bdev(args.client,
+ name=args.name,
+ strip_size=args.strip_size,
+ raid_level=args.raid_level,
+ base_bdevs=base_bdevs)
+ p = subparsers.add_parser('construct_raid_bdev', help='Construct new raid bdev')
+ p.add_argument('-n', '--name', help='raid bdev name', required=True)
+ p.add_argument('-s', '--strip-size', help='strip size in KB', type=int, required=True)
+ p.add_argument('-r', '--raid-level', help='raid level, only raid level 0 is supported', type=int, required=True)
+ p.add_argument('-b', '--base-bdevs', help='base bdevs name, whitespace separated list in quotes', required=True)
+ p.set_defaults(func=construct_raid_bdev)
+
+ @call_cmd
+ def destroy_raid_bdev(args):
+ rpc.bdev.destroy_raid_bdev(args.client,
+ name=args.name)
+ p = subparsers.add_parser('destroy_raid_bdev', help='Destroy existing raid bdev')
+ p.add_argument('name', help='raid bdev name')
+ p.set_defaults(func=destroy_raid_bdev)
+
+ # split
+ @call_cmd
+ def construct_split_vbdev(args):
+ # RPC returns the list of created split bdev names.
+ print_array(rpc.bdev.construct_split_vbdev(args.client,
+ base_bdev=args.base_bdev,
+ split_count=args.split_count,
+ split_size_mb=args.split_size_mb))
+
+ p = subparsers.add_parser('construct_split_vbdev', help="""Add given disk name to split config. If bdev with base_name
+ name exist the split bdevs will be created right away, if not split bdevs will be created when base bdev became
+ available (during examination process).""")
+ p.add_argument('base_bdev', help='base bdev name')
+ p.add_argument('-s', '--split-size-mb', help='size in MiB for each bdev', type=int, default=0)
+ p.add_argument('split_count', help="""Optional - number of split bdevs to create. Total size * split_count must not
+ exceed the base bdev size.""", type=int)
+ p.set_defaults(func=construct_split_vbdev)
+
+ @call_cmd
+ def destruct_split_vbdev(args):
+ rpc.bdev.destruct_split_vbdev(args.client,
+ base_bdev=args.base_bdev)
+
+ p = subparsers.add_parser('destruct_split_vbdev', help="""Delete split config with all created splits.""")
+ p.add_argument('base_bdev', help='base bdev name')
+ p.set_defaults(func=destruct_split_vbdev)
+
+ # nbd
+ @call_cmd
+ def start_nbd_disk(args):
+ # Export a bdev through the kernel NBD interface; prints the RPC result.
+ print(rpc.nbd.start_nbd_disk(args.client,
+ bdev_name=args.bdev_name,
+ nbd_device=args.nbd_device))
+
+ p = subparsers.add_parser('start_nbd_disk', help='Export a bdev as a nbd disk')
+ p.add_argument('bdev_name', help='Blockdev name to be exported. Example: Malloc0.')
+ p.add_argument('nbd_device', help='Nbd device name to be assigned. Example: /dev/nbd0.')
+ p.set_defaults(func=start_nbd_disk)
+
+ @call_cmd
+ def stop_nbd_disk(args):
+ rpc.nbd.stop_nbd_disk(args.client,
+ nbd_device=args.nbd_device)
+
+ p = subparsers.add_parser('stop_nbd_disk', help='Stop a nbd disk')
+ p.add_argument('nbd_device', help='Nbd device name to be stopped. Example: /dev/nbd0.')
+ p.set_defaults(func=stop_nbd_disk)
+
+ @call_cmd
+ def get_nbd_disks(args):
+ # With -n, filter to a single device; otherwise list all nbd exports.
+ print_dict(rpc.nbd.get_nbd_disks(args.client,
+ nbd_device=args.nbd_device))
+
+ p = subparsers.add_parser('get_nbd_disks', help='Display full or specified nbd device list')
+ p.add_argument('-n', '--nbd-device', help="Path of the nbd device. Example: /dev/nbd0", required=False)
+ p.set_defaults(func=get_nbd_disks)
+
+ # net
+ @call_cmd
+ def add_ip_address(args):
+ # Assign an IP address to the NIC identified by its interface index.
+ rpc.net.add_ip_address(args.client, ifc_index=args.ifc_index, ip_addr=args.ip_addr)
+
+ p = subparsers.add_parser('add_ip_address', help='Add IP address')
+ p.add_argument('ifc_index', help='ifc index of the nic device.', type=int)
+ p.add_argument('ip_addr', help='ip address will be added.')
+ p.set_defaults(func=add_ip_address)
+
+ @call_cmd
+ def delete_ip_address(args):
+ rpc.net.delete_ip_address(args.client, ifc_index=args.ifc_index, ip_addr=args.ip_addr)
+
+ p = subparsers.add_parser('delete_ip_address', help='Delete IP address')
+ p.add_argument('ifc_index', help='ifc index of the nic device.', type=int)
+ p.add_argument('ip_addr', help='ip address will be deleted.')
+ p.set_defaults(func=delete_ip_address)
+
+ @call_cmd
+ def get_interfaces(args):
+ print_dict(rpc.net.get_interfaces(args.client))
+
+ p = subparsers.add_parser(
+ 'get_interfaces', help='Display current interface list')
+ p.set_defaults(func=get_interfaces)
+
+ # NVMe-oF
+ @call_cmd
+ def set_nvmf_target_options(args):
+ # Global NVMf target limits; unsupplied options pass through as None.
+ rpc.nvmf.set_nvmf_target_options(args.client,
+ max_queue_depth=args.max_queue_depth,
+ max_qpairs_per_ctrlr=args.max_qpairs_per_ctrlr,
+ in_capsule_data_size=args.in_capsule_data_size,
+ max_io_size=args.max_io_size,
+ max_subsystems=args.max_subsystems,
+ io_unit_size=args.io_unit_size)
+
+ p = subparsers.add_parser('set_nvmf_target_options', help='Set NVMf target options')
+ p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/O per queue', type=int)
+ p.add_argument('-p', '--max-qpairs-per-ctrlr', help='Max number of SQ and CQ per controller', type=int)
+ p.add_argument('-c', '--in-capsule-data-size', help='Max number of in-capsule data size', type=int)
+ p.add_argument('-i', '--max-io-size', help='Max I/O size (bytes)', type=int)
+ p.add_argument('-x', '--max-subsystems', help='Max number of NVMf subsystems', type=int)
+ p.add_argument('-u', '--io-unit-size', help='I/O unit size (bytes)', type=int)
+ p.set_defaults(func=set_nvmf_target_options)
+
+ @call_cmd
+ def set_nvmf_target_config(args):
+ rpc.nvmf.set_nvmf_target_config(args.client,
+ acceptor_poll_rate=args.acceptor_poll_rate,
+ conn_sched=args.conn_sched)
+
+ p = subparsers.add_parser('set_nvmf_target_config', help='Set NVMf target config')
+ p.add_argument('-r', '--acceptor-poll-rate', help='Polling interval of the acceptor for incoming connections (usec)', type=int)
+ p.add_argument('-s', '--conn-sched', help="""'roundrobin' - Schedule the incoming connections from any host
+ on the cores in a round robin manner (Default). 'hostip' - Schedule all the incoming connections from a
+ specific host IP on to the same core. Connections from different IP will be assigned to cores in a round
+ robin manner""")
+ p.set_defaults(func=set_nvmf_target_config)
+
+ @call_cmd
+ def nvmf_create_transport(args):
+ # Create one transport (e.g. RDMA) with per-transport tuning knobs.
+ rpc.nvmf.nvmf_create_transport(args.client,
+ trtype=args.trtype,
+ max_queue_depth=args.max_queue_depth,
+ max_qpairs_per_ctrlr=args.max_qpairs_per_ctrlr,
+ in_capsule_data_size=args.in_capsule_data_size,
+ max_io_size=args.max_io_size,
+ io_unit_size=args.io_unit_size,
+ max_aq_depth=args.max_aq_depth)
+
+ p = subparsers.add_parser('nvmf_create_transport', help='Create NVMf transport')
+ p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
+ p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/O per queue', type=int)
+ p.add_argument('-p', '--max-qpairs-per-ctrlr', help='Max number of SQ and CQ per controller', type=int)
+ p.add_argument('-c', '--in-capsule-data-size', help='Max number of in-capsule data size', type=int)
+ p.add_argument('-i', '--max-io-size', help='Max I/O size (bytes)', type=int)
+ p.add_argument('-u', '--io-unit-size', help='I/O unit size (bytes)', type=int)
+ p.add_argument('-a', '--max-aq-depth', help='Max number of admin cmds per AQ', type=int)
+ p.set_defaults(func=nvmf_create_transport)
+
+ @call_cmd
+ def get_nvmf_subsystems(args):
+ print_dict(rpc.nvmf.get_nvmf_subsystems(args.client))
+
+ p = subparsers.add_parser('get_nvmf_subsystems',
+ help='Display nvmf subsystems')
+ p.set_defaults(func=get_nvmf_subsystems)
+
+ @call_cmd
+ def construct_nvmf_subsystem(args):
+ listen_addresses = None
+ hosts = None
+ namespaces = None
+ if args.listen:
+ listen_addresses = [
+ dict(
+ u.split(
+ ":",
+ 1) for u in a.split(" ")) for a in args.listen.split(",")]
+
+ if args.hosts:
+ hosts = []
+ for u in args.hosts.strip().split(" "):
+ hosts.append(u)
+
+ if args.namespaces:
+ namespaces = []
+ for u in args.namespaces.strip().split(" "):
+ bdev_name = u
+ nsid = 0
+ if ':' in u:
+ (bdev_name, nsid) = u.split(":")
+
+ ns_params = {'bdev_name': bdev_name}
+
+ nsid = int(nsid)
+ if nsid != 0:
+ ns_params['nsid'] = nsid
+
+ namespaces.append(ns_params)
+
+ rpc.nvmf.construct_nvmf_subsystem(args.client,
+ nqn=args.nqn,
+ listen_addresses=listen_addresses,
+ hosts=hosts,
+ allow_any_host=args.allow_any_host,
+ serial_number=args.serial_number,
+ namespaces=namespaces,
+ max_namespaces=args.max_namespaces)
+
+ p = subparsers.add_parser('construct_nvmf_subsystem', help='Add a nvmf subsystem')
+ p.add_argument('nqn', help='Target nqn(ASCII)')
+ p.add_argument('listen', help="""comma-separated list of Listen <trtype:transport_name traddr:address trsvcid:port_id> pairs enclosed
+ in quotes. Format: 'trtype:transport0 traddr:traddr0 trsvcid:trsvcid0,trtype:transport1 traddr:traddr1 trsvcid:trsvcid1' etc
+ Example: 'trtype:RDMA traddr:192.168.100.8 trsvcid:4420,trtype:RDMA traddr:192.168.100.9 trsvcid:4420'""")
+    p.add_argument('hosts', help="""Whitespace-separated list of host NQNs.
+ Format: 'nqn1 nqn2' etc
+ Example: 'nqn.2016-06.io.spdk:init nqn.2016-07.io.spdk:init'""")
+ p.add_argument("-a", "--allow-any-host", action='store_true', help="Allow any host to connect (don't enforce host NQN whitelist)")
+ p.add_argument("-s", "--serial-number", help="""
+ Format: 'sn' etc
+ Example: 'SPDK00000000000001'""", default='00000000000000000000')
+ p.add_argument("-n", "--namespaces", help="""Whitespace-separated list of namespaces
+ Format: 'bdev_name1[:nsid1] bdev_name2[:nsid2] bdev_name3[:nsid3]' etc
+    Example: 'Malloc0:1 Malloc1:2 Malloc2:3'
+ *** The devices must pre-exist ***""")
+    p.add_argument("-m", "--max-namespaces", help="Maximum number of namespaces allowed to be added during active connection",
+ type=int, default=0)
+ p.set_defaults(func=construct_nvmf_subsystem)
+
+ @call_cmd
+ def nvmf_subsystem_create(args):
+ rpc.nvmf.nvmf_subsystem_create(args.client,
+ nqn=args.nqn,
+ serial_number=args.serial_number,
+ allow_any_host=args.allow_any_host,
+ max_namespaces=args.max_namespaces)
+
+ p = subparsers.add_parser('nvmf_subsystem_create', help='Create an NVMe-oF subsystem')
+ p.add_argument('nqn', help='Subsystem NQN (ASCII)')
+ p.add_argument("-s", "--serial-number", help="""
+ Format: 'sn' etc
+ Example: 'SPDK00000000000001'""", default='00000000000000000000')
+ p.add_argument("-a", "--allow-any-host", action='store_true', help="Allow any host to connect (don't enforce host NQN whitelist)")
+ p.add_argument("-m", "--max-namespaces", help="Maximum number of namespaces allowed",
+ type=int, default=0)
+ p.set_defaults(func=nvmf_subsystem_create)
+
+ @call_cmd
+ def delete_nvmf_subsystem(args):
+ rpc.nvmf.delete_nvmf_subsystem(args.client,
+ nqn=args.subsystem_nqn)
+
+ p = subparsers.add_parser('delete_nvmf_subsystem',
+ help='Delete a nvmf subsystem')
+ p.add_argument('subsystem_nqn',
+ help='subsystem nqn to be deleted. Example: nqn.2016-06.io.spdk:cnode1.')
+ p.set_defaults(func=delete_nvmf_subsystem)
+
+ @call_cmd
+ def nvmf_subsystem_add_listener(args):
+ rpc.nvmf.nvmf_subsystem_add_listener(args.client,
+ nqn=args.nqn,
+ trtype=args.trtype,
+ traddr=args.traddr,
+ adrfam=args.adrfam,
+ trsvcid=args.trsvcid)
+
+ p = subparsers.add_parser('nvmf_subsystem_add_listener', help='Add a listener to an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('-t', '--trtype', help='NVMe-oF transport type: e.g., rdma', required=True)
+ p.add_argument('-a', '--traddr', help='NVMe-oF transport address: e.g., an ip address', required=True)
+ p.add_argument('-f', '--adrfam', help='NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
+ p.add_argument('-s', '--trsvcid', help='NVMe-oF transport service id: e.g., a port number')
+ p.set_defaults(func=nvmf_subsystem_add_listener)
+
+ @call_cmd
+ def nvmf_subsystem_remove_listener(args):
+ rpc.nvmf.nvmf_subsystem_remove_listener(args.client,
+ nqn=args.nqn,
+ trtype=args.trtype,
+ traddr=args.traddr,
+ adrfam=args.adrfam,
+ trsvcid=args.trsvcid)
+
+ p = subparsers.add_parser('nvmf_subsystem_remove_listener', help='Remove a listener from an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('-t', '--trtype', help='NVMe-oF transport type: e.g., rdma', required=True)
+ p.add_argument('-a', '--traddr', help='NVMe-oF transport address: e.g., an ip address', required=True)
+ p.add_argument('-f', '--adrfam', help='NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
+ p.add_argument('-s', '--trsvcid', help='NVMe-oF transport service id: e.g., a port number')
+ p.set_defaults(func=nvmf_subsystem_remove_listener)
+
+ @call_cmd
+ def nvmf_subsystem_add_ns(args):
+ rpc.nvmf.nvmf_subsystem_add_ns(args.client,
+ nqn=args.nqn,
+ bdev_name=args.bdev_name,
+ nsid=args.nsid,
+ nguid=args.nguid,
+ eui64=args.eui64,
+ uuid=args.uuid)
+
+ p = subparsers.add_parser('nvmf_subsystem_add_ns', help='Add a namespace to an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('bdev_name', help='The name of the bdev that will back this namespace')
+ p.add_argument('-n', '--nsid', help='The requested NSID (optional)', type=int)
+ p.add_argument('-g', '--nguid', help='Namespace globally unique identifier (optional)')
+ p.add_argument('-e', '--eui64', help='Namespace EUI-64 identifier (optional)')
+ p.add_argument('-u', '--uuid', help='Namespace UUID (optional)')
+ p.set_defaults(func=nvmf_subsystem_add_ns)
+
+ @call_cmd
+ def nvmf_subsystem_remove_ns(args):
+ rpc.nvmf.nvmf_subsystem_remove_ns(args.client,
+ nqn=args.nqn,
+ nsid=args.nsid)
+
+    p = subparsers.add_parser('nvmf_subsystem_remove_ns', help='Remove a namespace from an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('nsid', help='The requested NSID', type=int)
+ p.set_defaults(func=nvmf_subsystem_remove_ns)
+
+ @call_cmd
+ def nvmf_subsystem_add_host(args):
+ rpc.nvmf.nvmf_subsystem_add_host(args.client,
+ nqn=args.nqn,
+ host=args.host)
+
+ p = subparsers.add_parser('nvmf_subsystem_add_host', help='Add a host to an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('host', help='Host NQN to allow')
+ p.set_defaults(func=nvmf_subsystem_add_host)
+
+ @call_cmd
+ def nvmf_subsystem_remove_host(args):
+ rpc.nvmf.nvmf_subsystem_remove_host(args.client,
+ nqn=args.nqn,
+ host=args.host)
+
+ p = subparsers.add_parser('nvmf_subsystem_remove_host', help='Remove a host from an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('host', help='Host NQN to remove')
+ p.set_defaults(func=nvmf_subsystem_remove_host)
+
+ @call_cmd
+ def nvmf_subsystem_allow_any_host(args):
+ rpc.nvmf.nvmf_subsystem_allow_any_host(args.client,
+ nqn=args.nqn,
+ disable=args.disable)
+
+ p = subparsers.add_parser('nvmf_subsystem_allow_any_host', help='Allow any host to connect to the subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('-e', '--enable', action='store_true', help='Enable allowing any host')
+ p.add_argument('-d', '--disable', action='store_true', help='Disable allowing any host')
+ p.set_defaults(func=nvmf_subsystem_allow_any_host)
+
+ # pmem
+ @call_cmd
+ def create_pmem_pool(args):
+ num_blocks = int((args.total_size * 1024 * 1024) / args.block_size)
+ rpc.pmem.create_pmem_pool(args.client,
+ pmem_file=args.pmem_file,
+ num_blocks=num_blocks,
+ block_size=args.block_size)
+
+ p = subparsers.add_parser('create_pmem_pool', help='Create pmem pool')
+ p.add_argument('pmem_file', help='Path to pmemblk pool file')
+    p.add_argument('total_size', help='Size of pmem pool in MB (int > 0)', type=int)
+ p.add_argument('block_size', help='Block size for this pmem pool', type=int)
+ p.set_defaults(func=create_pmem_pool)
+
+ @call_cmd
+ def pmem_pool_info(args):
+ print_dict(rpc.pmem.pmem_pool_info(args.client,
+ pmem_file=args.pmem_file))
+
+ p = subparsers.add_parser('pmem_pool_info', help='Display pmem pool info and check consistency')
+ p.add_argument('pmem_file', help='Path to pmemblk pool file')
+ p.set_defaults(func=pmem_pool_info)
+
+ @call_cmd
+ def delete_pmem_pool(args):
+ rpc.pmem.delete_pmem_pool(args.client,
+ pmem_file=args.pmem_file)
+
+ p = subparsers.add_parser('delete_pmem_pool', help='Delete pmem pool')
+ p.add_argument('pmem_file', help='Path to pmemblk pool file')
+ p.set_defaults(func=delete_pmem_pool)
+
+ # subsystem
+ @call_cmd
+ def get_subsystems(args):
+ print_dict(rpc.subsystem.get_subsystems(args.client))
+
+ p = subparsers.add_parser('get_subsystems', help="""Print subsystems array in initialization order. Each subsystem
+    entry contains an (unsorted) array of subsystems it depends on.""")
+ p.set_defaults(func=get_subsystems)
+
+ @call_cmd
+ def get_subsystem_config(args):
+ print_dict(rpc.subsystem.get_subsystem_config(args.client, args.name))
+
+ p = subparsers.add_parser('get_subsystem_config', help="""Print subsystem configuration""")
+ p.add_argument('name', help='Name of subsystem to query')
+ p.set_defaults(func=get_subsystem_config)
+
+ # vhost
+ @call_cmd
+ def set_vhost_controller_coalescing(args):
+ rpc.vhost.set_vhost_controller_coalescing(args.client,
+ ctrlr=args.ctrlr,
+ delay_base_us=args.delay_base_us,
+ iops_threshold=args.iops_threshold)
+
+ p = subparsers.add_parser('set_vhost_controller_coalescing', help='Set vhost controller coalescing')
+ p.add_argument('ctrlr', help='controller name')
+ p.add_argument('delay_base_us', help='Base delay time', type=int)
+ p.add_argument('iops_threshold', help='IOPS threshold when coalescing is enabled', type=int)
+ p.set_defaults(func=set_vhost_controller_coalescing)
+
+ @call_cmd
+ def construct_vhost_scsi_controller(args):
+ rpc.vhost.construct_vhost_scsi_controller(args.client,
+ ctrlr=args.ctrlr,
+ cpumask=args.cpumask)
+
+ p = subparsers.add_parser(
+ 'construct_vhost_scsi_controller', help='Add new vhost controller')
+ p.add_argument('ctrlr', help='controller name')
+ p.add_argument('--cpumask', help='cpu mask for this controller')
+ p.set_defaults(func=construct_vhost_scsi_controller)
+
+ @call_cmd
+ def add_vhost_scsi_lun(args):
+ rpc.vhost.add_vhost_scsi_lun(args.client,
+ ctrlr=args.ctrlr,
+ scsi_target_num=args.scsi_target_num,
+ bdev_name=args.bdev_name)
+
+ p = subparsers.add_parser('add_vhost_scsi_lun',
+ help='Add lun to vhost controller')
+    p.add_argument('ctrlr', help='controller name where the lun will be added')
+ p.add_argument('scsi_target_num', help='scsi_target_num', type=int)
+ p.add_argument('bdev_name', help='bdev name')
+ p.set_defaults(func=add_vhost_scsi_lun)
+
+ @call_cmd
+ def remove_vhost_scsi_target(args):
+ rpc.vhost.remove_vhost_scsi_target(args.client,
+ ctrlr=args.ctrlr,
+ scsi_target_num=args.scsi_target_num)
+
+ p = subparsers.add_parser('remove_vhost_scsi_target', help='Remove target from vhost controller')
+ p.add_argument('ctrlr', help='controller name to remove target from')
+ p.add_argument('scsi_target_num', help='scsi_target_num', type=int)
+ p.set_defaults(func=remove_vhost_scsi_target)
+
+ @call_cmd
+ def construct_vhost_blk_controller(args):
+ rpc.vhost.construct_vhost_blk_controller(args.client,
+ ctrlr=args.ctrlr,
+ dev_name=args.dev_name,
+ cpumask=args.cpumask,
+ readonly=args.readonly)
+
+ p = subparsers.add_parser('construct_vhost_blk_controller', help='Add a new vhost block controller')
+ p.add_argument('ctrlr', help='controller name')
+ p.add_argument('dev_name', help='device name')
+ p.add_argument('--cpumask', help='cpu mask for this controller')
+ p.add_argument("-r", "--readonly", action='store_true', help='Set controller as read-only')
+ p.set_defaults(func=construct_vhost_blk_controller)
+
+ @call_cmd
+ def construct_vhost_nvme_controller(args):
+ rpc.vhost.construct_vhost_nvme_controller(args.client,
+ ctrlr=args.ctrlr,
+ io_queues=args.io_queues,
+ cpumask=args.cpumask)
+
+ p = subparsers.add_parser('construct_vhost_nvme_controller', help='Add new vhost controller')
+ p.add_argument('ctrlr', help='controller name')
+ p.add_argument('io_queues', help='number of IO queues for the controller', type=int)
+ p.add_argument('--cpumask', help='cpu mask for this controller')
+ p.set_defaults(func=construct_vhost_nvme_controller)
+
+ @call_cmd
+ def add_vhost_nvme_ns(args):
+ rpc.vhost.add_vhost_nvme_ns(args.client,
+ ctrlr=args.ctrlr,
+ bdev_name=args.bdev_name)
+
+ p = subparsers.add_parser('add_vhost_nvme_ns', help='Add a Namespace to vhost controller')
+    p.add_argument('ctrlr', help='controller name where the Namespace will be added')
+ p.add_argument('bdev_name', help='block device name for a new Namespace')
+ p.set_defaults(func=add_vhost_nvme_ns)
+
+ @call_cmd
+ def get_vhost_controllers(args):
+ print_dict(rpc.vhost.get_vhost_controllers(args.client, args.name))
+
+ p = subparsers.add_parser('get_vhost_controllers', help='List all or specific vhost controller(s)')
+ p.add_argument('-n', '--name', help="Name of vhost controller", required=False)
+ p.set_defaults(func=get_vhost_controllers)
+
+ @call_cmd
+ def remove_vhost_controller(args):
+ rpc.vhost.remove_vhost_controller(args.client,
+ ctrlr=args.ctrlr)
+
+ p = subparsers.add_parser('remove_vhost_controller', help='Remove a vhost controller')
+ p.add_argument('ctrlr', help='controller name')
+ p.set_defaults(func=remove_vhost_controller)
+
+ @call_cmd
+ def construct_virtio_dev(args):
+ print_array(rpc.vhost.construct_virtio_dev(args.client,
+ name=args.name,
+ trtype=args.trtype,
+ traddr=args.traddr,
+ dev_type=args.dev_type,
+ vq_count=args.vq_count,
+ vq_size=args.vq_size))
+
+ p = subparsers.add_parser('construct_virtio_dev', help="""Construct new virtio device using provided
+    transport type and device type. In case of SCSI device type this implies scanning and adding bdevs offered by
+ remote side. Result is array of added bdevs.""")
+ p.add_argument('name', help="Use this name as base for new created bdevs")
+ p.add_argument('-t', '--trtype',
+ help='Virtio target transport type: pci or user', required=True)
+ p.add_argument('-a', '--traddr',
+ help='Transport type specific target address: e.g. UNIX domain socket path or BDF', required=True)
+ p.add_argument('-d', '--dev-type',
+ help='Device type: blk or scsi', required=True)
+ p.add_argument('--vq-count', help='Number of virtual queues to be used.', type=int)
+ p.add_argument('--vq-size', help='Size of each queue', type=int)
+ p.set_defaults(func=construct_virtio_dev)
+
+ @call_cmd
+ def construct_virtio_user_scsi_bdev(args):
+ print_array(rpc.vhost.construct_virtio_user_scsi_bdev(args.client,
+ path=args.path,
+ name=args.name,
+ vq_count=args.vq_count,
+ vq_size=args.vq_size))
+
+ p = subparsers.add_parser('construct_virtio_user_scsi_bdev', help="""Connect to virtio user scsi device.
+    This implies scanning and adding bdevs offered by the remote side.
+ Result is array of added bdevs.""")
+ p.add_argument('path', help='Path to Virtio SCSI socket')
+ p.add_argument('name', help="""Use this name as base instead of 'VirtioScsiN'
+    Base will be used to construct new bdevs found on the target by adding the 't<TARGET_ID>' suffix.""")
+ p.add_argument('--vq-count', help='Number of virtual queues to be used.', type=int)
+ p.add_argument('--vq-size', help='Size of each queue', type=int)
+ p.set_defaults(func=construct_virtio_user_scsi_bdev)
+
+ @call_cmd
+ def construct_virtio_pci_scsi_bdev(args):
+ print_array(rpc.vhost.construct_virtio_pci_scsi_bdev(args.client,
+ pci_address=args.pci_address,
+ name=args.name))
+
+ p = subparsers.add_parser('construct_virtio_pci_scsi_bdev', help="""Create a Virtio
+ SCSI device from a virtio-pci device.""")
+ p.add_argument('pci_address', help="""PCI address in domain:bus:device.function format or
+ domain.bus.device.function format""")
+ p.add_argument('name', help="""Name for the virtio device.
+    It will be inherited by all created bdevs, which are named in the following format: <name>t<target_id>""")
+ p.set_defaults(func=construct_virtio_pci_scsi_bdev)
+
+ @call_cmd
+ def get_virtio_scsi_devs(args):
+ print_dict(rpc.vhost.get_virtio_scsi_devs(args.client))
+
+ p = subparsers.add_parser('get_virtio_scsi_devs', help='List all Virtio-SCSI devices.')
+ p.set_defaults(func=get_virtio_scsi_devs)
+
+ @call_cmd
+ def remove_virtio_scsi_bdev(args):
+ rpc.vhost.remove_virtio_scsi_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('remove_virtio_scsi_bdev', help="""Remove a Virtio-SCSI device
+ This will delete all bdevs exposed by this device (this call is deprecated - please use remove_virtio_bdev call instead).""")
+ p.add_argument('name', help='Virtio device name. E.g. VirtioUser0')
+ p.set_defaults(func=remove_virtio_scsi_bdev)
+
+ @call_cmd
+ def remove_virtio_bdev(args):
+ rpc.vhost.remove_virtio_bdev(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('remove_virtio_bdev', help="""Remove a Virtio device
+ This will delete all bdevs exposed by this device""")
+ p.add_argument('name', help='Virtio device name. E.g. VirtioUser0')
+ p.set_defaults(func=remove_virtio_bdev)
+
+ @call_cmd
+ def construct_virtio_user_blk_bdev(args):
+ print(rpc.vhost.construct_virtio_user_blk_bdev(args.client,
+ path=args.path,
+ name=args.name,
+ vq_count=args.vq_count,
+ vq_size=args.vq_size))
+
+ p = subparsers.add_parser('construct_virtio_user_blk_bdev', help='Connect to a virtio user blk device.')
+ p.add_argument('path', help='Path to Virtio BLK socket')
+ p.add_argument('name', help='Name for the bdev')
+ p.add_argument('--vq-count', help='Number of virtual queues to be used.', type=int)
+ p.add_argument('--vq-size', help='Size of each queue', type=int)
+ p.set_defaults(func=construct_virtio_user_blk_bdev)
+
+ @call_cmd
+ def construct_virtio_pci_blk_bdev(args):
+ print(rpc.vhost.construct_virtio_pci_blk_bdev(args.client,
+ pci_address=args.pci_address,
+ name=args.name))
+
+ p = subparsers.add_parser('construct_virtio_pci_blk_bdev', help='Create a Virtio Blk device from a virtio-pci device.')
+ p.add_argument('pci_address', help="""PCI address in domain:bus:device.function format or
+ domain.bus.device.function format""")
+ p.add_argument('name', help='Name for the bdev')
+ p.set_defaults(func=construct_virtio_pci_blk_bdev)
+
+ # ioat
+ @call_cmd
+ def scan_ioat_copy_engine(args):
+ pci_whitelist = []
+ if args.pci_whitelist:
+ for w in args.pci_whitelist.strip().split(" "):
+ pci_whitelist.append(w)
+ rpc.ioat.scan_ioat_copy_engine(args.client, pci_whitelist)
+
+ p = subparsers.add_parser('scan_ioat_copy_engine', help='Set scan and enable IOAT copy engine offload.')
+ p.add_argument('-w', '--pci-whitelist', help="""Whitespace-separated list of PCI addresses in
+ domain:bus:device.function format or domain.bus.device.function format""")
+ p.set_defaults(func=scan_ioat_copy_engine)
+
+ # send_nvme_cmd
+ @call_cmd
+ def send_nvme_cmd(args):
+ print_dict(rpc.nvme.send_nvme_cmd(args.client,
+ name=args.nvme_name,
+ cmd_type=args.cmd_type,
+ data_direction=args.data_direction,
+ cmdbuf=args.cmdbuf,
+ data=args.data,
+ metadata=args.metadata,
+ data_len=args.data_length,
+ metadata_len=args.metadata_length,
+ timeout_ms=args.timeout_ms))
+
+ p = subparsers.add_parser('send_nvme_cmd', help='NVMe passthrough cmd.')
+ p.add_argument('-n', '--nvme-name', help="""Name of the operating NVMe controller""")
+ p.add_argument('-t', '--cmd-type', help="""Type of nvme cmd. Valid values are: admin, io""")
+ p.add_argument('-r', '--data-direction', help="""Direction of data transfer. Valid values are: c2h, h2c""")
+ p.add_argument('-c', '--cmdbuf', help="""NVMe command encoded by base64 urlsafe""")
+ p.add_argument('-d', '--data', help="""Data transferring to controller from host, encoded by base64 urlsafe""")
+ p.add_argument('-m', '--metadata', help="""Metadata transferring to controller from host, encoded by base64 urlsafe""")
+ p.add_argument('-D', '--data-length', help="""Data length required to transfer from controller to host""", type=int)
+ p.add_argument('-M', '--metadata-length', help="""Metadata length required to transfer from controller to host""", type=int)
+ p.add_argument('-T', '--timeout-ms',
+ help="""Command execution timeout value, in milliseconds, if 0, don't track timeout""", type=int, default=0)
+ p.set_defaults(func=send_nvme_cmd)
+
+ args = parser.parse_args()
+
+ try:
+ args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.verbose, args.timeout)
+ except JSONRPCException as ex:
+ print(ex.message)
+ exit(1)
+ args.func(args)
diff --git a/src/spdk/scripts/rpc/__init__.py b/src/spdk/scripts/rpc/__init__.py
new file mode 100644
index 00000000..9a4dbb58
--- /dev/null
+++ b/src/spdk/scripts/rpc/__init__.py
@@ -0,0 +1,157 @@
+import json
+import sys
+
+from . import app
+from . import bdev
+from . import ioat
+from . import iscsi
+from . import log
+from . import lvol
+from . import nbd
+from . import net
+from . import nvme
+from . import nvmf
+from . import pmem
+from . import subsystem
+from . import vhost
+from . import client as rpc_client
+
+
+def start_subsystem_init(client):
+ """Start initialization of subsystems"""
+ return client.call('start_subsystem_init')
+
+
+def get_rpc_methods(client, current=None):
+ """Get list of supported RPC methods.
+ Args:
+ current: Get list of RPC methods only callable in the current state.
+ """
+ params = {}
+
+ if current:
+ params['current'] = current
+
+ return client.call('get_rpc_methods', params)
+
+
+def _json_dump(config, fd, indent):
+ if indent is None:
+ indent = 2
+ elif indent < 0:
+ indent = None
+ json.dump(config, fd, indent=indent)
+ fd.write('\n')
+
+
+def save_config(client, fd, indent=2):
+ """Write current (live) configuration of SPDK subsystems and targets to stdout.
+ Args:
+ fd: opened file descriptor where data will be saved
+        indent: Indent level. Value less than 0 means compact mode.
+ Default indent level is 2.
+ """
+ config = {
+ 'subsystems': []
+ }
+
+ for elem in client.call('get_subsystems'):
+ cfg = {
+ 'subsystem': elem['subsystem'],
+ 'config': client.call('get_subsystem_config', {"name": elem['subsystem']})
+ }
+ config['subsystems'].append(cfg)
+
+ _json_dump(config, fd, indent)
+
+
+def load_config(client, fd):
+ """Configure SPDK subsystems and targets using JSON RPC read from stdin.
+ Args:
+ fd: opened file descriptor where data will be taken from
+ """
+ json_config = json.load(fd)
+
+ # remove subsystems with no config
+ subsystems = json_config['subsystems']
+ for subsystem in list(subsystems):
+ if not subsystem['config']:
+ subsystems.remove(subsystem)
+
+ # check if methods in the config file are known
+ allowed_methods = client.call('get_rpc_methods')
+ for subsystem in list(subsystems):
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ raise rpc_client.JSONRPCException("Unknown method was included in the config file")
+
+ while subsystems:
+ allowed_methods = client.call('get_rpc_methods', {'current': True})
+ allowed_found = False
+
+ for subsystem in list(subsystems):
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ continue
+
+ client.call(elem['method'], elem['params'])
+ config.remove(elem)
+ allowed_found = True
+
+ if not config:
+ subsystems.remove(subsystem)
+
+ if 'start_subsystem_init' in allowed_methods:
+ client.call('start_subsystem_init')
+ allowed_found = True
+
+ if not allowed_found:
+ break
+
+ if subsystems:
+        print("Some configs were skipped because the RPC state in which they can be called has already passed.")
+
+
+def save_subsystem_config(client, fd, indent=2, name=None):
+ """Write current (live) configuration of SPDK subsystem to stdout.
+ Args:
+ fd: opened file descriptor where data will be saved
+        indent: Indent level. Value less than 0 means compact mode.
+ Default is indent level 2.
+ """
+ cfg = {
+ 'subsystem': name,
+ 'config': client.call('get_subsystem_config', {"name": name})
+ }
+
+ _json_dump(cfg, fd, indent)
+
+
+def load_subsystem_config(client, fd):
+ """Configure SPDK subsystem using JSON RPC read from stdin.
+ Args:
+ fd: opened file descriptor where data will be taken from
+ """
+ subsystem = json.load(fd)
+
+ if not subsystem['config']:
+ return
+
+ allowed_methods = client.call('get_rpc_methods')
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ raise rpc_client.JSONRPCException("Unknown method was included in the config file")
+
+ allowed_methods = client.call('get_rpc_methods', {'current': True})
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ continue
+
+ client.call(elem['method'], elem['params'])
+ config.remove(elem)
+
+ if config:
+ print("Some configs were skipped because they cannot be called in the current RPC state.")
diff --git a/src/spdk/scripts/rpc/app.py b/src/spdk/scripts/rpc/app.py
new file mode 100644
index 00000000..c9b088f8
--- /dev/null
+++ b/src/spdk/scripts/rpc/app.py
@@ -0,0 +1,23 @@
+def kill_instance(client, sig_name):
+ """Send a signal to the SPDK process.
+
+ Args:
+ sig_name: signal to send ("SIGINT", "SIGTERM", "SIGQUIT", "SIGHUP", or "SIGKILL")
+ """
+ params = {'sig_name': sig_name}
+ return client.call('kill_instance', params)
+
+
+def context_switch_monitor(client, enabled=None):
+ """Query or set state of context switch monitoring.
+
+ Args:
+ enabled: True to enable monitoring; False to disable monitoring; None to query (optional)
+
+ Returns:
+ Current context switch monitoring state (after applying enabled flag).
+ """
+ params = {}
+ if enabled is not None:
+ params['enabled'] = enabled
+ return client.call('context_switch_monitor', params)
diff --git a/src/spdk/scripts/rpc/bdev.py b/src/spdk/scripts/rpc/bdev.py
new file mode 100644
index 00000000..6c7d0ecd
--- /dev/null
+++ b/src/spdk/scripts/rpc/bdev.py
@@ -0,0 +1,531 @@
def set_bdev_options(client, bdev_io_pool_size=None, bdev_io_cache_size=None):
    """Set parameters for the bdev subsystem.

    Args:
        bdev_io_pool_size: number of bdev_io structures in shared buffer pool (optional)
        bdev_io_cache_size: maximum number of bdev_io structures cached per thread (optional)
    """
    opts = {}
    for key, value in (('bdev_io_pool_size', bdev_io_pool_size),
                       ('bdev_io_cache_size', bdev_io_cache_size)):
        if value:
            opts[key] = value

    return client.call('set_bdev_options', opts)
+
+
def construct_crypto_bdev(client, base_bdev_name, name, crypto_pmd, key):
    """Construct a crypto virtual block device.

    Args:
        base_bdev_name: name of the underlying base bdev
        name: name for the crypto vbdev
        crypto_pmd: name of the DPDK crypto driver to use
        key: key to use (forwarded verbatim to the RPC)

    Returns:
        Name of created virtual block device.
    """
    params = {'base_bdev_name': base_bdev_name, 'name': name, 'crypto_pmd': crypto_pmd, 'key': key}

    return client.call('construct_crypto_bdev', params)
+
+
+def delete_crypto_bdev(client, name):
+ """Delete crypto virtual block device.
+
+ Args:
+ name: name of crypto vbdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_crypto_bdev', params)
+
+
def construct_malloc_bdev(client, num_blocks, block_size, name=None, uuid=None):
    """Construct a malloc block device.

    Args:
        num_blocks: size of block device in blocks
        block_size: block size of device; must be a power of 2 and at least 512
        name: name of block device (optional)
        uuid: UUID of block device (optional)

    Returns:
        Name of created block device.
    """
    request = {'num_blocks': num_blocks, 'block_size': block_size}
    for key, value in (('name', name), ('uuid', uuid)):
        if value:
            request[key] = value
    return client.call('construct_malloc_bdev', request)
+
+
def delete_malloc_bdev(client, name):
    """Delete malloc block device.

    Args:
        name: name of malloc bdev to delete
    """
    params = {'name': name}
    return client.call('delete_malloc_bdev', params)
+
+
+def construct_null_bdev(client, num_blocks, block_size, name, uuid=None):
+ """Construct a null block device.
+
+ Args:
+ num_blocks: size of block device in blocks
+ block_size: block size of device; must be a power of 2 and at least 512
+ name: name of block device
+ uuid: UUID of block device (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'name': name, 'num_blocks': num_blocks,
+ 'block_size': block_size}
+ if uuid:
+ params['uuid'] = uuid
+ return client.call('construct_null_bdev', params)
+
+
+def delete_null_bdev(client, name):
+ """Remove null bdev from the system.
+
+ Args:
+ name: name of null bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_null_bdev', params)
+
+
+def get_raid_bdevs(client, category):
+ """Get list of raid bdevs based on category
+
+ Args:
+ category: any one of all or online or configuring or offline
+
+ Returns:
+ List of raid bdev names
+ """
+ params = {'category': category}
+ return client.call('get_raid_bdevs', params)
+
+
+def construct_raid_bdev(client, name, strip_size, raid_level, base_bdevs):
+ """Construct pooled device
+
+ Args:
+ name: user defined raid bdev name
+ strip_size: strip size of raid bdev in KB, supported values like 8, 16, 32, 64, 128, 256, 512, 1024 etc
+ raid_level: raid level of raid bdev, supported values 0
+ base_bdevs: Space separated names of Nvme bdevs in double quotes, like "Nvme0n1 Nvme1n1 Nvme2n1"
+
+ Returns:
+ None
+ """
+ params = {'name': name, 'strip_size': strip_size, 'raid_level': raid_level, 'base_bdevs': base_bdevs}
+
+ return client.call('construct_raid_bdev', params)
+
+
+def destroy_raid_bdev(client, name):
+ """Destroy pooled device
+
+ Args:
+ name: raid bdev name
+
+ Returns:
+ None
+ """
+ params = {'name': name}
+ return client.call('destroy_raid_bdev', params)
+
+
+def construct_aio_bdev(client, filename, name, block_size=None):
+ """Construct a Linux AIO block device.
+
+ Args:
+ filename: path to device or file (ex: /dev/sda)
+ name: name of block device
+ block_size: block size of device (optional; autodetected if omitted)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'name': name,
+ 'filename': filename}
+
+ if block_size:
+ params['block_size'] = block_size
+
+ return client.call('construct_aio_bdev', params)
+
+
def delete_aio_bdev(client, name):
    """Remove aio bdev from the system.

    Args:
        name: name of aio bdev to delete
    """
    params = {'name': name}
    return client.call('delete_aio_bdev', params)
+
+
def set_bdev_nvme_options(client, action_on_timeout=None, timeout_us=None, retry_count=None, nvme_adminq_poll_period_us=None):
    """Set options for the bdev nvme. This is startup command.

    Args:
        action_on_timeout: action to take on command time out. Valid values are: none, reset, abort (optional)
        timeout_us: Timeout for each command, in microseconds. If 0, don't track timeouts (optional)
        retry_count: The number of attempts per I/O when an I/O fails (optional)
        nvme_adminq_poll_period_us: how often the admin queue is polled for asynchronous events in microseconds (optional)

    Note:
        Falsy values (None or 0) are omitted from the request, so an explicit
        0 cannot be sent through this helper; the target's default applies.
    """
    params = {}

    if action_on_timeout:
        params['action_on_timeout'] = action_on_timeout

    if timeout_us:
        params['timeout_us'] = timeout_us

    if retry_count:
        params['retry_count'] = retry_count

    if nvme_adminq_poll_period_us:
        params['nvme_adminq_poll_period_us'] = nvme_adminq_poll_period_us

    return client.call('set_bdev_nvme_options', params)
+
+
+def set_bdev_nvme_hotplug(client, enable, period_us=None):
+ """Set options for the bdev nvme. This is startup command.
+
+ Args:
+ enable: True to enable hotplug, False to disable.
+ period_us: how often the hotplug is processed for insert and remove events. Set 0 to reset to default. (optional)
+ """
+ params = {'enable': enable}
+
+ if period_us:
+ params['period_us'] = period_us
+
+ return client.call('set_bdev_nvme_hotplug', params)
+
+
def construct_nvme_bdev(client, name, trtype, traddr, adrfam=None, trsvcid=None, subnqn=None):
    """Construct NVMe namespace block device.

    Args:
        name: bdev name prefix; "n" + namespace ID will be appended to create unique names
        trtype: transport type ("PCIe", "RDMA")
        traddr: transport address (PCI BDF or IP address)
        adrfam: address family ("IPv4", "IPv6", "IB", or "FC") (optional for PCIe)
        trsvcid: transport service ID (port number for IP-based addresses; optional for PCIe)
        subnqn: subsystem NQN to connect to (optional)

    Returns:
        Name of created block device.
    """
    request = {'name': name, 'trtype': trtype, 'traddr': traddr}
    # Only forward the fabrics-specific fields that were actually supplied.
    optional = (('adrfam', adrfam), ('trsvcid', trsvcid), ('subnqn', subnqn))
    request.update((k, v) for k, v in optional if v)
    return client.call('construct_nvme_bdev', request)
+
+
+def delete_nvme_controller(client, name):
+ """Remove NVMe controller from the system.
+
+ Args:
+ name: controller name
+ """
+
+ params = {'name': name}
+ return client.call('delete_nvme_controller', params)
+
+
+def construct_rbd_bdev(client, pool_name, rbd_name, block_size, name=None):
+ """Construct a Ceph RBD block device.
+
+ Args:
+ pool_name: Ceph RBD pool name
+ rbd_name: Ceph RBD image name
+ block_size: block size of RBD volume
+ name: name of block device (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'pool_name': pool_name,
+ 'rbd_name': rbd_name,
+ 'block_size': block_size,
+ }
+
+ if name:
+ params['name'] = name
+
+ return client.call('construct_rbd_bdev', params)
+
+
+def delete_rbd_bdev(client, name):
+ """Remove rbd bdev from the system.
+
+ Args:
+ name: name of rbd bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_rbd_bdev', params)
+
+
+def construct_error_bdev(client, base_name):
+ """Construct an error injection block device.
+
+ Args:
+ base_name: base bdev name
+ """
+ params = {'base_name': base_name}
+ return client.call('construct_error_bdev', params)
+
+
def delete_error_bdev(client, name):
    """Remove error bdev from the system.

    Args:
        name: name of error bdev to delete
    """
    params = {'name': name}
    return client.call('delete_error_bdev', params)
+
+
+def construct_iscsi_bdev(client, name, url, initiator_iqn):
+ """Construct a iSCSI block device.
+
+ Args:
+ name: name of block device
+ url: iSCSI URL
+ initiator_iqn: IQN name to be used by initiator
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'name': name,
+ 'url': url,
+ 'initiator_iqn': initiator_iqn,
+ }
+ return client.call('construct_iscsi_bdev', params)
+
+
def delete_iscsi_bdev(client, name):
    """Remove iSCSI bdev from the system.

    Args:
        name: name of iSCSI bdev to delete
    """
    params = {'name': name}
    return client.call('delete_iscsi_bdev', params)
+
+
+def construct_pmem_bdev(client, pmem_file, name):
+ """Construct a libpmemblk block device.
+
+ Args:
+ pmem_file: path to pmemblk pool file
+ name: name of block device
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'pmem_file': pmem_file,
+ 'name': name
+ }
+ return client.call('construct_pmem_bdev', params)
+
+
+def delete_pmem_bdev(client, name):
+ """Remove pmem bdev from the system.
+
+ Args:
+ name: name of pmem bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_pmem_bdev', params)
+
+
+def construct_passthru_bdev(client, base_bdev_name, passthru_bdev_name):
+ """Construct a pass-through block device.
+
+ Args:
+ base_bdev_name: name of the existing bdev
+ passthru_bdev_name: name of block device
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'base_bdev_name': base_bdev_name,
+ 'passthru_bdev_name': passthru_bdev_name,
+ }
+ return client.call('construct_passthru_bdev', params)
+
+
+def delete_passthru_bdev(client, name):
+ """Remove pass through bdev from the system.
+
+ Args:
+ name: name of pass through bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_passthru_bdev', params)
+
+
+def construct_split_vbdev(client, base_bdev, split_count, split_size_mb=None):
+ """Construct split block devices from a base bdev.
+
+ Args:
+ base_bdev: name of bdev to split
+ split_count: number of split bdevs to create
+ split_size_mb: size of each split volume in MiB (optional)
+
+ Returns:
+ List of created block devices.
+ """
+ params = {
+ 'base_bdev': base_bdev,
+ 'split_count': split_count,
+ }
+ if split_size_mb:
+ params['split_size_mb'] = split_size_mb
+
+ return client.call('construct_split_vbdev', params)
+
+
+def destruct_split_vbdev(client, base_bdev):
+ """Destroy split block devices.
+
+ Args:
+ base_bdev: name of previously split bdev
+ """
+ params = {
+ 'base_bdev': base_bdev,
+ }
+
+ return client.call('destruct_split_vbdev', params)
+
+
def get_bdevs(client, name=None):
    """Get information about block devices.

    Args:
        name: bdev name to query (optional; if omitted, query all bdevs)

    Returns:
        List of bdev information objects.
    """
    return client.call('get_bdevs', {'name': name} if name else {})
+
+
+def get_bdevs_iostat(client, name=None):
+ """Get I/O statistics for block devices.
+
+ Args:
+ name: bdev name to query (optional; if omitted, query all bdevs)
+
+ Returns:
+ I/O statistics for the requested block devices.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+ return client.call('get_bdevs_iostat', params)
+
+
def delete_bdev(client, bdev_name):
    """Remove a bdev from the system.

    Args:
        bdev_name: name of bdev to delete
    """
    return client.call('delete_bdev', {'name': bdev_name})
+
+
def bdev_inject_error(client, name, io_type, error_type, num=1):
    """Inject an error via an error bdev.

    Args:
        name: name of error bdev
        io_type: one of "clear", "read", "write", "unmap", "flush", or "all"
        error_type: one of "failure" or "pending"
        num: number of commands to fail
    """
    request = dict(name=name, io_type=io_type, error_type=error_type, num=num)
    return client.call('bdev_inject_error', request)
+
+
+def set_bdev_qd_sampling_period(client, name, period):
+ """Enable queue depth tracking on a specified bdev.
+
+ Args:
+ name: name of a bdev on which to track queue depth.
+ period: period (in microseconds) at which to update the queue depth reading. If set to 0, polling will be disabled.
+ """
+
+ params = {}
+ params['name'] = name
+ params['period'] = period
+ return client.call('set_bdev_qd_sampling_period', params)
+
+
def set_bdev_qos_limit(client, name, rw_ios_per_sec=None, rw_mbytes_per_sec=None):
    """Set QoS rate limit on a block device.

    Args:
        name: name of block device
        rw_ios_per_sec: R/W IOs per second limit (>=10000, example: 20000). 0 means unlimited.
        rw_mbytes_per_sec: R/W megabytes per second limit (>=10, example: 100). 0 means unlimited.
    """
    limits = {'name': name}
    # 0 is meaningful here (unlimited), so only None means "leave unchanged".
    if rw_ios_per_sec is not None:
        limits['rw_ios_per_sec'] = rw_ios_per_sec
    if rw_mbytes_per_sec is not None:
        limits['rw_mbytes_per_sec'] = rw_mbytes_per_sec
    return client.call('set_bdev_qos_limit', limits)
+
+
def apply_firmware(client, bdev_name, filename):
    """Download and commit firmware to NVMe device.

    Args:
        bdev_name: name of NVMe block device
        filename: filename of the firmware to download
    """
    params = {
        'filename': filename,
        'bdev_name': bdev_name,
    }
    # NOTE: the RPC method name ('apply_nvme_firmware') intentionally differs
    # from this helper's name.
    return client.call('apply_nvme_firmware', params)
diff --git a/src/spdk/scripts/rpc/client.py b/src/spdk/scripts/rpc/client.py
new file mode 100644
index 00000000..6a71ab51
--- /dev/null
+++ b/src/spdk/scripts/rpc/client.py
@@ -0,0 +1,100 @@
+import json
+import socket
+import time
+
+
def print_dict(d):
    """Pretty-print a JSON-serializable object to stdout with 2-space indent."""
    print(json.dumps(d, indent=2))
+
+
class JSONRPCException(Exception):
    """Raised for connection failures, timeouts, and JSON-RPC error responses."""

    def __init__(self, message):
        # Forward to Exception so str(e) and e.args carry the message; the
        # previous override left args empty, making str(e) == ''.
        super(JSONRPCException, self).__init__(message)
        self.message = message
+
+
class JSONRPCClient(object):
    """Minimal JSON-RPC 2.0 client speaking to SPDK over a Unix or TCP socket."""

    def __init__(self, addr, port=None, verbose=False, timeout=60.0):
        """Connect to a JSON-RPC server.

        Args:
            addr: Unix domain socket path (starts with '/'), IPv6 address
                  (contains ':'), or IPv4 address / hostname.
            port: TCP port (unused for Unix domain sockets)
            verbose: True to log every request/response to stdout
            timeout: seconds to wait for a complete response

        Raises:
            JSONRPCException: if the connection cannot be established.
        """
        self.verbose = verbose
        self.timeout = timeout
        self.sock = None  # set first so __del__ is safe if connect fails
        try:
            if addr.startswith('/'):
                self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                self.sock.connect(addr)
            elif ':' in addr:
                for res in socket.getaddrinfo(addr, port, socket.AF_INET6, socket.SOCK_STREAM, socket.SOL_TCP):
                    af, socktype, proto, canonname, sa = res
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.connect(sa)
            else:
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.sock.connect((addr, port))
        except socket.error as ex:
            raise JSONRPCException("Error while connecting to %s\n"
                                   "Error details: %s" % (addr, ex))

    def __del__(self):
        # The socket may never have been created if __init__ raised early.
        if self.sock is not None:
            self.sock.close()

    def call(self, method, params=None, verbose=False):
        """Issue one JSON-RPC request and return its 'result' field.

        Args:
            method: RPC method name
            params: dict of parameters; omitted from the request when empty
                    (default changed from a shared mutable {} to None)
            verbose: True to log this request/response to stdout

        Raises:
            JSONRPCException: on timeout, closed connection, or an 'error'
                response from the server.
        """
        req = {'jsonrpc': '2.0', 'method': method, 'id': 1}
        if params:
            req['params'] = params
        reqstr = json.dumps(req)

        verbose = verbose or self.verbose

        if verbose:
            print("request:")
            print(json.dumps(req, indent=2))

        self.sock.sendall(reqstr.encode("utf-8"))
        buf = ''
        closed = False
        response = {}
        # time.monotonic() replaces time.clock(), which was removed in
        # Python 3.8 (and measured CPU time, not wall time, on Linux).
        start_time = time.monotonic()

        while not closed:
            try:
                timeout = self.timeout - (time.monotonic() - start_time)
                if timeout <= 0.0:
                    break

                self.sock.settimeout(timeout)
                newdata = self.sock.recv(4096)
                if newdata == b'':
                    closed = True

                buf += newdata.decode("utf-8")
                response = json.loads(buf)
            except socket.timeout:
                break
            except ValueError:
                continue  # incomplete response; keep buffering
            break

        if not response:
            if method == "kill_instance":
                # The target dies before replying; treat that as success.
                return {}
            if closed:
                msg = "Connection closed with partial response:"
            else:
                msg = "Timeout while waiting for response:"
            msg = "\n".join([msg, buf])
            raise JSONRPCException(msg)

        if verbose:
            print("response:")
            print(json.dumps(response, indent=2))

        if 'error' in response:
            msg = "\n".join(["Got JSON-RPC error response",
                             "request:",
                             json.dumps(req, indent=2),
                             "response:",
                             json.dumps(response['error'], indent=2)])
            raise JSONRPCException(msg)

        return response['result']
diff --git a/src/spdk/scripts/rpc/ioat.py b/src/spdk/scripts/rpc/ioat.py
new file mode 100644
index 00000000..958e18bb
--- /dev/null
+++ b/src/spdk/scripts/rpc/ioat.py
@@ -0,0 +1,12 @@
def scan_ioat_copy_engine(client, pci_whitelist):
    """Scan and enable IOAT copy engine.

    Args:
        pci_whitelist: Python list of PCI addresses in
                       domain:bus:device.function format or
                       domain.bus.device.function format
    """
    request = {'pci_whitelist': pci_whitelist} if pci_whitelist else {}
    return client.call('scan_ioat_copy_engine', request)
diff --git a/src/spdk/scripts/rpc/iscsi.py b/src/spdk/scripts/rpc/iscsi.py
new file mode 100644
index 00000000..a824ad20
--- /dev/null
+++ b/src/spdk/scripts/rpc/iscsi.py
@@ -0,0 +1,502 @@
+
+
+def set_iscsi_options(
+ client,
+ auth_file=None,
+ node_base=None,
+ nop_timeout=None,
+ nop_in_interval=None,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None,
+ chap_group=None,
+ max_sessions=None,
+ max_queue_depth=None,
+ max_connections_per_session=None,
+ default_time2wait=None,
+ default_time2retain=None,
+ first_burst_length=None,
+ immediate_data=None,
+ error_recovery_level=None,
+ allow_duplicated_isid=None,
+ min_connections_per_core=None):
+ """Set iSCSI target options.
+
+ Args:
+ auth_file: Path to CHAP shared secret file (optional)
+ node_base: Prefix of the name of iSCSI target node (optional)
+ nop_timeout: Timeout in seconds to nop-in request to the initiator (optional)
+ nop_in_interval: Time interval in secs between nop-in requests by the target (optional)
+ disable_chap: CHAP for discovery session should be disabled (optional)
+ require_chap: CHAP for discovery session should be required
+ mutual_chap: CHAP for discovery session should be mutual
+ chap_group: Authentication group ID for discovery session
+ max_sessions: Maximum number of sessions in the host
+ max_queue_depth: Maximum number of outstanding I/Os per queue
+ max_connections_per_session: Negotiated parameter, MaxConnections
+ default_time2wait: Negotiated parameter, DefaultTime2Wait
+ default_time2retain: Negotiated parameter, DefaultTime2Retain
+ first_burst_length: Negotiated parameter, FirstBurstLength
+ immediate_data: Negotiated parameter, ImmediateData
+ error_recovery_level: Negotiated parameter, ErrorRecoveryLevel
+ allow_duplicated_isid: Allow duplicated initiator session ID
+ min_connections_per_core: Allocation unit of connections per core
+
+ Returns:
+ True or False
+ """
+ params = {}
+
+ if auth_file:
+ params['auth_file'] = auth_file
+ if node_base:
+ params['node_base'] = node_base
+ if nop_timeout:
+ params['nop_timeout'] = nop_timeout
+ if nop_in_interval:
+ params['nop_in_interval'] = nop_in_interval
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ if chap_group:
+ params['chap_group'] = chap_group
+ if max_sessions:
+ params['max_sessions'] = max_sessions
+ if max_queue_depth:
+ params['max_queue_depth'] = max_queue_depth
+ if max_connections_per_session:
+ params['max_connections_per_session'] = max_connections_per_session
+ if default_time2wait:
+ params['default_time2wait'] = default_time2wait
+ if default_time2retain:
+ params['default_time2retain'] = default_time2retain
+ if first_burst_length:
+ params['first_burst_length'] = first_burst_length
+ if immediate_data:
+ params['immediate_data'] = immediate_data
+ if error_recovery_level:
+ params['error_recovery_level'] = error_recovery_level
+ if allow_duplicated_isid:
+ params['allow_duplicated_isid'] = allow_duplicated_isid
+ if min_connections_per_core:
+ params['min_connections_per_core'] = min_connections_per_core
+
+ return client.call('set_iscsi_options', params)
+
+
def set_iscsi_discovery_auth(
        client,
        disable_chap=None,
        require_chap=None,
        mutual_chap=None,
        chap_group=None):
    """Set CHAP authentication for discovery service.

    Args:
        disable_chap: CHAP for discovery session should be disabled (optional)
        require_chap: CHAP for discovery session should be required (optional)
        mutual_chap: CHAP for discovery session should be mutual (optional)
        chap_group: Authentication group ID for discovery session (optional)

    Returns:
        True or False
    """
    settings = {}
    for key, value in (('disable_chap', disable_chap),
                       ('require_chap', require_chap),
                       ('mutual_chap', mutual_chap),
                       ('chap_group', chap_group)):
        if value:
            settings[key] = value

    return client.call('set_iscsi_discovery_auth', settings)
+
+
+def get_iscsi_auth_groups(client):
+ """Display current authentication group configuration.
+
+ Returns:
+ List of current authentication group configuration.
+ """
+ return client.call('get_iscsi_auth_groups')
+
+
+def get_portal_groups(client):
+ """Display current portal group configuration.
+
+ Returns:
+ List of current portal group configuration.
+ """
+ return client.call('get_portal_groups')
+
+
+def get_initiator_groups(client):
+ """Display current initiator group configuration.
+
+ Returns:
+ List of current initiator group configuration.
+ """
+ return client.call('get_initiator_groups')
+
+
+def get_target_nodes(client):
+ """Display target nodes.
+
+ Returns:
+ List of ISCSI target node objects.
+ """
+ return client.call('get_target_nodes')
+
+
+def construct_target_node(
+ client,
+ luns,
+ pg_ig_maps,
+ name,
+ alias_name,
+ queue_depth,
+ chap_group=None,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None,
+ header_digest=None,
+ data_digest=None):
+ """Add a target node.
+
+ Args:
+ luns: List of bdev_name_id_pairs, e.g. [{"bdev_name": "Malloc1", "lun_id": 1}]
+ pg_ig_maps: List of pg_ig_mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
+ name: Target node name (ASCII)
+ alias_name: Target node alias name (ASCII)
+ queue_depth: Desired target queue depth
+ chap_group: Authentication group ID for this target node
+ disable_chap: CHAP authentication should be disabled for this target node
+ require_chap: CHAP authentication should be required for this target node
+ mutual_chap: CHAP authentication should be mutual/bidirectional
+ header_digest: Header Digest should be required for this target node
+ data_digest: Data Digest should be required for this target node
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'alias_name': alias_name,
+ 'pg_ig_maps': pg_ig_maps,
+ 'luns': luns,
+ 'queue_depth': queue_depth,
+ }
+
+ if chap_group:
+ params['chap_group'] = chap_group
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ if header_digest:
+ params['header_digest'] = header_digest
+ if data_digest:
+ params['data_digest'] = data_digest
+ return client.call('construct_target_node', params)
+
+
def target_node_add_lun(client, name, bdev_name, lun_id=None):
    """Add LUN to the target node.

    Args:
        name: Target node name (ASCII)
        bdev_name: bdev name
        lun_id: LUN ID (integer >= 0; optional — omitted lets the target pick)

    Returns:
        True or False
    """
    params = {
        'name': name,
        'bdev_name': bdev_name,
    }
    # 'if lun_id:' would silently drop the valid value 0; LUN IDs start at 0.
    if lun_id is not None:
        params['lun_id'] = lun_id
    return client.call('target_node_add_lun', params)
+
+
+def set_iscsi_target_node_auth(
+ client,
+ name,
+ chap_group=None,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None):
+ """Set CHAP authentication for the target node.
+
+ Args:
+ name: Target node name (ASCII)
+ chap_group: Authentication group ID for this target node
+ disable_chap: CHAP authentication should be disabled for this target node
+ require_chap: CHAP authentication should be required for this target node
+ mutual_chap: CHAP authentication should be mutual/bidirectional
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ }
+
+ if chap_group:
+ params['chap_group'] = chap_group
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ return client.call('set_iscsi_target_node_auth', params)
+
+
def add_iscsi_auth_group(client, tag, secrets=None):
    """Add authentication group for CHAP authentication.

    Args:
        tag: Authentication group tag (unique, integer > 0).
        secrets: Array of secrets objects (optional).

    Returns:
        True or False
    """
    group = {'tag': tag}
    if secrets:
        group['secrets'] = secrets
    return client.call('add_iscsi_auth_group', group)
+
+
+def delete_iscsi_auth_group(client, tag):
+ """Delete an authentication group.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+ return client.call('delete_iscsi_auth_group', params)
+
+
+def add_secret_to_iscsi_auth_group(client, tag, user, secret, muser=None, msecret=None):
+ """Add a secret to an authentication group.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0)
+ user: User name for one-way CHAP authentication
+ secret: Secret for one-way CHAP authentication
+ muser: User name for mutual CHAP authentication (optional)
+ msecret: Secret for mutual CHAP authentication (optional)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'user': user, 'secret': secret}
+
+ if muser:
+ params['muser'] = muser
+ if msecret:
+ params['msecret'] = msecret
+ return client.call('add_secret_to_iscsi_auth_group', params)
+
+
+def delete_secret_from_iscsi_auth_group(client, tag, user):
+ """Delete a secret from an authentication group.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0)
+ user: User name for one-way CHAP authentication
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'user': user}
+ return client.call('delete_secret_from_iscsi_auth_group', params)
+
+
+def delete_pg_ig_maps(client, pg_ig_maps, name):
+ """Delete PG-IG maps from the target node.
+
+ Args:
+ pg_ig_maps: List of pg_ig_mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
+ name: Target node alias name (ASCII)
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'pg_ig_maps': pg_ig_maps,
+ }
+ return client.call('delete_pg_ig_maps', params)
+
+
+def add_pg_ig_maps(client, pg_ig_maps, name):
+ """Add PG-IG maps to the target node.
+
+ Args:
+ pg_ig_maps: List of pg_ig_mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
+ name: Target node alias name (ASCII)
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'pg_ig_maps': pg_ig_maps,
+ }
+ return client.call('add_pg_ig_maps', params)
+
+
def add_portal_group(client, portals, tag):
    """Add a portal group.

    Args:
        portals: List of portals, e.g. [{'host': ip, 'port': port}] or [{'host': ip, 'port': port, 'cpumask': cpumask}]
        tag: Portal group tag (unique, integer > 0)

    Returns:
        True or False
    """
    params = {'tag': tag, 'portals': portals}
    return client.call('add_portal_group', params)
+
+
+def add_initiator_group(client, tag, initiators, netmasks):
+ """Add an initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+ initiators: List of initiator hostnames or IP addresses, e.g. ["127.0.0.1","192.168.200.100"]
+ netmasks: List of initiator netmasks, e.g. ["255.255.0.0","255.248.0.0"]
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'initiators': initiators, 'netmasks': netmasks}
+ return client.call('add_initiator_group', params)
+
+
+def add_initiators_to_initiator_group(
+ client,
+ tag,
+ initiators=None,
+ netmasks=None):
+ """Add initiators to an existing initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+ initiators: List of initiator hostnames or IP addresses, e.g. ["127.0.0.1","192.168.200.100"]
+ netmasks: List of initiator netmasks, e.g. ["255.255.0.0","255.248.0.0"]
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+
+ if initiators:
+ params['initiators'] = initiators
+ if netmasks:
+ params['netmasks'] = netmasks
+ return client.call('add_initiators_to_initiator_group', params)
+
+
+def delete_initiators_from_initiator_group(
+ client, tag, initiators=None, netmasks=None):
+ """Delete initiators from an existing initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+ initiators: List of initiator hostnames or IP addresses, e.g. ["127.0.0.1","192.168.200.100"]
+ netmasks: List of initiator netmasks, e.g. ["255.255.0.0","255.248.0.0"]
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+
+ if initiators:
+ params['initiators'] = initiators
+ if netmasks:
+ params['netmasks'] = netmasks
+ return client.call('delete_initiators_from_initiator_group', params)
+
+
def delete_target_node(client, target_node_name):
    """Delete a target node.

    Args:
        target_node_name: Target node name to be deleted. Example: iqn.2016-06.io.spdk:disk1.

    Returns:
        True or False
    """
    return client.call('delete_target_node', {'name': target_node_name})
+
+
+def delete_portal_group(client, tag):
+ """Delete a portal group.
+
+ Args:
+ tag: Portal group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+ return client.call('delete_portal_group', params)
+
+
+def delete_initiator_group(client, tag):
+ """Delete an initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+ return client.call('delete_initiator_group', params)
+
+
+def get_iscsi_connections(client):
+ """Display iSCSI connections.
+
+ Returns:
+ List of iSCSI connection.
+ """
+ return client.call('get_iscsi_connections')
+
+
+def get_iscsi_global_params(client):
+ """Display iSCSI global parameters.
+
+ Returns:
+ List of iSCSI global parameter.
+ """
+ return client.call('get_iscsi_global_params')
+
+
+def get_scsi_devices(client):
+ """Display SCSI devices.
+
+ Returns:
+ List of SCSI device.
+ """
+ return client.call('get_scsi_devices')
diff --git a/src/spdk/scripts/rpc/log.py b/src/spdk/scripts/rpc/log.py
new file mode 100644
index 00000000..a152b3b8
--- /dev/null
+++ b/src/spdk/scripts/rpc/log.py
@@ -0,0 +1,65 @@
+def set_trace_flag(client, flag):
+ """Set trace flag.
+
+ Args:
+ flag: trace mask we want to set. (for example "nvme")
+ """
+ params = {'flag': flag}
+ return client.call('set_trace_flag', params)
+
+
+def clear_trace_flag(client, flag):
+ """Clear trace flag.
+
+ Args:
+ flag: trace mask we want to clear. (for example "nvme")
+ """
+ params = {'flag': flag}
+ return client.call('clear_trace_flag', params)
+
+
+def get_trace_flags(client):
+ """Get trace flags
+
+ Returns:
+ List of trace flag
+ """
+ return client.call('get_trace_flags')
+
+
+def set_log_level(client, level):
+ """Set log level.
+
+ Args:
+ level: log level we want to set. (for example "DEBUG")
+ """
+ params = {'level': level}
+ return client.call('set_log_level', params)
+
+
+def get_log_level(client):
+ """Get log level
+
+ Returns:
+ Current log level
+ """
+ return client.call('get_log_level')
+
+
+def set_log_print_level(client, level):
+ """Set log print level.
+
+ Args:
+ level: log print level we want to set. (for example "DEBUG")
+ """
+ params = {'level': level}
+ return client.call('set_log_print_level', params)
+
+
+def get_log_print_level(client):
+ """Get log print level
+
+ Returns:
+ Current log print level
+ """
+ return client.call('get_log_print_level')
diff --git a/src/spdk/scripts/rpc/lvol.py b/src/spdk/scripts/rpc/lvol.py
new file mode 100644
index 00000000..e7e05a3b
--- /dev/null
+++ b/src/spdk/scripts/rpc/lvol.py
@@ -0,0 +1,195 @@
def construct_lvol_store(client, bdev_name, lvs_name, cluster_sz=None):
    """Create a logical volume store on top of a bdev.

    Args:
        bdev_name: bdev on which to construct the logical volume store
        lvs_name: name of the logical volume store to create
        cluster_sz: cluster size of the store in bytes (optional)

    Returns:
        UUID of the created logical volume store.
    """
    request = {'bdev_name': bdev_name, 'lvs_name': lvs_name}
    if cluster_sz:
        request['cluster_sz'] = cluster_sz
    return client.call('construct_lvol_store', request)


def rename_lvol_store(client, old_name, new_name):
    """Rename a logical volume store.

    Args:
        old_name: existing logical volume store name
        new_name: new logical volume store name
    """
    return client.call('rename_lvol_store',
                       {'old_name': old_name, 'new_name': new_name})


def construct_lvol_bdev(client, lvol_name, size, thin_provision=False, uuid=None, lvs_name=None):
    """Create a logical volume on a logical volume store.

    Exactly one of uuid or lvs_name identifies the target store.

    Args:
        lvol_name: name of logical volume to create
        size: desired size in bytes (rounded up to a multiple of cluster size)
        thin_provision: True to enable thin provisioning
        uuid: UUID of the logical volume store (optional)
        lvs_name: name of the logical volume store (optional)

    Returns:
        Name of the created logical volume block device.
    """
    # Both set or both unset is invalid.
    if bool(uuid) == bool(lvs_name):
        raise ValueError("Either uuid or lvs_name must be specified, but not both")

    request = {'lvol_name': lvol_name, 'size': size}
    if thin_provision:
        request['thin_provision'] = thin_provision
    if uuid:
        request['uuid'] = uuid
    else:
        request['lvs_name'] = lvs_name
    return client.call('construct_lvol_bdev', request)


def snapshot_lvol_bdev(client, lvol_name, snapshot_name):
    """Capture a snapshot of the current state of a logical volume.

    Args:
        lvol_name: logical volume to snapshot
        snapshot_name: name for the new snapshot

    Returns:
        Name of the created logical volume snapshot.
    """
    return client.call('snapshot_lvol_bdev',
                       {'lvol_name': lvol_name, 'snapshot_name': snapshot_name})


def clone_lvol_bdev(client, snapshot_name, clone_name):
    """Create a logical volume based on a snapshot.

    Args:
        snapshot_name: snapshot to clone
        clone_name: name for the new logical volume

    Returns:
        Name of the created logical volume clone.
    """
    return client.call('clone_lvol_bdev',
                       {'snapshot_name': snapshot_name, 'clone_name': clone_name})


def rename_lvol_bdev(client, old_name, new_name):
    """Rename a logical volume.

    Args:
        old_name: existing logical volume name
        new_name: new logical volume name
    """
    return client.call('rename_lvol_bdev',
                       {'old_name': old_name, 'new_name': new_name})


def resize_lvol_bdev(client, name, size):
    """Resize a logical volume.

    Args:
        name: logical volume to resize
        size: desired size in bytes (rounded up to a multiple of cluster size)
    """
    return client.call('resize_lvol_bdev', {'name': name, 'size': size})


def destroy_lvol_bdev(client, name):
    """Destroy a logical volume.

    Args:
        name: logical volume to destroy
    """
    return client.call('destroy_lvol_bdev', {'name': name})


def inflate_lvol_bdev(client, name):
    """Inflate a logical volume.

    Args:
        name: logical volume to inflate
    """
    return client.call('inflate_lvol_bdev', {'name': name})


def decouple_parent_lvol_bdev(client, name):
    """Decouple the parent of a logical volume.

    Args:
        name: logical volume whose parent should be decoupled
    """
    return client.call('decouple_parent_lvol_bdev', {'name': name})
+
+
def destroy_lvol_store(client, uuid=None, lvs_name=None):
    """Destroy a logical volume store.

    Exactly one of uuid or lvs_name identifies the store.

    Args:
        uuid: UUID of logical volume store to destroy (optional)
        lvs_name: name of logical volume store to destroy (optional)
    """
    # Both set or both unset is invalid.
    if bool(uuid) == bool(lvs_name):
        raise ValueError("Exactly one of uuid or lvs_name must be specified")

    request = {}
    if uuid:
        request['uuid'] = uuid
    else:
        request['lvs_name'] = lvs_name
    return client.call('destroy_lvol_store', request)
+
+
def get_lvol_stores(client, uuid=None, lvs_name=None):
    """List logical volume stores.

    At most one of uuid or lvs_name may be given as a filter; if both are
    omitted, information about all logical volume stores is returned.

    Args:
        uuid: UUID of logical volume store to retrieve information about (optional)
        lvs_name: name of logical volume store to retrieve information about (optional)
    """
    if uuid and lvs_name:
        # Fix: the old message said "Exactly one ... may be specified", which
        # wrongly implied a filter was required; omitting both is valid.
        raise ValueError("Only one of uuid or lvs_name may be specified")
    params = {}
    if uuid:
        params['uuid'] = uuid
    if lvs_name:
        params['lvs_name'] = lvs_name
    return client.call('get_lvol_stores', params)
diff --git a/src/spdk/scripts/rpc/nbd.py b/src/spdk/scripts/rpc/nbd.py
new file mode 100644
index 00000000..70cba167
--- /dev/null
+++ b/src/spdk/scripts/rpc/nbd.py
@@ -0,0 +1,18 @@
def start_nbd_disk(client, bdev_name, nbd_device):
    """Expose a bdev through an NBD device node.

    Args:
        bdev_name: bdev to expose
        nbd_device: NBD device node to use
    """
    return client.call('start_nbd_disk',
                       {'bdev_name': bdev_name, 'nbd_device': nbd_device})


def stop_nbd_disk(client, nbd_device):
    """Stop an NBD disk.

    Args:
        nbd_device: NBD device node to stop
    """
    return client.call('stop_nbd_disk', {'nbd_device': nbd_device})


def get_nbd_disks(client, nbd_device=None):
    """List NBD disks, optionally restricted to one device node.

    Args:
        nbd_device: NBD device node to query (optional; default: all)
    """
    request = {}
    if nbd_device:
        request['nbd_device'] = nbd_device
    return client.call('get_nbd_disks', request)
diff --git a/src/spdk/scripts/rpc/net.py b/src/spdk/scripts/rpc/net.py
new file mode 100644
index 00000000..e1ba7aa8
--- /dev/null
+++ b/src/spdk/scripts/rpc/net.py
@@ -0,0 +1,29 @@
def add_ip_address(client, ifc_index, ip_addr):
    """Add an IP address to a network interface.

    Args:
        ifc_index: interface index of the NIC device (int)
        ip_addr: IP address to add
    """
    return client.call('add_ip_address',
                       {'ifc_index': ifc_index, 'ip_address': ip_addr})


def delete_ip_address(client, ifc_index, ip_addr):
    """Delete an IP address from a network interface.

    Args:
        ifc_index: interface index of the NIC device (int)
        ip_addr: IP address to delete
    """
    return client.call('delete_ip_address',
                       {'ifc_index': ifc_index, 'ip_address': ip_addr})


def get_interfaces(client):
    """Display the current interface list.

    Returns:
        List of current interfaces.
    """
    return client.call('get_interfaces')
diff --git a/src/spdk/scripts/rpc/nvme.py b/src/spdk/scripts/rpc/nvme.py
new file mode 100644
index 00000000..a889474b
--- /dev/null
+++ b/src/spdk/scripts/rpc/nvme.py
@@ -0,0 +1,54 @@
+
+
def send_nvme_cmd(client, name, cmd_type, data_direction, cmdbuf,
                  data=None, metadata=None,
                  data_len=None, metadata_len=None,
                  timeout_ms=None):
    """Send a single NVMe command to a controller.

    Args:
        name: name of the NVMe controller to operate on
        cmd_type: type of NVMe command; valid values: "admin", "io"
        data_direction: direction of data transfer; valid values: "c2h", "h2c"
        cmdbuf: NVMe command, base64-urlsafe encoded
        data: data transferred host-to-controller, base64-urlsafe encoded (optional)
        metadata: metadata transferred host-to-controller, base64-urlsafe encoded (optional)
        data_len: data length required to transfer controller-to-host (optional)
        metadata_len: metadata length required to transfer controller-to-host (optional)
        timeout_ms: command execution timeout in milliseconds; 0 disables
            timeout tracking (optional)

    Returns:
        NVMe completion queue entry plus any requested data and metadata,
        all base64-urlsafe encoded.
    """
    # Fix: docstring previously documented nonexistent parameter names
    # (data_length / metadata_length / timeout-ms).
    params = {
        'name': name,
        'cmd_type': cmd_type,
        'data_direction': data_direction,
        'cmdbuf': cmdbuf,
    }
    optional = {
        'data': data,
        'metadata': metadata,
        'data_len': data_len,
        'metadata_len': metadata_len,
        'timeout_ms': timeout_ms,
    }
    # Falsy values are treated as unset, matching the original truthiness filter.
    params.update({key: value for key, value in optional.items() if value})
    return client.call('send_nvme_cmd', params)
+
+
def get_nvme_controllers(client, name=None):
    """Get information about NVMe controllers.

    Args:
        name: NVMe controller name to query (optional; default: all controllers)

    Returns:
        List of NVMe controller information objects.
    """
    request = {}
    if name:
        request['name'] = name
    return client.call('get_nvme_controllers', request)
diff --git a/src/spdk/scripts/rpc/nvmf.py b/src/spdk/scripts/rpc/nvmf.py
new file mode 100644
index 00000000..d805ebca
--- /dev/null
+++ b/src/spdk/scripts/rpc/nvmf.py
@@ -0,0 +1,352 @@
+
+
def set_nvmf_target_options(client,
                            max_queue_depth=None,
                            max_qpairs_per_ctrlr=None,
                            in_capsule_data_size=None,
                            max_io_size=None,
                            max_subsystems=None,
                            io_unit_size=None):
    """Set NVMe-oF target options.

    Args:
        max_queue_depth: max number of outstanding I/O per queue (optional)
        max_qpairs_per_ctrlr: max number of SQ and CQ per controller (optional)
        in_capsule_data_size: maximum in-capsule data size in bytes (optional)
        max_io_size: maximum I/O data size in bytes (optional)
        max_subsystems: maximum number of NVMe-oF subsystems (optional)
        io_unit_size: I/O unit size in bytes (optional)

    Returns:
        True or False
    """
    candidates = {
        'max_queue_depth': max_queue_depth,
        'max_qpairs_per_ctrlr': max_qpairs_per_ctrlr,
        'in_capsule_data_size': in_capsule_data_size,
        'max_io_size': max_io_size,
        'max_subsystems': max_subsystems,
        'io_unit_size': io_unit_size,
    }
    # Falsy values are treated as unset and omitted from the request.
    params = {key: value for key, value in candidates.items() if value}
    return client.call('set_nvmf_target_options', params)


def set_nvmf_target_config(client,
                           acceptor_poll_rate=None,
                           conn_sched=None):
    """Set NVMe-oF target subsystem configuration.

    Args:
        acceptor_poll_rate: acceptor poll period in microseconds (optional)
        conn_sched: scheduling of incoming connections (optional)

    Returns:
        True or False
    """
    candidates = {
        'acceptor_poll_rate': acceptor_poll_rate,
        'conn_sched': conn_sched,
    }
    # Falsy values are treated as unset and omitted from the request.
    params = {key: value for key, value in candidates.items() if value}
    return client.call('set_nvmf_target_config', params)
+
+
def nvmf_create_transport(client,
                          trtype,
                          max_queue_depth=None,
                          max_qpairs_per_ctrlr=None,
                          in_capsule_data_size=None,
                          max_io_size=None,
                          io_unit_size=None,
                          max_aq_depth=None):
    """Create an NVMe-oF transport.

    Args:
        trtype: transport type (e.g. "RDMA")
        max_queue_depth: max number of outstanding I/O per queue (optional)
        max_qpairs_per_ctrlr: max number of SQ and CQ per controller (optional)
        in_capsule_data_size: maximum in-capsule data size in bytes (optional)
        max_io_size: maximum I/O data size in bytes (optional)
        io_unit_size: I/O unit size in bytes (optional)
        max_aq_depth: max admin queue depth per controller (optional)

    Returns:
        True or False
    """
    # Fix: docstring typo ("quque" -> "queue").
    params = {'trtype': trtype}
    optional = {
        'max_queue_depth': max_queue_depth,
        'max_qpairs_per_ctrlr': max_qpairs_per_ctrlr,
        'in_capsule_data_size': in_capsule_data_size,
        'max_io_size': max_io_size,
        'io_unit_size': io_unit_size,
        'max_aq_depth': max_aq_depth,
    }
    # Falsy values are treated as unset and omitted from the request.
    params.update({key: value for key, value in optional.items() if value})
    return client.call('nvmf_create_transport', params)
+
+
def get_nvmf_subsystems(client):
    """Get the list of NVMe-oF subsystems.

    Returns:
        List of NVMe-oF subsystem objects.
    """
    return client.call('get_nvmf_subsystems')


def construct_nvmf_subsystem(client,
                             nqn,
                             serial_number,
                             listen_addresses=None,
                             hosts=None,
                             allow_any_host=False,
                             namespaces=None,
                             max_namespaces=0):
    """Construct an NVMe over Fabrics target subsystem.

    Args:
        nqn: subsystem NQN
        serial_number: serial number of the virtual controller
        listen_addresses: array of listen_address objects (optional)
        hosts: array of allowed host NQN strings (optional; default: no hosts allowed)
        allow_any_host: allow any host (True) or enforce the allowed-host
            whitelist (False); default: False
        namespaces: array of namespace objects (optional; default: no namespaces)
        max_namespaces: maximum number of namespaces attachable to the
            subsystem (optional; default: 0 = unlimited)

    Returns:
        True or False
    """
    params = {'nqn': nqn, 'serial_number': serial_number}
    optional = {
        'max_namespaces': max_namespaces,
        'listen_addresses': listen_addresses,
        'hosts': hosts,
        'allow_any_host': True if allow_any_host else None,
        'namespaces': namespaces,
    }
    # Falsy values are treated as unset and omitted from the request.
    params.update({key: value for key, value in optional.items() if value})
    return client.call('construct_nvmf_subsystem', params)


def nvmf_subsystem_create(client,
                          nqn,
                          serial_number,
                          allow_any_host=False,
                          max_namespaces=0):
    """Construct an NVMe over Fabrics target subsystem.

    Args:
        nqn: subsystem NQN
        serial_number: serial number of the virtual controller
        allow_any_host: allow any host (True) or enforce the allowed-host
            whitelist (False); default: False
        max_namespaces: maximum number of namespaces attachable to the
            subsystem (optional; default: 0 = unlimited)

    Returns:
        True or False
    """
    params = {'nqn': nqn}
    if serial_number:
        params['serial_number'] = serial_number
    if allow_any_host:
        params['allow_any_host'] = True
    if max_namespaces:
        params['max_namespaces'] = max_namespaces
    return client.call('nvmf_subsystem_create', params)
+
+
def _listen_address(trtype, traddr, trsvcid, adrfam):
    """Build a listen_address object; adrfam is omitted when falsy."""
    address = {'trtype': trtype, 'traddr': traddr, 'trsvcid': trsvcid}
    if adrfam:
        address['adrfam'] = adrfam
    return address


def nvmf_subsystem_add_listener(client, nqn, trtype, traddr, trsvcid, adrfam):
    """Add a new listen address to an NVMe-oF subsystem.

    Args:
        nqn: Subsystem NQN.
        trtype: Transport type ("RDMA").
        traddr: Transport address.
        trsvcid: Transport service ID.
        adrfam: Address family ("IPv4", "IPv6", "IB", or "FC").

    Returns:
        True or False
    """
    params = {
        'nqn': nqn,
        'listen_address': _listen_address(trtype, traddr, trsvcid, adrfam),
    }
    return client.call('nvmf_subsystem_add_listener', params)


def nvmf_subsystem_remove_listener(
        client,
        nqn,
        trtype,
        traddr,
        trsvcid,
        adrfam):
    """Remove an existing listen address from an NVMe-oF subsystem.

    Args:
        nqn: Subsystem NQN.
        trtype: Transport type ("RDMA").
        traddr: Transport address.
        trsvcid: Transport service ID.
        adrfam: Address family ("IPv4", "IPv6", "IB", or "FC").

    Returns:
        True or False
    """
    params = {
        'nqn': nqn,
        'listen_address': _listen_address(trtype, traddr, trsvcid, adrfam),
    }
    return client.call('nvmf_subsystem_remove_listener', params)


def nvmf_subsystem_add_ns(client, nqn, bdev_name, nsid=None, nguid=None, eui64=None, uuid=None):
    """Add a namespace to a subsystem.

    Args:
        nqn: Subsystem NQN.
        bdev_name: name of the bdev to expose as a namespace
        nsid: namespace ID (optional)
        nguid: 16-byte namespace globally unique identifier in hexadecimal (optional)
        eui64: 8-byte namespace EUI-64 in hexadecimal, e.g. "ABCDEF0123456789" (optional)
        uuid: namespace UUID (optional)

    Returns:
        The namespace ID.
    """
    namespace = {'bdev_name': bdev_name}
    optional = {'nsid': nsid, 'nguid': nguid, 'eui64': eui64, 'uuid': uuid}
    # Falsy values are treated as unset and omitted from the namespace object.
    namespace.update({key: value for key, value in optional.items() if value})
    return client.call('nvmf_subsystem_add_ns',
                       {'nqn': nqn, 'namespace': namespace})


def nvmf_subsystem_remove_ns(client, nqn, nsid):
    """Remove an existing namespace from a subsystem.

    Args:
        nqn: Subsystem NQN.
        nsid: namespace ID to remove

    Returns:
        True or False
    """
    return client.call('nvmf_subsystem_remove_ns', {'nqn': nqn, 'nsid': nsid})
+
+
def nvmf_subsystem_add_host(client, nqn, host):
    """Add a host NQN to the subsystem's list of allowed hosts.

    Args:
        nqn: Subsystem NQN.
        host: host NQN to add to the allowed-host list

    Returns:
        True or False
    """
    return client.call('nvmf_subsystem_add_host', {'nqn': nqn, 'host': host})


def nvmf_subsystem_remove_host(client, nqn, host):
    """Remove a host NQN from the subsystem's list of allowed hosts.

    Args:
        nqn: Subsystem NQN.
        host: host NQN to remove from the allowed-host list

    Returns:
        True or False
    """
    return client.call('nvmf_subsystem_remove_host', {'nqn': nqn, 'host': host})
+
+
def nvmf_subsystem_allow_any_host(client, nqn, disable):
    """Configure whether a subsystem accepts any host or enforces its host whitelist.

    Args:
        nqn: Subsystem NQN.
        disable: True to disable allow_any_host (i.e. enforce the allowed-host
            whitelist); False to allow any host to connect.

    Returns:
        True or False
    """
    # Fix: the old docstring described 'disable' inverted relative to the code;
    # the RPC's 'allow_any_host' field is simply the logical NOT of 'disable'.
    params = {'nqn': nqn, 'allow_any_host': not disable}
    return client.call('nvmf_subsystem_allow_any_host', params)
+
+
def delete_nvmf_subsystem(client, nqn):
    """Delete an existing NVMe-oF subsystem.

    Args:
        nqn: NQN of the subsystem to delete.

    Returns:
        True or False
    """
    return client.call('delete_nvmf_subsystem', {'nqn': nqn})
diff --git a/src/spdk/scripts/rpc/pmem.py b/src/spdk/scripts/rpc/pmem.py
new file mode 100644
index 00000000..4ab38ff3
--- /dev/null
+++ b/src/spdk/scripts/rpc/pmem.py
@@ -0,0 +1,29 @@
def create_pmem_pool(client, pmem_file, num_blocks, block_size):
    """Create a pmem pool at the specified path.

    Args:
        pmem_file: path at which to create the pmem pool
        num_blocks: number of blocks for the created pmem pool file
        block_size: block size for the pmem pool file
    """
    return client.call('create_pmem_pool', {
        'pmem_file': pmem_file,
        'num_blocks': num_blocks,
        'block_size': block_size,
    })


def pmem_pool_info(client, pmem_file):
    """Get details about a pmem pool.

    Args:
        pmem_file: path to the pmem pool
    """
    return client.call('pmem_pool_info', {'pmem_file': pmem_file})


def delete_pmem_pool(client, pmem_file):
    """Delete a pmem pool.

    Args:
        pmem_file: path to the pmem pool
    """
    return client.call('delete_pmem_pool', {'pmem_file': pmem_file})
diff --git a/src/spdk/scripts/rpc/subsystem.py b/src/spdk/scripts/rpc/subsystem.py
new file mode 100644
index 00000000..c8e662bc
--- /dev/null
+++ b/src/spdk/scripts/rpc/subsystem.py
@@ -0,0 +1,7 @@
def get_subsystems(client):
    """List SPDK subsystems."""
    return client.call('get_subsystems')


def get_subsystem_config(client, name):
    """Get the configuration of the named SPDK subsystem.

    Args:
        name: subsystem name
    """
    return client.call('get_subsystem_config', {'name': name})
diff --git a/src/spdk/scripts/rpc/vhost.py b/src/spdk/scripts/rpc/vhost.py
new file mode 100644
index 00000000..bc97455a
--- /dev/null
+++ b/src/spdk/scripts/rpc/vhost.py
@@ -0,0 +1,248 @@
def set_vhost_controller_coalescing(client, ctrlr, delay_base_us, iops_threshold):
    """Set coalescing parameters for a vhost controller.

    Args:
        ctrlr: controller name
        delay_base_us: base delay time
        iops_threshold: IOPS threshold when coalescing is enabled
    """
    return client.call('set_vhost_controller_coalescing', {
        'ctrlr': ctrlr,
        'delay_base_us': delay_base_us,
        'iops_threshold': iops_threshold,
    })


def construct_vhost_scsi_controller(client, ctrlr, cpumask=None):
    """Create a vhost SCSI controller.

    Args:
        ctrlr: controller name
        cpumask: CPU mask for this controller (optional)
    """
    request = {'ctrlr': ctrlr}
    if cpumask:
        request['cpumask'] = cpumask
    return client.call('construct_vhost_scsi_controller', request)


def add_vhost_scsi_lun(client, ctrlr, scsi_target_num, bdev_name):
    """Add a LUN to a vhost SCSI controller target.

    Args:
        ctrlr: controller name
        scsi_target_num: target number to use
        bdev_name: name of the bdev to add to the target
    """
    return client.call('add_vhost_scsi_lun', {
        'ctrlr': ctrlr,
        'scsi_target_num': scsi_target_num,
        'bdev_name': bdev_name,
    })


def remove_vhost_scsi_target(client, ctrlr, scsi_target_num):
    """Remove a target from a vhost SCSI controller.

    Args:
        ctrlr: controller name to remove the target from
        scsi_target_num: number of the target to remove
    """
    return client.call('remove_vhost_scsi_target', {
        'ctrlr': ctrlr,
        'scsi_target_num': scsi_target_num,
    })
+
+
def construct_vhost_nvme_controller(client, ctrlr, io_queues, cpumask=None):
    """Create a vhost NVMe controller.

    Args:
        ctrlr: controller name
        io_queues: number of I/O queues for the controller
        cpumask: CPU mask for this controller (optional)
    """
    request = {'ctrlr': ctrlr, 'io_queues': io_queues}
    if cpumask:
        request['cpumask'] = cpumask
    return client.call('construct_vhost_nvme_controller', request)


def add_vhost_nvme_ns(client, ctrlr, bdev_name):
    """Add a namespace to a vhost NVMe controller.

    Args:
        ctrlr: controller to add a namespace to
        bdev_name: block device name for the new namespace
    """
    return client.call('add_vhost_nvme_ns',
                       {'ctrlr': ctrlr, 'bdev_name': bdev_name})


def construct_vhost_blk_controller(client, ctrlr, dev_name, cpumask=None, readonly=None):
    """Create a vhost BLK controller.

    Args:
        ctrlr: controller name
        dev_name: device name to add to the controller
        cpumask: CPU mask for this controller (optional)
        readonly: set the controller read-only (optional)
    """
    request = {'ctrlr': ctrlr, 'dev_name': dev_name}
    if cpumask:
        request['cpumask'] = cpumask
    if readonly:
        request['readonly'] = readonly
    return client.call('construct_vhost_blk_controller', request)
+
+
def get_vhost_controllers(client, name=None):
    """Get information about configured vhost controllers.

    Args:
        name: controller name to query (optional; default: all controllers)

    Returns:
        List of vhost controllers.
    """
    request = {}
    if name:
        request['name'] = name
    return client.call('get_vhost_controllers', request)


def remove_vhost_controller(client, ctrlr):
    """Remove a vhost controller from the configuration.

    Args:
        ctrlr: name of the controller to remove
    """
    return client.call('remove_vhost_controller', {'ctrlr': ctrlr})
+
+
def construct_virtio_dev(client, name, trtype, traddr, dev_type, vq_count=None, vq_size=None):
    """Create a new virtio device using the given transport and device type.

    Args:
        name: name base for the newly created bdevs
        trtype: virtio target transport type: pci or user
        traddr: transport-type-specific target address, e.g. UNIX domain
            socket path or BDF
        dev_type: device type: blk or scsi
        vq_count: number of virtual queues to use (optional)
        vq_size: size of each queue (optional)
    """
    request = {
        'name': name,
        'trtype': trtype,
        'traddr': traddr,
        'dev_type': dev_type,
    }
    if vq_count:
        request['vq_count'] = vq_count
    if vq_size:
        request['vq_size'] = vq_size
    return client.call('construct_virtio_dev', request)


def construct_virtio_user_scsi_bdev(client, path, name, vq_count=None, vq_size=None):
    """Connect to a virtio-user SCSI device.

    Args:
        path: path to the Virtio SCSI socket
        name: use this name as base instead of 'VirtioScsiN'
        vq_count: number of virtual queues to use (optional)
        vq_size: size of each queue (optional)
    """
    request = {'path': path, 'name': name}
    if vq_count:
        request['vq_count'] = vq_count
    if vq_size:
        request['vq_size'] = vq_size
    return client.call('construct_virtio_user_scsi_bdev', request)
+
+
def construct_virtio_pci_scsi_bdev(client, pci_address, name):
    """Create a Virtio SCSI device from a virtio-pci device.

    Args:
        pci_address: PCI address in domain:bus:device.function format or
            domain.bus.device.function format
        name: name for the virtio device; it is inherited by all created
            bdevs, which are named in the following format:
            <name>t<target_id>
    """
    # Fix: docstring typos ("inhereted" -> "inherited", "named n" -> "named in").
    params = {'pci_address': pci_address, 'name': name}
    return client.call('construct_virtio_pci_scsi_bdev', params)
+
+
def remove_virtio_scsi_bdev(client, name):
    """Remove a Virtio-SCSI device, deleting all bdevs it exposes.

    Args:
        name: virtio device name
    """
    return client.call('remove_virtio_scsi_bdev', {'name': name})


def remove_virtio_bdev(client, name):
    """Remove a Virtio device, deleting all bdevs it exposes.

    Args:
        name: virtio device name
    """
    return client.call('remove_virtio_bdev', {'name': name})


def get_virtio_scsi_devs(client):
    """Get the list of virtio SCSI devices."""
    return client.call('get_virtio_scsi_devs')
+
+
def construct_virtio_user_blk_bdev(client, path, name, vq_count=None, vq_size=None):
    """Connect to a virtio-user BLK device.

    Args:
        path: path to the Virtio BLK socket
        name: base name for the created bdev
            (NOTE(review): original doc said "instead of 'VirtioScsiN'", which
            looks copied from the SCSI variant — confirm the blk default name)
        vq_count: number of virtual queues to use (optional)
        vq_size: size of each queue (optional)
    """
    request = {'path': path, 'name': name}
    if vq_count:
        request['vq_count'] = vq_count
    if vq_size:
        request['vq_size'] = vq_size
    return client.call('construct_virtio_user_blk_bdev', request)


def construct_virtio_pci_blk_bdev(client, pci_address, name):
    """Create a Virtio BLK device from a virtio-pci device.

    Args:
        pci_address: PCI address in domain:bus:device.function format or
            domain.bus.device.function format
        name: name for the blk device
    """
    return client.call('construct_virtio_pci_blk_bdev',
                       {'pci_address': pci_address, 'name': name})
diff --git a/src/spdk/scripts/setup.sh b/src/spdk/scripts/setup.sh
new file mode 100755
index 00000000..22e726de
--- /dev/null
+++ b/src/spdk/scripts/setup.sh
@@ -0,0 +1,604 @@
+#!/usr/bin/env bash
+
+set -e
+
+rootdir=$(readlink -f $(dirname $0))/..
+source "$rootdir/scripts/common.sh"
+
+function usage()
+{
+ if [ `uname` = Linux ]; then
+ options="[config|reset|status|cleanup|help]"
+ else
+ options="[config|reset|help]"
+ fi
+
+ [[ ! -z $2 ]] && ( echo "$2"; echo ""; )
+ echo "Helper script for allocating hugepages and binding NVMe, I/OAT and Virtio devices to"
+ echo "a generic VFIO kernel driver. If VFIO is not available on the system, this script will"
+ echo "fall back to UIO. NVMe and Virtio devices with active mountpoints will be ignored."
+ echo "All hugepage operations use default hugepage size on the system (hugepagesz)."
+ echo "Usage: $(basename $1) $options"
+ echo
+ echo "$options - as following:"
+ echo "config Default mode. Allocate hugepages and bind PCI devices."
+ if [ `uname` = Linux ]; then
+ echo "cleanup Remove any orphaned files that can be left in the system after SPDK application exit"
+ fi
+ echo "reset Rebind PCI devices back to their original drivers."
+ echo " Also cleanup any leftover spdk files/resources."
+ echo " Hugepage memory size will remain unchanged."
+ if [ `uname` = Linux ]; then
+ echo "status Print status of all SPDK-compatible devices on the system."
+ fi
+ echo "help Print this help message."
+ echo
+ echo "The following environment variables can be specified."
+ echo "HUGEMEM Size of hugepage memory to allocate (in MB). 2048 by default."
+ echo " For NUMA systems, the hugepages will be evenly distributed"
+ echo " between CPU nodes"
+ echo "NRHUGE Number of hugepages to allocate. This variable overwrites HUGEMEM."
+ echo "HUGENODE Specific NUMA node to allocate hugepages on. To allocate"
+ echo " hugepages on multiple nodes run this script multiple times -"
+ echo " once for each node."
+ echo "PCI_WHITELIST Whitespace separated list of PCI devices (NVMe, I/OAT, Virtio) to bind."
+ echo " Each device must be specified as a full PCI address."
+ echo " E.g. PCI_WHITELIST=\"0000:01:00.0 0000:02:00.0\""
+ echo " To blacklist all PCI devices use a non-valid address."
+ echo " E.g. PCI_WHITELIST=\"none\""
+ echo " If empty or unset, all PCI devices will be bound."
+ echo "TARGET_USER User that will own hugepage mountpoint directory and vfio groups."
+ echo " By default the current user will be used."
+ exit 0
+}
+
+# In monolithic kernels the lsmod won't work. So
+# back that with a /sys/modules check. Return a different code for
+# built-in vs module just in case we want that down the road.
+function check_for_driver {
+ $(lsmod | grep $1 > /dev/null)
+ if [ $? -eq 0 ]; then
+ return 1
+ else
+ if [[ -d /sys/module/$1 ]]; then
+ return 2
+ else
+ return 0
+ fi
+ fi
+ return 0
+}
+
+function pci_can_bind() {
+ if [[ ${#PCI_WHITELIST[@]} == 0 ]]; then
+ #no whitelist specified, bind all devices
+ return 1
+ fi
+
+ for i in ${PCI_WHITELIST[@]}
+ do
+ if [ "$i" == "$1" ] ; then
+ return 1
+ fi
+ done
+ return 0
+}
+
+function linux_bind_driver() {
+ bdf="$1"
+ driver_name="$2"
+ old_driver_name="no driver"
+ ven_dev_id=$(lspci -n -s $bdf | cut -d' ' -f3 | sed 's/:/ /')
+
+ if [ -e "/sys/bus/pci/devices/$bdf/driver" ]; then
+ old_driver_name=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
+
+ if [ "$driver_name" = "$old_driver_name" ]; then
+ return 0
+ fi
+
+ echo "$ven_dev_id" > "/sys/bus/pci/devices/$bdf/driver/remove_id" 2> /dev/null || true
+ echo "$bdf" > "/sys/bus/pci/devices/$bdf/driver/unbind"
+ fi
+
+ echo "$bdf ($ven_dev_id): $old_driver_name -> $driver_name"
+
+ echo "$ven_dev_id" > "/sys/bus/pci/drivers/$driver_name/new_id" 2> /dev/null || true
+ echo "$bdf" > "/sys/bus/pci/drivers/$driver_name/bind" 2> /dev/null || true
+
+ iommu_group=$(basename $(readlink -f /sys/bus/pci/devices/$bdf/iommu_group))
+ if [ -e "/dev/vfio/$iommu_group" ]; then
+ if [ -n "$TARGET_USER" ]; then
+ chown "$TARGET_USER" "/dev/vfio/$iommu_group"
+ fi
+ fi
+}
+
+function linux_unbind_driver() {
+ bdf="$1"
+ ven_dev_id=$(lspci -n -s $bdf | cut -d' ' -f3 | sed 's/:/ /')
+
+ if ! [ -e "/sys/bus/pci/devices/$bdf/driver" ]; then
+ return 0
+ fi
+
+ old_driver_name=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
+
+ echo "$ven_dev_id" > "/sys/bus/pci/devices/$bdf/driver/remove_id" 2> /dev/null || true
+ echo "$bdf" > "/sys/bus/pci/devices/$bdf/driver/unbind"
+ echo "$bdf ($ven_dev_id): $old_driver_name -> no driver"
+}
+
+function linux_hugetlbfs_mounts() {
+ mount | grep ' type hugetlbfs ' | awk '{ print $3 }'
+}
+
+function get_nvme_name_from_bdf {
+ set +e
+ nvme_devs=`lsblk -d --output NAME | grep "^nvme"`
+ set -e
+ for dev in $nvme_devs; do
+ link_name=$(readlink /sys/block/$dev/device/device) || true
+ if [ -z "$link_name" ]; then
+ link_name=$(readlink /sys/block/$dev/device)
+ fi
+ link_bdf=$(basename "$link_name")
+ if [ "$link_bdf" = "$1" ]; then
+ eval "$2=$dev"
+ return
+ fi
+ done
+}
+
+function get_virtio_names_from_bdf {
+ blk_devs=`lsblk --nodeps --output NAME`
+ virtio_names=''
+
+ for dev in $blk_devs; do
+ if readlink "/sys/block/$dev" | grep -q "$1"; then
+ virtio_names="$virtio_names $dev"
+ fi
+ done
+
+ eval "$2='$virtio_names'"
+}
+
+function configure_linux_pci {
+ driver_name=vfio-pci
+ if [ -z "$(ls /sys/kernel/iommu_groups)" ]; then
+ # No IOMMU. Use uio.
+ driver_name=uio_pci_generic
+ fi
+
+ # NVMe
+ modprobe $driver_name || true
+ for bdf in $(iter_pci_class_code 01 08 02); do
+ blkname=''
+ get_nvme_name_from_bdf "$bdf" blkname
+ if pci_can_bind $bdf == "0" ; then
+ echo "Skipping un-whitelisted NVMe controller $blkname ($bdf)"
+ continue
+ fi
+ if [ "$blkname" != "" ]; then
+ mountpoints=$(lsblk /dev/$blkname --output MOUNTPOINT -n | wc -w)
+ else
+ mountpoints="0"
+ fi
+ if [ "$mountpoints" = "0" ]; then
+ linux_bind_driver "$bdf" "$driver_name"
+ else
+ echo Active mountpoints on /dev/$blkname, so not binding PCI dev $bdf
+ fi
+ done
+
+ # IOAT
+ TMP=`mktemp`
+ #collect all the device_id info of ioat devices.
+ grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
+ | awk -F"x" '{print $2}' > $TMP
+
+ for dev_id in `cat $TMP`; do
+ for bdf in $(iter_pci_dev_id 8086 $dev_id); do
+ if pci_can_bind $bdf == "0" ; then
+ echo "Skipping un-whitelisted I/OAT device at $bdf"
+ continue
+ fi
+ linux_bind_driver "$bdf" "$driver_name"
+ done
+ done
+ rm $TMP
+
+ # virtio
+ TMP=`mktemp`
+ #collect all the device_id info of virtio devices.
+ grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
+ | awk -F"x" '{print $2}' > $TMP
+
+ for dev_id in `cat $TMP`; do
+ for bdf in $(iter_pci_dev_id 1af4 $dev_id); do
+ if pci_can_bind $bdf == "0" ; then
+ echo "Skipping un-whitelisted Virtio device at $bdf"
+ continue
+ fi
+ blknames=''
+ get_virtio_names_from_bdf "$bdf" blknames
+ for blkname in $blknames; do
+ if mount | grep -q "/dev/$blkname"; then
+ echo Active mountpoints on /dev/$blkname, so not binding PCI dev $bdf
+ continue 2
+ fi
+ done
+
+ linux_bind_driver "$bdf" "$driver_name"
+ done
+ done
+ rm $TMP
+
+ echo "1" > "/sys/bus/pci/rescan"
+}
+
+function cleanup_linux {
+ shopt -s extglob nullglob
+ dirs_to_clean=""
+ dirs_to_clean="$(echo {/var/run,/tmp}/dpdk/spdk{,_pid}+([0-9])) "
+ if [[ -d $XDG_RUNTIME_DIR && $XDG_RUNTIME_DIR != *" "* ]]; then
+ dirs_to_clean+="$(readlink -e assert_not_empty $XDG_RUNTIME_DIR/dpdk/spdk{,_pid}+([0-9]) || true) "
+ fi
+
+ files_to_clean=""
+ for dir in $dirs_to_clean; do
+ files_to_clean+="$(echo $dir/*) "
+ done
+ shopt -u extglob nullglob
+
+ files_to_clean+="$(echo /dev/shm/* | egrep '(spdk_tgt|iscsi|vhost|nvmf|rocksdb|bdevtest|bdevperf)_trace|spdk_iscsi_conns' || true) "
+ files_to_clean="$(readlink -e assert_not_empty $files_to_clean || true)"
+ if [[ -z "$files_to_clean" ]]; then
+ echo "Clean"
+ return 0;
+ fi
+
+ shopt -s extglob
+ for fd_dir in $(echo /proc/+([0-9])); do
+ opened_files+="$(readlink -e assert_not_empty $fd_dir/fd/* || true)"
+ done
+ shopt -u extglob
+
+ if [[ -z "$opened_files" ]]; then
+ echo "Can't get list of opened files!"
+ exit 1
+ fi
+
+ echo 'Cleaning'
+ for f in $files_to_clean; do
+ if ! echo "$opened_files" | egrep -q "^$f\$"; then
+ echo "Removing: $f"
+ rm $f
+ else
+ echo "Still open: $f"
+ fi
+ done
+
+ for dir in $dirs_to_clean; do
+ if ! echo "$opened_files" | egrep -q "^$dir\$"; then
+ echo "Removing: $dir"
+ rmdir $dir
+ else
+ echo "Still open: $dir"
+ fi
+ done
+ echo "Clean"
+
+ unset dirs_to_clean files_to_clean opened_files
+}
+
+function configure_linux {
+ configure_linux_pci
+ hugetlbfs_mounts=$(linux_hugetlbfs_mounts)
+
+ if [ -z "$hugetlbfs_mounts" ]; then
+ hugetlbfs_mounts=/mnt/huge
+ echo "Mounting hugetlbfs at $hugetlbfs_mounts"
+ mkdir -p "$hugetlbfs_mounts"
+ mount -t hugetlbfs nodev "$hugetlbfs_mounts"
+ fi
+
+ if [ -z "$HUGENODE" ]; then
+ hugepages_target="/proc/sys/vm/nr_hugepages"
+ else
+ hugepages_target="/sys/devices/system/node/node${HUGENODE}/hugepages/hugepages-${HUGEPGSZ}kB/nr_hugepages"
+ fi
+
+ echo "$NRHUGE" > "$hugepages_target"
+ allocated_hugepages=`cat $hugepages_target`
+ if [ "$allocated_hugepages" -lt "$NRHUGE" ]; then
+ echo ""
+ echo "## ERROR: requested $NRHUGE hugepages but only $allocated_hugepages could be allocated."
+ echo "## Memory might be heavily fragmented. Please try flushing the system cache, or reboot the machine."
+ exit 1
+ fi
+
+ if [ "$driver_name" = "vfio-pci" ]; then
+ if [ -n "$TARGET_USER" ]; then
+ for mount in $hugetlbfs_mounts; do
+ chown "$TARGET_USER" "$mount"
+ chmod g+w "$mount"
+ done
+ fi
+
+ MEMLOCK_AMNT=`ulimit -l`
+ if [ "$MEMLOCK_AMNT" != "unlimited" ] ; then
+ MEMLOCK_MB=$(( $MEMLOCK_AMNT / 1024 ))
+ echo ""
+ echo "Current user memlock limit: ${MEMLOCK_MB} MB"
+ echo ""
+ echo "This is the maximum amount of memory you will be"
+ echo "able to use with DPDK and VFIO if run as current user."
+ echo -n "To change this, please adjust limits.conf memlock "
+ echo "limit for current user."
+
+ if [ $MEMLOCK_AMNT -lt 65536 ] ; then
+ echo ""
+ echo "## WARNING: memlock limit is less than 64MB"
+ echo -n "## DPDK with VFIO may not be able to initialize "
+ echo "if run as current user."
+ fi
+ fi
+ fi
+}
+
+function reset_linux_pci {
+ # NVMe
+ set +e
+ check_for_driver nvme
+ driver_loaded=$?
+ set -e
+ for bdf in $(iter_pci_class_code 01 08 02); do
+ if pci_can_bind $bdf == "0" ; then
+ echo "Skipping un-whitelisted NVMe controller $blkname ($bdf)"
+ continue
+ fi
+ if [ $driver_loaded -ne 0 ]; then
+ linux_bind_driver "$bdf" nvme
+ else
+ linux_unbind_driver "$bdf"
+ fi
+ done
+
+ # IOAT
+ TMP=`mktemp`
+ #collect all the device_id info of ioat devices.
+ grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
+ | awk -F"x" '{print $2}' > $TMP
+
+ set +e
+ check_for_driver ioatdma
+ driver_loaded=$?
+ set -e
+ for dev_id in `cat $TMP`; do
+ for bdf in $(iter_pci_dev_id 8086 $dev_id); do
+ if pci_can_bind $bdf == "0" ; then
+ echo "Skipping un-whitelisted I/OAT device at $bdf"
+ continue
+ fi
+ if [ $driver_loaded -ne 0 ]; then
+ linux_bind_driver "$bdf" ioatdma
+ else
+ linux_unbind_driver "$bdf"
+ fi
+ done
+ done
+ rm $TMP
+
+ # virtio
+ TMP=`mktemp`
+ #collect all the device_id info of virtio devices.
+ grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
+ | awk -F"x" '{print $2}' > $TMP
+
+ # TODO: check if virtio-pci is loaded first and just unbind if it is not loaded
+ # Requires some more investigation - for example, some kernels do not seem to have
+ # virtio-pci but just virtio_scsi instead. Also need to make sure we get the
+ # underscore vs. dash right in the virtio_scsi name.
+ modprobe virtio-pci || true
+ for dev_id in `cat $TMP`; do
+ for bdf in $(iter_pci_dev_id 1af4 $dev_id); do
+ if pci_can_bind $bdf == "0" ; then
+ echo "Skipping un-whitelisted Virtio device at $bdf"
+ continue
+ fi
+ linux_bind_driver "$bdf" virtio-pci
+ done
+ done
+ rm $TMP
+
+ echo "1" > "/sys/bus/pci/rescan"
+}
+
+function reset_linux {
+ reset_linux_pci
+ for mount in $(linux_hugetlbfs_mounts); do
+ rm -f "$mount"/spdk*map_*
+ done
+ rm -f /run/.spdk*
+}
+
+function status_linux {
+ echo "Hugepages"
+ printf "%-6s %10s %8s / %6s\n" "node" "hugesize" "free" "total"
+
+ numa_nodes=0
+ shopt -s nullglob
+ for path in /sys/devices/system/node/node?/hugepages/hugepages-*/; do
+ numa_nodes=$((numa_nodes + 1))
+ free_pages=`cat $path/free_hugepages`
+ all_pages=`cat $path/nr_hugepages`
+
+ [[ $path =~ (node[0-9]+)/hugepages/hugepages-([0-9]+kB) ]]
+
+ node=${BASH_REMATCH[1]}
+ huge_size=${BASH_REMATCH[2]}
+
+ printf "%-6s %10s %8s / %6s\n" $node $huge_size $free_pages $all_pages
+ done
+ shopt -u nullglob
+
+ # fall back to system-wide hugepages
+ if [ "$numa_nodes" = "0" ]; then
+ free_pages=`grep HugePages_Free /proc/meminfo | awk '{ print $2 }'`
+ all_pages=`grep HugePages_Total /proc/meminfo | awk '{ print $2 }'`
+ node="-"
+ huge_size="$HUGEPGSZ"
+
+ printf "%-6s %10s %8s / %6s\n" $node $huge_size $free_pages $all_pages
+ fi
+
+ echo "NVMe devices"
+
+ echo -e "BDF\t\tNuma Node\tDriver name\t\tDevice name"
+ for bdf in $(iter_pci_class_code 01 08 02); do
+ driver=`grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}'`
+ node=`cat /sys/bus/pci/devices/$bdf/numa_node`;
+ if [ "$driver" = "nvme" -a -d /sys/bus/pci/devices/$bdf/nvme ]; then
+ name="\t"`ls /sys/bus/pci/devices/$bdf/nvme`;
+ else
+ name="-";
+ fi
+ echo -e "$bdf\t$node\t\t$driver\t\t$name";
+ done
+
+ echo "I/OAT DMA"
+
+ #collect all the device_id info of ioat devices.
+ TMP=`grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
+ | awk -F"x" '{print $2}'`
+ echo -e "BDF\t\tNuma Node\tDriver Name"
+ for dev_id in $TMP; do
+ for bdf in $(iter_pci_dev_id 8086 $dev_id); do
+ driver=`grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}'`
+ node=`cat /sys/bus/pci/devices/$bdf/numa_node`;
+ echo -e "$bdf\t$node\t\t$driver"
+ done
+ done
+
+ echo "virtio"
+
+ #collect all the device_id info of virtio devices.
+ TMP=`grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
+ | awk -F"x" '{print $2}'`
+ echo -e "BDF\t\tNuma Node\tDriver Name\t\tDevice Name"
+ for dev_id in $TMP; do
+ for bdf in $(iter_pci_dev_id 1af4 $dev_id); do
+ driver=`grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}'`
+ node=`cat /sys/bus/pci/devices/$bdf/numa_node`;
+ blknames=''
+ get_virtio_names_from_bdf "$bdf" blknames
+ echo -e "$bdf\t$node\t\t$driver\t\t$blknames"
+ done
+ done
+}
+
+function configure_freebsd_pci {
+ TMP=`mktemp`
+
+ # NVMe
+ GREP_STR="class=0x010802"
+
+ # IOAT
+ grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
+ | awk -F"x" '{print $2}' > $TMP
+ for dev_id in `cat $TMP`; do
+ GREP_STR="${GREP_STR}\|chip=0x${dev_id}8086"
+ done
+
+ AWK_PROG="{if (count > 0) printf \",\"; printf \"%s:%s:%s\",\$2,\$3,\$4; count++}"
+ echo $AWK_PROG > $TMP
+
+ BDFS=`pciconf -l | grep "${GREP_STR}" | awk -F: -f $TMP`
+
+ kldunload nic_uio.ko || true
+ kenv hw.nic_uio.bdfs=$BDFS
+ kldload nic_uio.ko
+ rm $TMP
+}
+
+function configure_freebsd {
+ configure_freebsd_pci
+ # If contigmem is already loaded but the HUGEMEM specified doesn't match the
+ # previous value, unload contigmem so that we can reload with the new value.
+ if kldstat -q -m contigmem; then
+ if [ `kenv hw.contigmem.num_buffers` -ne "$((HUGEMEM / 256))" ]; then
+ kldunload contigmem.ko
+ fi
+ fi
+ if ! kldstat -q -m contigmem; then
+ kenv hw.contigmem.num_buffers=$((HUGEMEM / 256))
+ kenv hw.contigmem.buffer_size=$((256 * 1024 * 1024))
+ kldload contigmem.ko
+ fi
+}
+
+function reset_freebsd {
+ kldunload contigmem.ko || true
+ kldunload nic_uio.ko || true
+}
+
+mode=$1
+
+if [ -z "$mode" ]; then
+ mode="config"
+fi
+
+: ${HUGEMEM:=2048}
+: ${PCI_WHITELIST:=""}
+
+if [ -n "$NVME_WHITELIST" ]; then
+ PCI_WHITELIST="$PCI_WHITELIST $NVME_WHITELIST"
+fi
+
+if [ -n "$SKIP_PCI" ]; then
+ PCI_WHITELIST="none"
+fi
+
+declare -a PCI_WHITELIST=(${PCI_WHITELIST})
+
+if [ -z "$TARGET_USER" ]; then
+ TARGET_USER="$SUDO_USER"
+ if [ -z "$TARGET_USER" ]; then
+ TARGET_USER=`logname 2>/dev/null` || true
+ fi
+fi
+
+if [ `uname` = Linux ]; then
+ HUGEPGSZ=$(( `grep Hugepagesize /proc/meminfo | cut -d : -f 2 | tr -dc '0-9'` ))
+ HUGEPGSZ_MB=$(( $HUGEPGSZ / 1024 ))
+ : ${NRHUGE=$(( (HUGEMEM + HUGEPGSZ_MB - 1) / HUGEPGSZ_MB ))}
+
+ if [ "$mode" == "config" ]; then
+ configure_linux
+ elif [ "$mode" == "cleanup" ]; then
+ cleanup_linux
+ elif [ "$mode" == "reset" ]; then
+ reset_linux
+ elif [ "$mode" == "status" ]; then
+ status_linux
+ elif [ "$mode" == "help" ]; then
+ usage $0
+ else
+ usage $0 "Invalid argument '$mode'"
+ fi
+else
+ if [ "$mode" == "config" ]; then
+ configure_freebsd
+ elif [ "$mode" == "reset" ]; then
+ reset_freebsd
+ elif [ "$mode" == "cleanup" ]; then
+ echo "setup.sh cleanup function not yet supported on $(uname)"
+ elif [ "$mode" == "status" ]; then
+ echo "setup.sh status function not yet supported on $(uname)"
+ elif [ "$mode" == "help" ]; then
+ usage $0
+ else
+ usage $0 "Invalid argument '$mode'"
+ fi
+fi
diff --git a/src/spdk/scripts/spdkcli.py b/src/spdk/scripts/spdkcli.py
new file mode 100755
index 00000000..71d8857f
--- /dev/null
+++ b/src/spdk/scripts/spdkcli.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+import sys
+import argparse
+import configshell_fb
+from os import getuid
+from configshell_fb import ConfigShell, shell
+from spdkcli import UIRoot
+from pyparsing import (alphanums, Optional, Suppress, Word, Regex,
+ removeQuotes, dblQuotedString, OneOrMore)
+
+
+def add_quotes_to_shell(spdk_shell):
+ command = shell.locatedExpr(Word(alphanums + '_'))('command')
+ value = dblQuotedString.addParseAction(removeQuotes)
+ value_word = Word(alphanums + ';,=_\+/.<>()~@:-%[]')
+ keyword = Word(alphanums + '_\-')
+ kparam = shell.locatedExpr(keyword + Suppress('=') +
+ Optional(value | value_word, default=''))('kparams*')
+ pparam = shell.locatedExpr(value | value_word)('pparams*')
+ parameters = OneOrMore(kparam | pparam)
+ bookmark = Regex('@([A-Za-z0-9:_.]|-)+')
+ pathstd = Regex('([A-Za-z0-9:_.\[\]]|-)*' + '/' + '([A-Za-z0-9:_.\[\]/]|-)*') \
+ | '..' | '.'
+ path = shell.locatedExpr(bookmark | pathstd | '*')('path')
+ spdk_shell._parser = Optional(path) + Optional(command) + Optional(parameters)
+
+
+def main():
+ """
+ Start SPDK CLI
+ :return:
+ """
+ spdk_shell = ConfigShell("~/.scripts")
+ add_quotes_to_shell(spdk_shell)
+
+ parser = argparse.ArgumentParser(description="SPDK command line interface")
+ parser.add_argument("-s", dest="socket", help="RPC socket path", default="/var/tmp/spdk.sock")
+ parser.add_argument("-v", dest="verbose", help="Print request/response JSON for configuration calls",
+ default=False, action="store_true")
+ parser.add_argument("commands", metavar="command", type=str, nargs="*", default="",
+ help="commands to execute by SPDKCli as one-line command")
+ args = parser.parse_args()
+
+ root_node = UIRoot(args.socket, spdk_shell)
+ root_node.verbose = args.verbose
+ try:
+ root_node.refresh()
+ except BaseException:
+ pass
+
+ if len(args.commands) > 0:
+ spdk_shell.run_cmdline(" ".join(args.commands))
+ sys.exit(0)
+
+ spdk_shell.con.display("SPDK CLI v0.1")
+ spdk_shell.con.display("")
+ spdk_shell.run_interactive()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/spdk/scripts/spdkcli/__init__.py b/src/spdk/scripts/spdkcli/__init__.py
new file mode 100644
index 00000000..571d49a8
--- /dev/null
+++ b/src/spdk/scripts/spdkcli/__init__.py
@@ -0,0 +1 @@
+from .ui_root import UIRoot
diff --git a/src/spdk/scripts/spdkcli/ui_node.py b/src/spdk/scripts/spdkcli/ui_node.py
new file mode 100644
index 00000000..43f6bdfc
--- /dev/null
+++ b/src/spdk/scripts/spdkcli/ui_node.py
@@ -0,0 +1,929 @@
+from configshell_fb import ConfigNode, ExecutionError
+from uuid import UUID
+from rpc.client import JSONRPCException
+import json
+
+
+def convert_bytes_to_human(size):
+ if not size:
+ return ""
+ for x in ["bytes", "K", "M", "G", "T"]:
+ if size < 1024.0:
+ return "%3.1f%s" % (size, x)
+ size /= 1024.0
+
+
+class UINode(ConfigNode):
+ def __init__(self, name, parent=None, shell=None):
+ ConfigNode.__init__(self, name, parent, shell)
+
+ def refresh(self):
+ for child in self.children:
+ child.refresh()
+
+ def ui_command_refresh(self):
+ self.refresh()
+
+ def ui_command_ll(self, path=None, depth=None):
+ """
+ Alias for ls.
+ """
+ self.ui_command_ls(path, depth)
+
+ def execute_command(self, command, pparams=[], kparams={}):
+ try:
+ result = ConfigNode.execute_command(self, command,
+ pparams, kparams)
+ except Exception as msg:
+ self.shell.log.error(str(msg))
+ pass
+ else:
+ self.shell.log.debug("Command %s succeeded." % command)
+ return result
+
+
+class UIBdevs(UINode):
+ def __init__(self, parent):
+ UINode.__init__(self, "bdevs", parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ UIMallocBdev(self)
+ UIAIOBdev(self)
+ UILvolBdev(self)
+ UINvmeBdev(self)
+ UINullBdev(self)
+ UIErrorBdev(self)
+ UISplitBdev(self)
+ UIPmemBdev(self)
+ UIRbdBdev(self)
+ UIiSCSIBdev(self)
+ UIVirtioBlkBdev(self)
+ UIVirtioScsiBdev(self)
+
+
+class UILvolStores(UINode):
+ def __init__(self, parent):
+ UINode.__init__(self, "lvol_stores", parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for lvs in self.get_root().get_lvol_stores():
+ UILvsObj(lvs, self)
+
+ def ui_command_create(self, name, bdev_name, cluster_size=None):
+ """
+ Creates logical volume store on target bdev.
+
+ Arguments:
+ name - Friendly name to use alongside with UUID identifier.
+ bdev_name - On which bdev to create the lvol store.
+ cluster_size - Cluster size to use when creating lvol store, in bytes. Default: 4194304.
+ """
+
+ cluster_size = self.ui_eval_param(cluster_size, "number", None)
+
+ try:
+ self.get_root().create_lvol_store(lvs_name=name, bdev_name=bdev_name, cluster_sz=cluster_size)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ self.get_root().refresh()
+ self.refresh()
+
+ def ui_command_delete(self, name=None, uuid=None):
+ """
+ Deletes logical volume store from configuration.
+ This will also delete all logical volume bdevs created on this lvol store!
+
+ Arguments:
+ name - Friendly name of the logical volume store to be deleted.
+ uuid - UUID number of the logical volume store to be deleted.
+ """
+ if name is None and uuid is None:
+ self.shell.log.error("Please specify one of the identifiers: "
+ "lvol store name or UUID")
+ self.get_root().delete_lvol_store(lvs_name=name, uuid=uuid)
+ self.get_root().refresh()
+ self.refresh()
+
+ def summary(self):
+ return "Lvol stores: %s" % len(self.children), None
+
+
+class UIBdev(UINode):
+ def __init__(self, name, parent):
+ UINode.__init__(self, name, parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for bdev in self.get_root().get_bdevs(self.name):
+ UIBdevObj(bdev, self)
+
+ def ui_command_get_bdev_iostat(self, name=None):
+ try:
+ ret = self.get_root().get_bdevs_iostat(name=name)
+ self.shell.log.info(json.dumps(ret, indent=2))
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ def summary(self):
+ return "Bdevs: %d" % len(self.children), None
+
+
+class UIMallocBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "malloc", parent)
+
+ def ui_command_create(self, size, block_size, name=None, uuid=None):
+ """
+ Construct a Malloc bdev.
+
+ Arguments:
+ size - Size in megabytes.
+ block_size - Integer, block size to use when constructing bdev.
+ name - Optional argument. Custom name to use for bdev. If not provided
+ then name will be "MallocX" where X is next available ID.
+ uuid - Optional parameter. Custom UUID to use. If empty then random
+ will be generated.
+ """
+
+ size = self.ui_eval_param(size, "number", None)
+ block_size = self.ui_eval_param(block_size, "number", None)
+
+ try:
+ ret_name = self.get_root().create_malloc_bdev(num_blocks=size * 1024 * 1024 // block_size,
+ block_size=block_size,
+ name=name, uuid=uuid)
+ self.shell.log.info(ret_name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ self.get_root().refresh()
+ self.refresh()
+
+ def ui_command_delete(self, name):
+ """
+ Deletes malloc bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the malloc bdev to be deleted - UUID number or name alias.
+ """
+ try:
+ self.get_root().delete_malloc_bdev(name=name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh()
+
+
+class UIAIOBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "aio", parent)
+
+ def ui_command_create(self, name, filename, block_size):
+ """
+ Construct an AIO bdev.
+ Backend file must exist before trying to create an AIO bdev.
+
+ Arguments:
+ name - Name to use for bdev. Note this argument is required here;
+ the "MallocX" auto-naming applies to malloc bdevs, not AIO.
+ filename - Path to AIO backend.
+ block_size - Integer, block size to use when constructing bdev.
+ """
+
+ block_size = self.ui_eval_param(block_size, "number", None)
+
+ try:
+ ret_name = self.get_root().create_aio_bdev(name=name,
+ block_size=int(block_size),
+ filename=filename)
+ self.shell.log.info(ret_name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ self.get_root().refresh()
+ self.refresh()
+
+ def ui_command_delete(self, name):
+ """
+ Deletes aio bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the aio bdev to be deleted - UUID number or name alias.
+ """
+ try:
+ self.get_root().delete_aio_bdev(name=name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh()
+
+
+class UILvolBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "logical_volume", parent)
+
+ def ui_command_create(self, name, size, lvs, thin_provision=None):
+ """
+ Construct a Logical Volume bdev.
+
+ Arguments:
+ name - Friendly name to use for creating logical volume bdev.
+ size - Size in megabytes.
+ lvs - Identifier of logical volume store on which the bdev should be
+ created. Can be either a friendly name or UUID.
+ thin_provision - Whether the bdev should be thick or thin provisioned.
+ Default is False, and created bdevs are thick-provisioned.
+ """
+ uuid = None
+ lvs_name = None
+ try:
+ UUID(lvs)
+ uuid = lvs
+ except ValueError:
+ lvs_name = lvs
+
+ size = self.ui_eval_param(size, "number", None)
+ size *= (1024 * 1024)
+ thin_provision = self.ui_eval_param(thin_provision, "bool", False)
+
+ try:
+ ret_uuid = self.get_root().create_lvol_bdev(lvol_name=name, size=size,
+ lvs_name=lvs_name, uuid=uuid,
+ thin_provision=thin_provision)
+ self.shell.log.info(ret_uuid)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ self.get_root().refresh()
+ self.refresh()
+
+ def ui_command_delete(self, name):
+ """
+ Deletes lvol bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the lvol bdev to be deleted - UUID number or name alias.
+ """
+ try:
+ self.get_root().destroy_lvol_bdev(name=name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh()
+
+
+class UINvmeBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "nvme", parent)
+
+ def ui_command_create(self, name, trtype, traddr,
+ adrfam=None, trsvcid=None, subnqn=None):
+
+ if "rdma" in trtype and None in [adrfam, trsvcid, subnqn]:
+ self.shell.log.error("Using RDMA transport type."
+ "Please provide arguments for adrfam, trsvcid and subnqn.")
+
+ try:
+ ret_name = self.get_root().create_nvme_bdev(name=name, trtype=trtype,
+ traddr=traddr, adrfam=adrfam,
+ trsvcid=trsvcid, subnqn=subnqn)
+ self.shell.log.info(ret_name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ self.get_root().refresh()
+ self.refresh()
+
+ def ui_command_delete(self, name):
+ """
+ Deletes NVMe controller from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the NVMe controller to be deleted.
+ """
+ try:
+ self.get_root().delete_nvme_controller(name=name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh()
+
+
+class UINullBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "null", parent)
+
+ def ui_command_create(self, name, size, block_size, uuid=None):
+ """
+ Construct a Null bdev.
+
+ Arguments:
+ name - Name to use for bdev.
+ size - Size in megabytes.
+ block_size - Integer, block size to use when constructing bdev.
+ uuid - Optional parameter. Custom UUID to use. If empty then random
+ will be generated.
+ """
+
+ size = self.ui_eval_param(size, "number", None)
+ block_size = self.ui_eval_param(block_size, "number", None)
+ num_blocks = size * 1024 * 1024 // block_size
+
+ try:
+ ret_name = self.get_root().create_null_bdev(num_blocks=num_blocks,
+ block_size=block_size,
+ name=name, uuid=uuid)
+ self.shell.log.info(ret_name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ self.get_root().refresh()
+ self.refresh()
+
+ def ui_command_delete(self, name):
+ """
+ Deletes null bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the null bdev to be deleted - UUID number or name alias.
+ """
+ try:
+ self.get_root().delete_null_bdev(name=name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh()
+
+
+class UIErrorBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "error", parent)
+
+ def ui_command_create(self, base_name):
+ """
+ Construct a error injection bdev.
+
+ Arguments:
+ base_name - base bdev name on top of which error bdev will be created.
+ """
+
+ try:
+ self.get_root().create_error_bdev(base_name=base_name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ self.get_root().refresh()
+ self.refresh()
+
+ def ui_command_delete(self, name):
+ """
+ Deletes error bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the error bdev to be deleted - UUID number or name alias.
+ """
+ try:
+ self.get_root().delete_error_bdev(name=name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh()
+
+
+class UISplitBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "split_disk", parent)
+
+ def ui_command_split_bdev(self, base_bdev, split_count, split_size_mb=None):
+ """
+ Construct split block devices from a base bdev.
+
+ Arguments:
+ base_bdev - Name of bdev to split
+ split_count - Number of split bdevs to create
+ split_size_mb- Size of each split volume in MiB (optional)
+ """
+
+ split_count = self.ui_eval_param(split_count, "number", None)
+ split_size_mb = self.ui_eval_param(split_size_mb, "number", None)
+
+ try:
+ ret_name = self.get_root().split_bdev(base_bdev=base_bdev,
+ split_count=split_count,
+ split_size_mb=split_size_mb)
+ self.shell.log.info(ret_name)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ self.parent.refresh()
+ self.refresh()
+
+ def ui_command_destruct_split_bdev(self, base_bdev):
+ """Destroy split block devices associated with base bdev.
+
+ Args:
+ base_bdev: name of previously split bdev
+ """
+
+ try:
+ self.get_root().destruct_split_bdev(base_bdev=base_bdev)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ self.parent.refresh()
+ self.refresh()
+
+
class UIPmemBdev(UIBdev):
    """UI node exposing pmem pool management and pmem bdev commands."""

    def __init__(self, parent):
        UIBdev.__init__(self, "pmemblk", parent)

    def ui_command_create_pmem_pool(self, pmem_file, total_size, block_size):
        """
        Create a pmem pool file that can back a pmem bdev.

        Arguments:
        pmem_file - Path of the pmem pool file to create.
        total_size - Pool size in MiB.
        block_size - Pool block size in bytes.
        """
        total_size = self.ui_eval_param(total_size, "number", None)
        block_size = self.ui_eval_param(block_size, "number", None)
        # total_size is specified in MiB but the RPC expects a block count.
        num_blocks = int((total_size * 1024 * 1024) / block_size)

        try:
            self.get_root().create_pmem_pool(pmem_file=pmem_file,
                                             num_blocks=num_blocks,
                                             block_size=block_size)
        except JSONRPCException as e:
            self.shell.log.error(e.message)

    def ui_command_delete_pmem_pool(self, pmem_file):
        """Delete the pmem pool file at the given path."""
        try:
            self.get_root().delete_pmem_pool(pmem_file=pmem_file)
        except JSONRPCException as e:
            self.shell.log.error(e.message)

    def ui_command_info_pmem_pool(self, pmem_file):
        """Display information about the pmem pool file at the given path."""
        try:
            # Bug fix: this previously called delete_pmem_pool(), so asking
            # for pool info destroyed the pool. Query it instead.
            ret = self.get_root().pmem_pool_info(pmem_file=pmem_file)
            self.shell.log.info(ret)
        except JSONRPCException as e:
            self.shell.log.error(e.message)

    def ui_command_create(self, pmem_file, name):
        """
        Create a pmem bdev backed by an existing pmem pool file.

        Arguments:
        pmem_file - Path of the pmem pool file.
        name - Name for the new bdev.
        """
        try:
            ret_name = self.get_root().create_pmem_bdev(pmem_file=pmem_file,
                                                        name=name)
            self.shell.log.info(ret_name)
        except JSONRPCException as e:
            self.shell.log.error(e.message)

        self.get_root().refresh()
        self.refresh()

    def ui_command_delete(self, name):
        """
        Deletes pmem bdev from configuration.

        Arguments:
        name - Is a unique identifier of the pmem bdev to be deleted - UUID number or name alias.
        """
        try:
            self.get_root().delete_pmem_bdev(name=name)
        except JSONRPCException as e:
            self.shell.log.error(e.message)
        self.get_root().refresh()
        self.refresh()
+
+
class UIRbdBdev(UIBdev):
    """UI node exposing Ceph RADOS block device (rbd) bdev commands."""

    def __init__(self, parent):
        UIBdev.__init__(self, "rbd", parent)

    def ui_command_create(self, pool_name, rbd_name, block_size, name=None):
        """Create an rbd bdev backed by a Ceph RBD image.

        Arguments:
        pool_name - Ceph pool containing the image.
        rbd_name - Name of the RBD image.
        block_size - Block size in bytes.
        name - Optional name for the new bdev.
        """
        block_size = self.ui_eval_param(block_size, "number", None)

        try:
            created = self.get_root().create_rbd_bdev(pool_name=pool_name,
                                                      rbd_name=rbd_name,
                                                      block_size=block_size,
                                                      name=name)
        except JSONRPCException as err:
            self.shell.log.error(err.message)
        else:
            self.shell.log.info(created)

        self.get_root().refresh()
        self.refresh()

    def ui_command_delete(self, name):
        """
        Deletes rbd bdev from configuration.

        Arguments:
        name - Is a unique identifier of the rbd bdev to be deleted - UUID number or name alias.
        """
        try:
            self.get_root().delete_rbd_bdev(name=name)
        except JSONRPCException as err:
            self.shell.log.error(err.message)
        self.get_root().refresh()
        self.refresh()
+
+
class UIiSCSIBdev(UIBdev):
    """UI node exposing iSCSI-initiator bdev commands."""

    def __init__(self, parent):
        UIBdev.__init__(self, "iscsi", parent)

    def ui_command_create(self, name, url, initiator_iqn):
        """
        Create iSCSI bdev in configuration by connecting to remote
        iSCSI target.

        Arguments:
        name - name to be used as an ID for created iSCSI bdev.
        url - iscsi url pointing to LUN on remote iSCSI target.
              Example: iscsi://127.0.0.1:3260/iqn.2018-06.org.spdk/0.
        initiator_iqn - IQN to use for initiating connection with the target.
        """
        try:
            created = self.get_root().create_iscsi_bdev(
                name=name, url=url, initiator_iqn=initiator_iqn)
        except JSONRPCException as err:
            self.shell.log.error(err.message)
        else:
            self.shell.log.info(created)

        self.get_root().refresh()
        self.refresh()

    def ui_command_delete(self, name):
        """
        Deletes iSCSI bdev from configuration.

        Arguments:
        name - name of the iscsi bdev to be deleted.
        """
        try:
            self.get_root().delete_iscsi_bdev(name=name)
        except JSONRPCException as err:
            self.shell.log.error(err.message)

        self.get_root().refresh()
        self.refresh()
+
+
class UIVirtioBlkBdev(UIBdev):
    """UI node exposing virtio-blk bdev commands."""

    def __init__(self, parent):
        UIBdev.__init__(self, "virtioblk_disk", parent)

    def ui_command_create(self, name, trtype, traddr,
                          vq_count=None, vq_size=None):
        """Attach a virtio-blk device over the given transport.

        Arguments:
        name - Name for the new bdev.
        trtype - Transport type ("user" or "pci").
        traddr - Transport address (socket path or PCI address).
        vq_count - Optional number of virtqueues.
        vq_size - Optional virtqueue depth.
        """
        vq_count = self.ui_eval_param(vq_count, "number", None)
        vq_size = self.ui_eval_param(vq_size, "number", None)

        try:
            created = self.get_root().create_virtio_dev(name=name,
                                                        trtype=trtype,
                                                        traddr=traddr,
                                                        dev_type="blk",
                                                        vq_count=vq_count,
                                                        vq_size=vq_size)
        except JSONRPCException as err:
            self.shell.log.error(err.message)
        else:
            self.shell.log.info(created)

        self.get_root().refresh()
        self.refresh()

    def ui_command_delete(self, name):
        """
        Deletes virtio blk bdev from configuration.

        Arguments:
        name - Is a unique identifier of the virtio blk bdev to be deleted - UUID number or name alias.
        """
        try:
            self.get_root().remove_virtio_bdev(name=name)
        except JSONRPCException as err:
            self.shell.log.error(err.message)
        self.get_root().refresh()
        self.refresh()
+
+
class UIVirtioScsiBdev(UIBdev):
    """UI node exposing virtio-scsi controller commands."""

    def __init__(self, parent):
        UIBdev.__init__(self, "virtioscsi_disk", parent)

    def refresh(self):
        self._children = set([])
        for dev in self.get_root().get_virtio_scsi_devs():
            UIVirtioScsiBdevObj(dev, self)

    def ui_command_create(self, name, trtype, traddr,
                          vq_count=None, vq_size=None):
        """Attach a virtio-scsi controller over the given transport.

        Arguments:
        name - Name for the new controller.
        trtype - Transport type ("user" or "pci").
        traddr - Transport address (socket path or PCI address).
        vq_count - Optional number of virtqueues.
        vq_size - Optional virtqueue depth.
        """
        vq_count = self.ui_eval_param(vq_count, "number", None)
        vq_size = self.ui_eval_param(vq_size, "number", None)

        try:
            created = self.get_root().create_virtio_dev(name=name,
                                                        trtype=trtype,
                                                        traddr=traddr,
                                                        dev_type="scsi",
                                                        vq_count=vq_count,
                                                        vq_size=vq_size)
        except JSONRPCException as err:
            self.shell.log.error(err.message)
        else:
            self.shell.log.info(created)

        self.get_root().refresh()
        self.refresh()

    def ui_command_delete(self, name):
        """Detach the named virtio-scsi controller."""
        try:
            self.get_root().remove_virtio_bdev(name=name)
        except JSONRPCException as err:
            self.shell.log.error(err.message)

        self.get_root().refresh()
        self.refresh()
+
+
class UIBdevObj(UINode):
    """Leaf node representing a single bdev in the tree."""

    def __init__(self, bdev, parent):
        self.bdev = bdev
        # The plain bdev name is used even for lvol bdevs, so their UUID is
        # displayed rather than the "lvs/lvol" alias: that alias contains
        # "/", which configshell already uses as its path separator.
        # The alias is surfaced in the summary line instead.
        # TODO: Possible next steps:
        #   - change the tree's default separator to something else, or
        #   - add "cd" autocompletion that maps aliases to bdev names.
        UINode.__init__(self, self.bdev.name, parent)

    def ui_command_show_details(self):
        """Dump the bdev's attributes as pretty-printed JSON."""
        self.shell.log.info(json.dumps(vars(self.bdev), indent=2))

    def summary(self):
        size = "Size=%s" % convert_bytes_to_human(
            self.bdev.block_size * self.bdev.num_blocks)
        in_use = "Claimed" if self.bdev.claimed else "Not claimed"
        alias = self.bdev.aliases[0] if self.bdev.aliases else None

        info = ", ".join(part for part in (alias, size, in_use) if part)
        return info, True
+
+
class UIVirtioScsiBdevObj(UIBdevObj):
    """Node for a virtio-scsi controller; children are its exported disks."""

    def __init__(self, bdev, parent):
        UIBdevObj.__init__(self, bdev, parent)
        self.refresh()

    def refresh(self):
        self._children = set([])
        for bdev in self.get_root().get_bdevs("virtio_scsi_disk"):
            # Only attach disks whose names contain this controller's name.
            if self.bdev.name in bdev.name:
                UIBdevObj(bdev, self)

    def summary(self):
        """Show the transport endpoint (vhost-user socket or PCI address)."""
        # Bug fix: previously "info" was only assigned inside the two "if"
        # branches, so a device reporting neither key raised NameError.
        info = ""
        if "socket" in self.bdev.virtio:
            info = self.bdev.virtio["socket"]
        # PCI address takes precedence when both are reported.
        if "pci_address" in self.bdev.virtio:
            info = self.bdev.virtio["pci_address"]
        return info, True
+
+
class UILvsObj(UINode):
    """Leaf node representing a logical volume store."""

    def __init__(self, lvs, parent):
        UINode.__init__(self, lvs.name, parent)
        self.lvs = lvs

    def ui_command_show_details(self):
        """Dump the lvstore's attributes as pretty-printed JSON."""
        self.shell.log.info(json.dumps(vars(self.lvs), indent=2))

    def summary(self):
        total = convert_bytes_to_human(
            self.lvs.total_data_clusters * self.lvs.cluster_size)
        free = convert_bytes_to_human(
            self.lvs.free_clusters * self.lvs.cluster_size)
        # convert_bytes_to_human may return an empty value for zero bytes.
        if not free:
            free = "0"
        return ", ".join(["Size=%s" % total, "Free=%s" % free]), True
+
+
class UIVhosts(UINode):
    """Top-level 'vhost' node grouping the BLK and SCSI controller lists."""

    def __init__(self, parent):
        UINode.__init__(self, "vhost", parent)
        self.refresh()

    def refresh(self):
        self._children = set([])
        # Query the target up front so RPC problems surface immediately.
        self.get_root().list_vhost_ctrls()
        UIVhostBlk(self)
        UIVhostScsi(self)
+
+
class UIVhost(UINode):
    """Common base for the vhost BLK and SCSI controller list nodes."""

    def __init__(self, name, parent):
        UINode.__init__(self, name, parent)
        self.refresh()

    def ui_command_delete(self, name):
        """
        Delete a Vhost controller from configuration.

        Arguments:
        name - Controller name.
        """
        self.get_root().remove_vhost_controller(ctrlr=name)
        self.get_root().refresh()
        self.refresh()
+
+
class UIVhostBlk(UIVhost):
    """List node for vhost BLK controllers."""

    def __init__(self, parent):
        UIVhost.__init__(self, "block", parent)
        self.refresh()

    def refresh(self):
        self._children = set([])
        for controller in self.get_root().get_vhost_ctrlrs(self.name):
            UIVhostBlkCtrlObj(controller, self)

    def ui_command_create(self, name, bdev, cpumask=None, readonly=False):
        """
        Construct a Vhost BLK controller.

        Arguments:
        name - Controller name.
        bdev - Which bdev to attach to the controller.
        cpumask - Optional. Integer to specify mask of CPUs to use.
                  Default: 1.
        readonly - Whether controller should be read only or not.
                   Default: False.
        """
        try:
            self.get_root().create_vhost_blk_controller(
                ctrlr=name, dev_name=bdev, cpumask=cpumask,
                readonly=bool(readonly))
        except JSONRPCException as err:
            self.shell.log.error(err.message)

        self.get_root().refresh()
        self.refresh()
+
+
class UIVhostScsi(UIVhost):
    """List node for vhost SCSI controllers."""

    def __init__(self, parent):
        UIVhost.__init__(self, "scsi", parent)
        self.refresh()

    def refresh(self):
        self._children = set([])
        for controller in self.get_root().get_vhost_ctrlrs(self.name):
            UIVhostScsiCtrlObj(controller, self)

    def ui_command_create(self, name, cpumask=None):
        """
        Construct a Vhost SCSI controller.

        Arguments:
        name - Controller name.
        cpumask - Optional. Integer to specify mask of CPUs to use.
                  Default: 1.
        """
        try:
            self.get_root().create_vhost_scsi_controller(ctrlr=name,
                                                         cpumask=cpumask)
        except JSONRPCException as err:
            self.shell.log.error(err.message)

        self.get_root().refresh()
        self.refresh()
+
+
class UIVhostCtrl(UINode):
    # Shared base for SCSI and BLK controller nodes; never instantiate
    # this class directly.
    def __init__(self, ctrlr, parent):
        self.ctrlr = ctrlr
        UINode.__init__(self, self.ctrlr.ctrlr, parent)
        self.refresh()

    def ui_command_show_details(self):
        """Dump the controller's attributes as pretty-printed JSON."""
        self.shell.log.info(json.dumps(vars(self.ctrlr), indent=2))

    def ui_command_set_coalescing(self, delay_base_us, iops_threshold):
        """Tune interrupt coalescing parameters for this controller."""
        delay_base_us = self.ui_eval_param(delay_base_us, "number", None)
        iops_threshold = self.ui_eval_param(iops_threshold, "number", None)

        try:
            self.get_root().set_vhost_controller_coalescing(
                ctrlr=self.ctrlr.ctrlr,
                delay_base_us=delay_base_us,
                iops_threshold=iops_threshold)
        except JSONRPCException as err:
            self.shell.log.error(err.message)
+
+
class UIVhostScsiCtrlObj(UIVhostCtrl):
    # Tree node for one vhost SCSI controller; children are its SCSI targets.
    def refresh(self):
        self._children = set([])
        for lun in self.ctrlr.backend_specific["scsi"]:
            UIVhostTargetObj(lun, self)

    def ui_command_remove_target(self, target_num):
        """
        Remove target node from SCSI controller.

        Arguments:
        target_num - Integer identifier of target node to delete.
        """
        try:
            self.get_root().remove_vhost_scsi_target(ctrlr=self.ctrlr.ctrlr,
                                                     scsi_target_num=int(target_num))
            # Re-fetch this controller so self.ctrlr reflects the removal
            # before refresh() rebuilds the child nodes from it.
            for ctrlr in self.get_root().get_vhost_ctrlrs("scsi"):
                if ctrlr.ctrlr == self.ctrlr.ctrlr:
                    self.ctrlr = ctrlr
        except JSONRPCException as e:
            self.shell.log.error(e.message)

        self.refresh()
        self.get_root().refresh()

    def ui_command_add_lun(self, target_num, bdev_name):
        """
        Add LUN to SCSI target node.
        Currently only one LUN (which is LUN ID 0) per target is supported.
        Adding LUN to not existing target node will create that node.

        Arguments:
        target_num - Integer identifier of target node to modify.
        bdev_name - Which bdev to add as LUN.
        """
        try:
            self.get_root().add_vhost_scsi_lun(ctrlr=self.ctrlr.ctrlr,
                                               scsi_target_num=int(target_num),
                                               bdev_name=bdev_name)
            # Re-fetch this controller so the new LUN appears in the tree.
            for ctrlr in self.get_root().get_vhost_ctrlrs("scsi"):
                if ctrlr.ctrlr == self.ctrlr.ctrlr:
                    self.ctrlr = ctrlr
        except JSONRPCException as e:
            self.shell.log.error(e.message)

        self.refresh()

    def summary(self):
        # Show the controller's vhost-user socket path next to its name.
        info = self.ctrlr.socket
        return info, True
+
+
class UIVhostBlkCtrlObj(UIVhostCtrl):
    """Tree node for one vhost BLK controller; its child is the backing bdev."""

    def refresh(self):
        self._children = set([])
        UIVhostLunDevObj(self.ctrlr.backend_specific["block"]["bdev"], self)

    def summary(self):
        block = self.ctrlr.backend_specific["block"]
        ro = "Readonly" if block["readonly"] else None
        info = ", ".join(part for part in (self.ctrlr.socket, ro) if part)
        return info, True
+
+
class UIVhostTargetObj(UINode):
    """Tree node for one SCSI target of a vhost SCSI controller."""

    def __init__(self, target, parent):
        self.target = target
        # configshell does not allow paths with spaces, so replace them.
        UINode.__init__(self, target["target_name"].replace(" ", "_"), parent)
        self.refresh()

    def refresh(self):
        self._children = set([])
        for lun in self.target["luns"]:
            UIVhostLunDevObj(lun["bdev_name"], self)

    def ui_command_show_details(self):
        """Dump the raw target description as pretty-printed JSON."""
        self.shell.log.info(json.dumps(self.target, indent=2))

    def summary(self):
        lun_count = "LUNs: %s" % len(self.target["luns"])
        target_id = "TargetID: %s" % self.target["scsi_dev_num"]
        return ",".join([lun_count, target_id]), True
+
+
class UIVhostLunDevObj(UINode):
    """Leaf node naming a bdev exposed as a vhost LUN."""

    def __init__(self, name, parent):
        UINode.__init__(self, name, parent)
diff --git a/src/spdk/scripts/spdkcli/ui_node_iscsi.py b/src/spdk/scripts/spdkcli/ui_node_iscsi.py
new file mode 100644
index 00000000..65592a75
--- /dev/null
+++ b/src/spdk/scripts/spdkcli/ui_node_iscsi.py
@@ -0,0 +1,635 @@
+from rpc.client import JSONRPCException
+from .ui_node import UINode
+
+
class UIISCSI(UINode):
    """Root node of the iSCSI subsystem tree."""

    def __init__(self, parent):
        UINode.__init__(self, "iscsi", parent)
        self.refresh()

    def refresh(self):
        self._children = set([])
        # One child node per iSCSI configuration category.
        UIISCSIDevices(self)
        UIPortalGroups(self)
        UIInitiatorGroups(self)
        UIISCSIConnections(self)
        UIISCSIAuthGroups(self)
        UIISCSIGlobalParams(self)
+
+
class UIISCSIGlobalParams(UINode):
    """Node listing global iSCSI parameters, one read-only child each."""

    def __init__(self, parent):
        UINode.__init__(self, "global_params", parent)
        self.refresh()

    def refresh(self):
        self._children = set([])
        params = self.get_root().get_iscsi_global_params()
        for param, val in params.items():
            UIISCSIGlobalParam("%s: %s" % (param, val), self)

    def ui_command_set_auth(self, g=None, d=None, r=None, m=None):
        """Set CHAP authentication for discovery service.

        Optional arguments:
        g = chap_group: Authentication group ID for discovery session
        d = disable_chap: CHAP for discovery session should be disabled
        r = require_chap: CHAP for discovery session should be required
        m = mutual_chap: CHAP for discovery session should be mutual
        """
        chap_group = self.ui_eval_param(g, "number", None)
        disable_chap = self.ui_eval_param(d, "bool", None)
        require_chap = self.ui_eval_param(r, "bool", None)
        mutual_chap = self.ui_eval_param(m, "bool", None)

        try:
            self.get_root().set_iscsi_discovery_auth(
                chap_group=chap_group, disable_chap=disable_chap,
                require_chap=require_chap, mutual_chap=mutual_chap)
        except JSONRPCException as err:
            self.shell.log.error(err.message)
        self.refresh()
+
+
class UIISCSIGlobalParam(UINode):
    """Read-only leaf node showing one 'name: value' global parameter."""

    def __init__(self, param, parent):
        UINode.__init__(self, param, parent)
+
+
class UIISCSIDevices(UINode):
    """Container node for all iSCSI target nodes."""

    def __init__(self, parent):
        UINode.__init__(self, "target_nodes", parent)
        self.refresh()

    def refresh(self):
        self._children = set([])
        self.target_nodes = list(self.get_root().get_target_nodes())
        self.scsi_devices = list(self.get_root().get_scsi_devices())
        # Pair each SCSI device with its target node by device name.
        for device in self.scsi_devices:
            for node in self.target_nodes:
                if hasattr(device, "device_name") and node['name'] \
                        == device.device_name:
                    UIISCSIDevice(device, node, self)

    def ui_command_create(self, name, alias_name, bdev_name_id_pairs,
                          pg_ig_mappings, queue_depth, g=None, d=None, r=None,
                          m=None, h=None, t=None):
        """Create target node

        Positional args:
           name: Target node name (ASCII)
           alias_name: Target node alias name (ASCII)
           bdev_name_id_pairs: List of bdev_name_id_pairs, e.g. Malloc0:0 Malloc1:1
           pg_ig_mappings: List of pg_ig_mappings, e.g. pg_tag:ig_tag pg_tag2:ig_tag2
           queue_depth: Desired target queue depth
        Optional args:
           g = chap_group: Authentication group ID for this target node
           d = disable_chap: CHAP authentication should be disabled for this target node
           r = require_chap: CHAP authentication should be required for this target node
           m = mutual_chap: CHAP authentication should be mutual/bidirectional
           h = header_digest: Header Digest should be required for this target node
           t = data_digest: Data Digest should be required for this target node
        """
        luns = []
        # Echo the parsed list arguments back to the user.
        print("bdev_name_id_pairs: %s" % bdev_name_id_pairs)
        print("pg_ig_mappings: %s" % pg_ig_mappings)
        # "bdev:id" pairs -> [{"bdev_name": ..., "lun_id": ...}, ...]
        for u in bdev_name_id_pairs.strip().split(" "):
            bdev_name, lun_id = u.split(":")
            luns.append({"bdev_name": bdev_name, "lun_id": int(lun_id)})
        pg_ig_maps = []
        # "pg:ig" pairs -> [{"pg_tag": ..., "ig_tag": ...}, ...]
        for u in pg_ig_mappings.strip().split(" "):
            pg, ig = u.split(":")
            pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
        queue_depth = self.ui_eval_param(queue_depth, "number", None)
        chap_group = self.ui_eval_param(g, "number", None)
        disable_chap = self.ui_eval_param(d, "bool", None)
        require_chap = self.ui_eval_param(r, "bool", None)
        mutual_chap = self.ui_eval_param(m, "bool", None)
        header_digest = self.ui_eval_param(h, "bool", None)
        data_digest = self.ui_eval_param(t, "bool", None)
        try:
            self.get_root().construct_target_node(
                name=name, alias_name=alias_name, luns=luns,
                pg_ig_maps=pg_ig_maps, queue_depth=queue_depth,
                chap_group=chap_group, disable_chap=disable_chap,
                require_chap=require_chap, mutual_chap=mutual_chap,
                header_digest=header_digest, data_digest=data_digest)
        except JSONRPCException as e:
            self.shell.log.error(e.message)

        self.refresh()

    def ui_command_delete(self, name=None):
        """Delete a target node. If name is not specified delete all target nodes.

        Arguments:
        name - Target node name.
        """
        if name is None:
            # Bug fix: this branch iterated over the nonexistent attribute
            # "self.devices", so "delete" with no name always raised
            # AttributeError. Iterate the cached SCSI devices instead and
            # skip entries without a device_name.
            for device in self.scsi_devices:
                if not hasattr(device, "device_name"):
                    continue
                try:
                    self.get_root().delete_target_node(
                        target_node_name=device.device_name)
                except JSONRPCException as e:
                    self.shell.log.error(e.message)
        else:
            try:
                self.get_root().delete_target_node(target_node_name=name)
            except JSONRPCException as e:
                self.shell.log.error(e.message)
        self.refresh()

    def ui_command_add_lun(self, name, bdev_name, lun_id=None):
        """Add lun to the target node.

        Required args:
           name: Target node name (ASCII)
           bdev_name: bdev name
        Positional args:
           lun_id: LUN ID (integer >= 0)
        """
        if lun_id:
            lun_id = self.ui_eval_param(lun_id, "number", None)
        try:
            self.get_root().target_node_add_lun(
                name=name, bdev_name=bdev_name, lun_id=lun_id)
        except JSONRPCException as e:
            self.shell.log.error(e.message)
        self.parent.refresh()

    def summary(self):
        count = 0
        for device in self.scsi_devices:
            for node in self.target_nodes:
                if hasattr(device, "device_name") and node['name'] \
                        == device.device_name:
                    count = count + 1
        return "Target nodes: %d" % count, None
+
+
class UIISCSIDevice(UINode):
    # Node for a single iSCSI target; pairs a SCSI device with its
    # target-node description fetched from the RPC server.
    def __init__(self, device, target, parent):
        UINode.__init__(self, device.device_name, parent)
        self.device = device
        self.target = target
        self.refresh()

    def ui_command_set_auth(self, g=None, d=None, r=None, m=None):
        """Set CHAP authentication for the target node.

        Optional args:
        g = chap_group: Authentication group ID for this target node
        d = disable_chap: CHAP authentication should be disabled for this target node
        r = require_chap: CHAP authentication should be required for this target node
        m = mutual_chap: CHAP authentication should be mutual/bidirectional
        """
        chap_group = self.ui_eval_param(g, "number", None)
        disable_chap = self.ui_eval_param(d, "bool", None)
        require_chap = self.ui_eval_param(r, "bool", None)
        mutual_chap = self.ui_eval_param(m, "bool", None)
        try:
            self.get_root().set_iscsi_target_node_auth(
                name=self.device.device_name, chap_group=chap_group,
                disable_chap=disable_chap,
                require_chap=require_chap, mutual_chap=mutual_chap)
        except JSONRPCException as e:
            self.shell.log.error(e.message)
        self.parent.refresh()

    def ui_command_add_pg_ig_maps(self, pg_ig_mappings):
        """Add PG-IG maps to the target node.

        Args:
        pg_ig_mappings: List of pg_ig_mappings, e.g. pg_tag:ig_tag pg_tag2:ig_tag2
        """
        pg_ig_maps = []
        # Parse "pg:ig" pairs into the dict form expected by the RPC.
        for u in pg_ig_mappings.strip().split(" "):
            pg, ig = u.split(":")
            pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
        try:
            self.get_root().add_pg_ig_maps(
                pg_ig_maps=pg_ig_maps, name=self.device.device_name)
        except JSONRPCException as e:
            self.shell.log.error(e.message)
        self.parent.refresh()

    def ui_command_delete_pg_ig_maps(self, pg_ig_mappings):
        """Delete PG-IG maps from the target node.

        Args:
        pg_ig_mappings: List of pg_ig_mappings, e.g. pg_tag:ig_tag pg_tag2:ig_tag2
        """
        pg_ig_maps = []
        # Parse "pg:ig" pairs into the dict form expected by the RPC.
        for u in pg_ig_mappings.strip().split(" "):
            pg, ig = u.split(":")
            pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
        try:
            self.get_root().delete_pg_ig_maps(
                pg_ig_maps=pg_ig_maps, name=self.device.device_name)
        except JSONRPCException as e:
            self.shell.log.error(e.message)
        self.parent.refresh()

    def refresh(self):
        self._children = set([])
        # Child nodes: LUNs, PG-IG maps, and the CHAP settings summary.
        UIISCSILuns(self.target['luns'], self)
        UIISCSIPgIgMaps(self.target['pg_ig_maps'], self)
        auths = {"disable_chap": self.target["disable_chap"],
                 "require_chap": self.target["require_chap"],
                 "mutual_chap": self.target["mutual_chap"],
                 "chap_group": self.target["chap_group"],
                 "data_digest": self.target["data_digest"]}
        UIISCSIAuth(auths, self)

    def summary(self):
        return "Id: %s, QueueDepth: %s" % (self.device.id,
                                           self.target['queue_depth']), None
+
+
class UIISCSIAuth(UINode):
    """Read-only node summarizing a target node's CHAP settings."""

    def __init__(self, auths, parent):
        UINode.__init__(self, "auths", parent)
        self.auths = auths
        self.refresh()

    def summary(self):
        fmt = ("disable_chap: %s, require_chap: %s, "
               "mutual_chap: %s, chap_group: %s")
        info = fmt % (self.auths['disable_chap'], self.auths['require_chap'],
                      self.auths['mutual_chap'], self.auths['chap_group'])
        return info, None
+
+
class UIISCSILuns(UINode):
    """Container node listing a target node's LUNs."""

    def __init__(self, luns, parent):
        UINode.__init__(self, "luns", parent)
        self.luns = luns
        self.refresh()

    def refresh(self):
        self._children = set([])
        for entry in self.luns:
            UIISCSILun(entry, self)

    def summary(self):
        return "Luns: %d" % len(self.luns), None
+
+
class UIISCSILun(UINode):
    """Leaf node for a single LUN; summary shows the backing bdev."""

    def __init__(self, lun, parent):
        UINode.__init__(self, "lun %s" % lun['lun_id'], parent)
        self.lun = lun
        self.refresh()

    def summary(self):
        return "%s" % self.lun['bdev_name'], None
+
+
class UIISCSIPgIgMaps(UINode):
    """Container node listing a target node's portal/initiator group maps."""

    def __init__(self, pg_ig_maps, parent):
        UINode.__init__(self, "pg_ig_maps", parent)
        self.pg_ig_maps = pg_ig_maps
        self.refresh()

    def refresh(self):
        self._children = set([])
        for mapping in self.pg_ig_maps:
            UIISCSIPgIg(mapping, self)

    def summary(self):
        return "Pg_ig_maps: %d" % len(self.pg_ig_maps), None
+
+
class UIISCSIPgIg(UINode):
    """Leaf node for one portal-group/initiator-group mapping."""

    def __init__(self, pg_ig, parent):
        label = "portal_group%s - initiator_group%s" % (pg_ig['pg_tag'],
                                                        pg_ig['ig_tag'])
        UINode.__init__(self, label, parent)
        self.pg_ig = pg_ig
        self.refresh()
+
+
class UIPortalGroups(UINode):
    """Container node for iSCSI portal groups."""

    def __init__(self, parent):
        UINode.__init__(self, "portal_groups", parent)
        self.refresh()

    def ui_command_create(self, tag, portal_list):
        """Add a portal group.

        Args:
           portal_list: List of portals e.g. ip:port@cpumask ip2:port2
           tag: Portal group tag (unique, integer > 0)
        """
        portals = []
        # Echo the parsed list argument back to the user.
        print("portal_list: %s" % portal_list)
        for portal in portal_list.strip().split(" "):
            host = portal
            cpumask = None
            if "@" in portal:
                host, cpumask = portal.split("@")
            # Bug fix: split only at the LAST colon (maxsplit=1) so hosts
            # that themselves contain ':' (e.g. IPv6 literals) still parse;
            # rsplit(":", -1) split at every colon and made the two-value
            # unpack raise ValueError for such hosts.
            host, port = host.rsplit(":", 1)
            portals.append({'host': host, 'port': port})
            if cpumask:
                portals[-1]['cpumask'] = cpumask
        tag = self.ui_eval_param(tag, "number", None)
        try:
            self.get_root().construct_portal_group(tag=tag, portals=portals)
        except JSONRPCException as e:
            self.shell.log.error(e.message)

        self.refresh()

    def ui_command_delete(self, tag):
        """Delete a portal group with given tag (unique, integer > 0)"""
        tag = self.ui_eval_param(tag, "number", None)
        try:
            self.get_root().delete_portal_group(tag=tag)
        except JSONRPCException as e:
            self.shell.log.error(e.message)

        self.refresh()

    def refresh(self):
        self._children = set([])
        self.pgs = list(self.get_root().get_portal_groups())
        for pg in self.pgs:
            UIPortalGroup(pg, self)

    def summary(self):
        return "Portal groups: %d" % len(self.pgs), None
+
+
class UIPortalGroup(UINode):
    """Node for one portal group; children are its portals."""

    def __init__(self, pg, parent):
        UINode.__init__(self, "portal_group%s" % pg.tag, parent)
        self.pg = pg
        self.refresh()

    def refresh(self):
        self._children = set([])
        for entry in self.pg.portals:
            UIPortal(entry['host'], entry['port'], entry['cpumask'], self)

    def summary(self):
        return "Portals: %d" % len(self.pg.portals), None
+
+
class UIPortal(UINode):
    """Leaf node describing one portal (host, port, cpumask)."""

    def __init__(self, host, port, cpumask, parent):
        label = "host=%s, port=%s, cpumask=%s" % (host, port, cpumask)
        UINode.__init__(self, label, parent)
        self.refresh()
+
+
class UIInitiatorGroups(UINode):
    """Container node for iSCSI initiator groups."""

    def __init__(self, parent):
        UINode.__init__(self, "initiator_groups", parent)
        self.refresh()

    def ui_command_create(self, tag, initiator_list, netmask_list):
        """Add an initiator group.

        Args:
           tag: Initiator group tag (unique, integer > 0)
           initiator_list: List of initiator hostnames or IP addresses
                           separated with whitespaces, e.g. 127.0.0.1 192.168.200.100
           netmask_list: List of initiator netmasks separated with whitespaces,
                         e.g. 255.255.0.0 255.248.0.0
        """
        tag = self.ui_eval_param(tag, "number", None)
        initiators = initiator_list.split(" ")
        netmasks = netmask_list.split(" ")
        try:
            self.get_root().construct_initiator_group(
                tag=tag, initiators=initiators, netmasks=netmasks)
        except JSONRPCException as err:
            self.shell.log.error(err.message)

        self.refresh()

    def ui_command_delete(self, tag):
        """Delete an initiator group.

        Args:
           tag: Initiator group tag (unique, integer > 0)
        """
        tag = self.ui_eval_param(tag, "number", None)
        try:
            self.get_root().delete_initiator_group(tag=tag)
        except JSONRPCException as err:
            self.shell.log.error(err.message)

        self.refresh()

    def ui_command_add_initiator(self, tag, initiators, netmasks):
        """Add initiators to an existing initiator group.

        Args:
           tag: Initiator group tag (unique, integer > 0)
           initiators: List of initiator hostnames or IP addresses,
                       e.g. 127.0.0.1 192.168.200.100
           netmasks: List of initiator netmasks,
                     e.g. 255.255.0.0 255.248.0.0
        """
        tag = self.ui_eval_param(tag, "number", None)
        try:
            self.get_root().add_initiators_to_initiator_group(
                tag=tag, initiators=initiators.split(" "),
                netmasks=netmasks.split(" "))
        except JSONRPCException as err:
            self.shell.log.error(err.message)

        self.refresh()

    def ui_command_delete_initiator(self, tag, initiators=None, netmasks=None):
        """Delete initiators from an existing initiator group.

        Args:
           tag: Initiator group tag (unique, integer > 0)
           initiators: List of initiator hostnames or IP addresses, e.g. 127.0.0.1 192.168.200.100
           netmasks: List of initiator netmasks, e.g. 255.255.0.0 255.248.0.0
        """
        tag = self.ui_eval_param(tag, "number", None)
        # Only split when the user actually supplied a value.
        if initiators:
            initiators = initiators.split(" ")
        if netmasks:
            netmasks = netmasks.split(" ")
        try:
            self.get_root().delete_initiators_from_initiator_group(
                tag=tag, initiators=initiators,
                netmasks=netmasks)
        except JSONRPCException as err:
            self.shell.log.error(err.message)

        self.refresh()

    def refresh(self):
        self._children = set([])
        self.igs = list(self.get_root().get_initiator_groups())
        for ig in self.igs:
            UIInitiatorGroup(ig, self)

    def summary(self):
        return "Initiator groups: %d" % len(self.igs), None
+
+
class UIInitiatorGroup(UINode):
    """Node for one initiator group; children are its initiators."""

    def __init__(self, ig, parent):
        UINode.__init__(self, "initiator_group%s" % ig.tag, parent)
        self.ig = ig
        self.refresh()

    def refresh(self):
        self._children = set([])
        # Initiators and netmasks are parallel lists; pair them up.
        for initiator, netmask in zip(self.ig.initiators, self.ig.netmasks):
            UIInitiator(initiator, netmask, self)

    def summary(self):
        return "Initiators: %d" % len(self.ig.initiators), None
+
+
class UIInitiator(UINode):
    """Leaf node describing one initiator (hostname, netmask)."""

    def __init__(self, initiator, netmask, parent):
        label = "hostname=%s, netmask=%s" % (initiator, netmask)
        UINode.__init__(self, label, parent)
        self.refresh()
+
+
class UIISCSIConnections(UINode):
    """Container node listing active iSCSI connections."""

    def __init__(self, parent):
        UINode.__init__(self, "iscsi_connections", parent)
        self.refresh()

    def refresh(self):
        self._children = set([])
        self.iscsicons = list(self.get_root().get_iscsi_connections())
        for conn in self.iscsicons:
            UIISCSIConnection(conn, self)

    def summary(self):
        return "Connections: %d" % len(self.iscsicons), None
+
+
class UIISCSIConnection(UINode):
    """Node for one active iSCSI connection; children show its attributes."""

    def __init__(self, ic, parent):
        UINode.__init__(self, "%s" % ic['id'], parent)
        self.ic = ic
        self.refresh()

    def refresh(self):
        self._children = set([])
        # Bug fix: dict.iteritems() exists only on Python 2 and raised
        # AttributeError here; this package otherwise targets Python 3
        # (it uses print() and items() elsewhere), so use items().
        for key, val in self.ic.items():
            # The id is already the node name; don't repeat it as a child.
            if key == "id":
                continue
            UIISCSIConnectionDetails("%s: %s" % (key, val), self)
+
+
class UIISCSIConnectionDetails(UINode):
    """Read-only leaf node showing one 'key: value' connection attribute."""

    def __init__(self, info, parent):
        UINode.__init__(self, "%s" % info, parent)
        self.refresh()
+
+
class UIISCSIAuthGroups(UINode):
    """Container node for CHAP authentication groups."""

    def __init__(self, parent):
        UINode.__init__(self, "auth_groups", parent)
        self.refresh()

    def refresh(self):
        self._children = set([])
        self.iscsi_auth_groups = list(self.get_root().get_iscsi_auth_groups())
        # Defensive: treat a missing result as an empty list.
        if self.iscsi_auth_groups is None:
            self.iscsi_auth_groups = []
        for group in self.iscsi_auth_groups:
            UIISCSIAuthGroup(group, self)

    def ui_command_create(self, tag, secrets=None):
        """Add authentication group for CHAP authentication.

        Args:
           tag: Authentication group tag (unique, integer > 0).
        Optional args:
           secrets: Array of secrets objects separated by comma sign,
                    e.g. user:test secret:test muser:mutual_test msecret:mutual_test
        """
        tag = self.ui_eval_param(tag, "number", None)
        if secrets:
            # "k:v k:v,k:v ..." -> one dict per comma-separated group.
            secrets = [dict(entry.split(":") for entry in group.split(" "))
                       for group in secrets.split(",")]
        try:
            self.get_root().add_iscsi_auth_group(tag=tag, secrets=secrets)
        except JSONRPCException as err:
            self.shell.log.error(err.message)

        self.refresh()

    def ui_command_delete(self, tag):
        """Delete an authentication group.

        Args:
           tag: Authentication group tag (unique, integer > 0)
        """
        tag = self.ui_eval_param(tag, "number", None)
        try:
            self.get_root().delete_iscsi_auth_group(tag=tag)
        except JSONRPCException as err:
            self.shell.log.error(err.message)

        self.refresh()

    def ui_command_add_secret(self, tag, user, secret,
                              muser=None, msecret=None):
        """Add a secret to an authentication group.

        Args:
           tag: Authentication group tag (unique, integer > 0)
           user: User name for one-way CHAP authentication
           secret: Secret for one-way CHAP authentication
        Optional args:
           muser: User name for mutual CHAP authentication
           msecret: Secret for mutual CHAP authentication
        """
        tag = self.ui_eval_param(tag, "number", None)
        try:
            self.get_root().add_secret_to_iscsi_auth_group(
                tag=tag, user=user, secret=secret,
                muser=muser, msecret=msecret)
        except JSONRPCException as err:
            self.shell.log.error(err.message)
        self.refresh()

    def ui_command_delete_secret(self, tag, user):
        """Delete a secret from an authentication group.

        Args:
           tag: Authentication group tag (unique, integer > 0)
           user: User name for one-way CHAP authentication
        """
        tag = self.ui_eval_param(tag, "number", None)
        try:
            self.get_root().delete_secret_from_iscsi_auth_group(
                tag=tag, user=user)
        except JSONRPCException as err:
            self.shell.log.error(err.message)
        self.refresh()

    def summary(self):
        return "Groups: %s" % len(self.iscsi_auth_groups), None
+
+
class UIISCSIAuthGroup(UINode):
    """Node for one authentication group; children are its secrets."""

    def __init__(self, ag, parent):
        UINode.__init__(self, "group" + str(ag['tag']), parent)
        self.ag = ag
        self.refresh()

    def refresh(self):
        self._children = set([])
        for entry in self.ag['secrets']:
            UISCSIAuthSecret(entry, self)

    def summary(self):
        return "Secrets: %s" % len(self.ag['secrets']), None
+
+
class UISCSIAuthSecret(UINode):
    """Leaf node showing one CHAP secret as 'key=value' pairs."""

    def __init__(self, secret, parent):
        pairs = ("%s=%s" % (key, val) for key, val in secret.items())
        UINode.__init__(self, ", ".join(pairs), parent)
        self.secret = secret
        self.refresh()
diff --git a/src/spdk/scripts/spdkcli/ui_node_nvmf.py b/src/spdk/scripts/spdkcli/ui_node_nvmf.py
new file mode 100644
index 00000000..71b69367
--- /dev/null
+++ b/src/spdk/scripts/spdkcli/ui_node_nvmf.py
@@ -0,0 +1,302 @@
+from rpc.client import JSONRPCException
+from .ui_node import UINode
+
+
+class UINVMf(UINode):
+ def __init__(self, parent):
+ UINode.__init__(self, "nvmf", parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ UINVMfSubsystems(self)
+
+
+class UINVMfSubsystems(UINode):
+ def __init__(self, parent):
+ UINode.__init__(self, "subsystem", parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for subsystem in self.get_root().get_nvmf_subsystems():
+ UINVMfSubsystem(subsystem, self)
+
+ def ui_command_create(self, nqn, serial_number=None,
+ max_namespaces=None, allow_any_host="false"):
+ """Create subsystem with given parameters.
+
+ Arguments:
+ nqn - Target nqn(ASCII).
+ serial_number - Example: 'SPDK00000000000001'.
+ max_namespaces - Optional parameter. Maximum number of namespaces allowed to be added during
+ an active connection
+ allow_any_host - Optional parameter. Allow any host to connect (don't enforce host NQN
+ whitelist)
+ """
+ allow_any_host = self.ui_eval_param(allow_any_host, "bool", False)
+ max_namespaces = self.ui_eval_param(max_namespaces, "number", 0)
+ try:
+ self.get_root().create_nvmf_subsystem(nqn=nqn, serial_number=serial_number,
+ allow_any_host=allow_any_host,
+ max_namespaces=max_namespaces)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.refresh()
+
+ def ui_command_delete(self, subsystem_nqn):
+ """Delete subsystem with given nqn.
+
+ Arguments:
+ subsystem_nqn - Name of subsystem to delete
+ """
+ try:
+ self.get_root().delete_nvmf_subsystem(nqn=subsystem_nqn)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.refresh()
+
+ def summary(self):
+ return "Subsystems: %s" % len(self.children), None
+
+
+class UINVMfSubsystem(UINode):
+ def __init__(self, subsystem, parent):
+ UINode.__init__(self, subsystem.nqn, parent)
+ self.subsystem = subsystem
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ UINVMfSubsystemListeners(self.subsystem.listen_addresses, self)
+ UINVMfSubsystemHosts(self.subsystem.hosts, self)
+ if hasattr(self.subsystem, 'namespaces'):
+ UINVMfSubsystemNamespaces(self.subsystem.namespaces, self)
+
+ def refresh_node(self):
+ for subsystem in self.get_root().get_nvmf_subsystems():
+ if subsystem.nqn == self.subsystem.nqn:
+ self.subsystem = subsystem
+ self.refresh()
+
+ def ui_command_show_details(self):
+ self.shell.log.info(json.dumps(vars(self.lvs), indent=2))
+
+ def ui_command_allow_any_host(self, disable="false"):
+ """Disable or enable allow_any_host flag.
+
+ Arguments:
+ disable - Optional parameter. If false then enable, if true disable
+ """
+ disable = self.ui_eval_param(disable, "bool", None)
+ try:
+ self.get_root().nvmf_subsystem_allow_any_host(
+ nqn=self.subsystem.nqn, disable=disable)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh_node()
+
+ def summary(self):
+ sn = None
+ if hasattr(self.subsystem, 'serial_number'):
+ sn = "sn=%s" % self.subsystem.serial_number
+ st = None
+ if hasattr(self.subsystem, 'subtype'):
+ st = "st=%s" % self.subsystem.subtype
+ allow_any_host = None
+ if self.subsystem.allow_any_host:
+ allow_any_host = "Allow any host"
+ info = ", ".join(filter(None, [sn, st, allow_any_host]))
+ return info, None
+
+
+class UINVMfSubsystemListeners(UINode):
+ def __init__(self, listen_addresses, parent):
+ UINode.__init__(self, "listen_addresses", parent)
+ self.listen_addresses = listen_addresses
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for address in self.listen_addresses:
+ UINVMfSubsystemListener(address, self)
+
+ def refresh_node(self):
+ for subsystem in self.get_root().get_nvmf_subsystems():
+ if subsystem.nqn == self.parent.subsystem.nqn:
+ self.listen_addresses = subsystem.listen_addresses
+ self.refresh()
+
+ def ui_command_create(self, trtype, traddr, trsvcid, adrfam):
+ """Create address listener for subsystem.
+
+ Arguments:
+ trtype - NVMe-oF transport type: e.g., rdma.
+ traddr - NVMe-oF transport address: e.g., an ip address.
+ trsvcid - NVMe-oF transport service id: e.g., a port number.
+ adrfam - NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc.
+ """
+ try:
+ self.get_root().nvmf_subsystem_add_listener(
+ nqn=self.parent.subsystem.nqn, trtype=trtype, traddr=traddr,
+ trsvcid=trsvcid, adrfam=adrfam)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh_node()
+
+ def ui_command_delete(self, trtype, traddr, trsvcid, adrfam=None):
+ """Remove address listener for subsystem.
+
+ Arguments:
+ trtype - Transport type (RDMA)
+ traddr - NVMe-oF transport address: e.g., an ip address.
+ trsvcid - NVMe-oF transport service id: e.g., a port number.
+ adrfam - Optional argument. Address family ("IPv4", "IPv6", "IB" or "FC").
+ """
+ try:
+ self.get_root().nvmf_subsystem_remove_listener(
+ nqn=self.parent.subsystem.nqn, trtype=trtype,
+ traddr=traddr, trsvcid=trsvcid, adrfam=adrfam)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh_node()
+
+ def summary(self):
+ return "Addresses: %s" % len(self.listen_addresses), None
+
+
+class UINVMfSubsystemListener(UINode):
+ def __init__(self, address, parent):
+ UINode.__init__(self, "%s:%s" % (address['traddr'], address['trsvcid']),
+ parent)
+ self.address = address
+
+ def summary(self):
+ return "%s" % self.address['trtype'], True
+
+
+class UINVMfSubsystemHosts(UINode):
+ def __init__(self, hosts, parent):
+ UINode.__init__(self, "hosts", parent)
+ self.hosts = hosts
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for host in self.hosts:
+ UINVMfSubsystemHost(host, self)
+
+ def refresh_node(self):
+ for subsystem in self.get_root().get_nvmf_subsystems():
+ if subsystem.nqn == self.parent.subsystem.nqn:
+ self.hosts = subsystem.hosts
+ self.refresh()
+
+ def ui_command_create(self, host):
+ """Add a host NQN to the whitelist of allowed hosts.
+
+ Args:
+ host: Host NQN to add to the list of allowed host NQNs
+ """
+ try:
+ self.get_root().nvmf_subsystem_add_host(
+ nqn=self.parent.subsystem.nqn, host=host)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh_node()
+
+ def ui_command_delete(self, host):
+ """Delete host from subsystem.
+
+ Arguments:
+ host - NQN of host to remove.
+ """
+ try:
+ self.get_root().nvmf_subsystem_remove_host(
+ nqn=self.parent.subsystem.nqn, host=host)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh_node()
+
+ def summary(self):
+ return "Hosts: %s" % len(self.hosts), None
+
+
+class UINVMfSubsystemHost(UINode):
+ def __init__(self, host, parent):
+ UINode.__init__(self, "%s" % host['nqn'], parent)
+ self.host = host
+
+
+class UINVMfSubsystemNamespaces(UINode):
+ def __init__(self, namespaces, parent):
+ UINode.__init__(self, "namespaces", parent)
+ self.namespaces = namespaces
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for namespace in self.namespaces:
+ UINVMfSubsystemNamespace(namespace, self)
+
+ def refresh_node(self):
+ for subsystem in self.get_root().get_nvmf_subsystems():
+ if subsystem.nqn == self.parent.subsystem.nqn:
+ self.namespaces = subsystem.namespaces
+ self.refresh()
+
+ def ui_command_create(self, bdev_name, nsid=None,
+ nguid=None, eui64=None, uuid=None):
+ """Add a namespace to a subsystem.
+
+ Args:
+ bdev_name: Name of bdev to expose as a namespace.
+ Optional args:
+ nsid: Namespace ID.
+ nguid: 16-byte namespace globally unique identifier in hexadecimal.
+ eui64: 8-byte namespace EUI-64 in hexadecimal (e.g. "ABCDEF0123456789").
+ uuid: Namespace UUID.
+ """
+ nsid = self.ui_eval_param(nsid, "number", None)
+ try:
+ self.get_root().nvmf_subsystem_add_ns(
+ nqn=self.parent.subsystem.nqn, bdev_name=bdev_name,
+ nsid=nsid, nguid=nguid, eui64=eui64, uuid=uuid)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh_node()
+
+ def ui_command_delete(self, nsid):
+ """Delete namespace from subsystem.
+
+ Arguments:
+ nsid - Id of namespace to remove.
+ """
+ nsid = self.ui_eval_param(nsid, "number", None)
+ try:
+ self.get_root().nvmf_subsystem_remove_ns(
+ nqn=self.parent.subsystem.nqn, nsid=nsid)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+ self.get_root().refresh()
+ self.refresh_node()
+
+ def summary(self):
+ return "Namespaces: %s" % len(self.namespaces), None
+
+
+class UINVMfSubsystemNamespace(UINode):
+ def __init__(self, namespace, parent):
+ UINode.__init__(self, namespace['bdev_name'], parent)
+ self.namespace = namespace
+
+ def summary(self):
+ info = ", ".join([str(self.namespace['uuid']), self.namespace['name'],
+ str(self.namespace['nsid'])])
+ return info, None
diff --git a/src/spdk/scripts/spdkcli/ui_root.py b/src/spdk/scripts/spdkcli/ui_root.py
new file mode 100644
index 00000000..9854d373
--- /dev/null
+++ b/src/spdk/scripts/spdkcli/ui_root.py
@@ -0,0 +1,482 @@
+from .ui_node import UINode, UIBdevs, UILvolStores, UIVhosts
+from .ui_node_nvmf import UINVMf
+from .ui_node_iscsi import UIISCSI
+import rpc.client
+import rpc
+from functools import wraps
+
+
+class UIRoot(UINode):
+ """
+ Root node for CLI menu tree structure. Refreshes running config on startup.
+ """
+ def __init__(self, s, shell):
+ UINode.__init__(self, "/", shell=shell)
+ self.current_bdevs = []
+ self.current_lvol_stores = []
+ self.current_vhost_ctrls = []
+ self.current_nvmf_subsystems = []
+ self.set_rpc_target(s)
+ self.verbose = False
+ self.is_init = self.check_init()
+
+ def refresh(self):
+ if self.is_init is False:
+ methods = self.get_rpc_methods(current=True)
+ methods = "\n".join(methods)
+ self.shell.log.warning("SPDK Application is not yet initialized.\n"
+ "Please initialize subsystems with start_subsystem_init command.\n"
+ "List of available commands in current state:\n"
+ "%s" % methods)
+ else:
+ # Pass because we'd like to build main tree structure for "ls"
+ # even if state is uninitialized
+ pass
+
+ self._children = set([])
+ UIBdevs(self)
+ UILvolStores(self)
+ UIVhosts(self)
+ UINVMf(self)
+ UIISCSI(self)
+
+ def set_rpc_target(self, s):
+ self.client = rpc.client.JSONRPCClient(s)
+
+ def print_array(self, a):
+ return " ".join(a)
+
+ def verbose(f):
+ # For any configuration calls (create, delete, construct, etc.)
+ # Check if verbose option is to be used and set appropriately.
+ # Do not use for "get_*" methods so that output is not
+ # flooded.
+ def w(self, **kwargs):
+ self.client.verbose = self.verbose
+ r = f(self, **kwargs)
+ self.client.verbose = False
+ return r
+ return w
+
+ def ui_command_start_subsystem_init(self):
+ if rpc.start_subsystem_init(self.client):
+ self.is_init = True
+ self.refresh()
+
+ def ui_command_load_config(self, filename):
+ with open(filename, "r") as fd:
+ rpc.load_config(self.client, fd)
+
+ def ui_command_load_subsystem_config(self, filename):
+ with open(filename, "r") as fd:
+ rpc.load_subsystem_config(self.client, fd)
+
+ def ui_command_save_config(self, filename, indent=2):
+ with open(filename, "w") as fd:
+ rpc.save_config(self.client, fd, indent)
+
+ def ui_command_save_subsystem_config(self, filename, subsystem, indent=2):
+ with open(filename, "w") as fd:
+ rpc.save_subsystem_config(self.client, fd, indent, subsystem)
+
+ def get_rpc_methods(self, current=False):
+ return rpc.get_rpc_methods(self.client, current=current)
+
+ def check_init(self):
+ return "start_subsystem_init" not in self.get_rpc_methods(current=True)
+
+ def get_bdevs(self, bdev_type):
+ if self.is_init:
+ self.current_bdevs = rpc.bdev.get_bdevs(self.client)
+ # Following replace needs to be done in order for some of the bdev
+ # listings to work: logical volumes, split disk.
+ # For example logical volumes: listing in menu is "Logical_Volume"
+ # (cannot have space), but the product name in SPDK is "Logical Volume"
+ bdev_type = bdev_type.replace("_", " ")
+ for bdev in [x for x in self.current_bdevs if bdev_type in x["product_name"].lower()]:
+ test = Bdev(bdev)
+ yield test
+
+ def get_bdevs_iostat(self, **kwargs):
+ return rpc.bdev.get_bdevs_iostat(self.client, **kwargs)
+
+ @verbose
+ def split_bdev(self, **kwargs):
+ response = rpc.bdev.construct_split_vbdev(self.client, **kwargs)
+ return self.print_array(response)
+
+ @verbose
+ def destruct_split_bdev(self, **kwargs):
+ rpc.bdev.destruct_split_vbdev(self.client, **kwargs)
+
+ @verbose
+ def delete_bdev(self, name):
+ rpc.bdev.delete_bdev(self.client, bdev_name=name)
+
+ @verbose
+ def create_malloc_bdev(self, **kwargs):
+ response = rpc.bdev.construct_malloc_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def delete_malloc_bdev(self, **kwargs):
+ rpc.bdev.delete_malloc_bdev(self.client, **kwargs)
+
+ @verbose
+ def create_iscsi_bdev(self, **kwargs):
+ response = rpc.bdev.construct_iscsi_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def delete_iscsi_bdev(self, **kwargs):
+ rpc.bdev.delete_iscsi_bdev(self.client, **kwargs)
+
+ @verbose
+ def create_aio_bdev(self, **kwargs):
+ response = rpc.bdev.construct_aio_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def delete_aio_bdev(self, **kwargs):
+ rpc.bdev.delete_aio_bdev(self.client, **kwargs)
+
+ @verbose
+ def create_lvol_bdev(self, **kwargs):
+ response = rpc.lvol.construct_lvol_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def destroy_lvol_bdev(self, **kwargs):
+ response = rpc.lvol.destroy_lvol_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def create_nvme_bdev(self, **kwargs):
+ response = rpc.bdev.construct_nvme_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def delete_nvme_controller(self, **kwargs):
+ rpc.bdev.delete_nvme_controller(self.client, **kwargs)
+
+ @verbose
+ def create_null_bdev(self, **kwargs):
+ response = rpc.bdev.construct_null_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def delete_null_bdev(self, **kwargs):
+ rpc.bdev.delete_null_bdev(self.client, **kwargs)
+
+ @verbose
+ def create_error_bdev(self, **kwargs):
+ response = rpc.bdev.construct_error_bdev(self.client, **kwargs)
+
+ @verbose
+ def delete_error_bdev(self, **kwargs):
+ rpc.bdev.delete_error_bdev(self.client, **kwargs)
+
+ def get_lvol_stores(self):
+ if self.is_init:
+ self.current_lvol_stores = rpc.lvol.get_lvol_stores(self.client)
+ for lvs in self.current_lvol_stores:
+ yield LvolStore(lvs)
+
+ @verbose
+ def create_lvol_store(self, **kwargs):
+ response = rpc.lvol.construct_lvol_store(self.client, **kwargs)
+ return response
+
+ @verbose
+ def delete_lvol_store(self, **kwargs):
+ rpc.lvol.destroy_lvol_store(self.client, **kwargs)
+
+ @verbose
+ def create_pmem_pool(self, **kwargs):
+ response = rpc.pmem.create_pmem_pool(self.client, **kwargs)
+ return response
+
+ @verbose
+ def delete_pmem_pool(self, **kwargs):
+ rpc.pmem.delete_pmem_pool(self.client, **kwargs)
+
+ @verbose
+ def create_pmem_bdev(self, **kwargs):
+ response = rpc.bdev.construct_pmem_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def delete_pmem_bdev(self, **kwargs):
+ response = rpc.bdev.delete_pmem_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def create_rbd_bdev(self, **kwargs):
+ response = rpc.bdev.construct_rbd_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def delete_rbd_bdev(self, **kwargs):
+ response = rpc.bdev.delete_rbd_bdev(self.client, **kwargs)
+ return response
+
+ @verbose
+ def create_virtio_dev(self, **kwargs):
+ response = rpc.vhost.construct_virtio_dev(self.client, **kwargs)
+ return self.print_array(response)
+
+ @verbose
+ def remove_virtio_bdev(self, **kwargs):
+ response = rpc.vhost.remove_virtio_bdev(self.client, **kwargs)
+ return response
+
+ def get_virtio_scsi_devs(self):
+ if self.is_init:
+ for bdev in rpc.vhost.get_virtio_scsi_devs(self.client):
+ test = Bdev(bdev)
+ yield test
+
+ def list_vhost_ctrls(self):
+ if self.is_init:
+ self.current_vhost_ctrls = rpc.vhost.get_vhost_controllers(self.client)
+
+ def get_vhost_ctrlrs(self, ctrlr_type):
+ if self.is_init:
+ self.list_vhost_ctrls()
+ for ctrlr in [x for x in self.current_vhost_ctrls if ctrlr_type in list(x["backend_specific"].keys())]:
+ yield VhostCtrlr(ctrlr)
+
+ @verbose
+ def remove_vhost_controller(self, **kwargs):
+ rpc.vhost.remove_vhost_controller(self.client, **kwargs)
+
+ @verbose
+ def create_vhost_scsi_controller(self, **kwargs):
+ rpc.vhost.construct_vhost_scsi_controller(self.client, **kwargs)
+
+ @verbose
+ def create_vhost_blk_controller(self, **kwargs):
+ rpc.vhost.construct_vhost_blk_controller(self.client, **kwargs)
+
+ @verbose
+ def remove_vhost_scsi_target(self, **kwargs):
+ rpc.vhost.remove_vhost_scsi_target(self.client, **kwargs)
+
+ @verbose
+ def add_vhost_scsi_lun(self, **kwargs):
+ rpc.vhost.add_vhost_scsi_lun(self.client, **kwargs)
+
+ def set_vhost_controller_coalescing(self, **kwargs):
+ rpc.vhost.set_vhost_controller_coalescing(self.client, **kwargs)
+
+ def list_nvmf_subsystems(self):
+ if self.is_init:
+ self.current_nvmf_subsystems = rpc.nvmf.get_nvmf_subsystems(self.client)
+
+ def get_nvmf_subsystems(self):
+ if self.is_init:
+ self.list_nvmf_subsystems()
+ for subsystem in self.current_nvmf_subsystems:
+ yield NvmfSubsystem(subsystem)
+
+ @verbose
+ def create_nvmf_subsystem(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_create(self.client, **kwargs)
+
+ @verbose
+ def delete_nvmf_subsystem(self, **kwargs):
+ rpc.nvmf.delete_nvmf_subsystem(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_add_listener(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_add_listener(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_remove_listener(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_remove_listener(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_add_host(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_add_host(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_remove_host(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_remove_host(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_allow_any_host(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_allow_any_host(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_add_ns(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_add_ns(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_remove_ns(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_remove_ns(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_allow_any_host(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_allow_any_host(self.client, **kwargs)
+
+ def get_scsi_devices(self):
+ if self.is_init:
+ for device in rpc.iscsi.get_scsi_devices(self.client):
+ yield ScsiObj(device)
+
+ def get_target_nodes(self):
+ if self.is_init:
+ for tg in rpc.iscsi.get_target_nodes(self.client):
+ yield tg
+
+ @verbose
+ def construct_target_node(self, **kwargs):
+ rpc.iscsi.construct_target_node(self.client, **kwargs)
+
+ @verbose
+ def delete_target_node(self, **kwargs):
+ rpc.iscsi.delete_target_node(self.client, **kwargs)
+
+ def get_portal_groups(self):
+ if self.is_init:
+ for pg in rpc.iscsi.get_portal_groups(self.client):
+ yield ScsiObj(pg)
+
+ def get_initiator_groups(self):
+ if self.is_init:
+ for ig in rpc.iscsi.get_initiator_groups(self.client):
+ yield ScsiObj(ig)
+
+ @verbose
+ def construct_portal_group(self, **kwargs):
+ rpc.iscsi.add_portal_group(self.client, **kwargs)
+
+ @verbose
+ def delete_portal_group(self, **kwargs):
+ rpc.iscsi.delete_portal_group(self.client, **kwargs)
+
+ @verbose
+ def construct_initiator_group(self, **kwargs):
+ rpc.iscsi.add_initiator_group(self.client, **kwargs)
+
+ @verbose
+ def delete_initiator_group(self, **kwargs):
+ rpc.iscsi.delete_initiator_group(self.client, **kwargs)
+
+ @verbose
+ def get_iscsi_connections(self, **kwargs):
+ if self.is_init:
+ for ic in rpc.iscsi.get_iscsi_connections(self.client, **kwargs):
+ yield ic
+
+ @verbose
+ def add_initiators_to_initiator_group(self, **kwargs):
+ rpc.iscsi.add_initiators_to_initiator_group(self.client, **kwargs)
+
+ @verbose
+ def delete_initiators_from_initiator_group(self, **kwargs):
+ rpc.iscsi.delete_initiators_from_initiator_group(self.client, **kwargs)
+
+ @verbose
+ def add_pg_ig_maps(self, **kwargs):
+ rpc.iscsi.add_pg_ig_maps(self.client, **kwargs)
+
+ @verbose
+ def delete_pg_ig_maps(self, **kwargs):
+ rpc.iscsi.delete_pg_ig_maps(self.client, **kwargs)
+
+ @verbose
+ def add_secret_to_iscsi_auth_group(self, **kwargs):
+ rpc.iscsi.add_secret_to_iscsi_auth_group(self.client, **kwargs)
+
+ @verbose
+ def delete_secret_from_iscsi_auth_group(self, **kwargs):
+ rpc.iscsi.delete_secret_from_iscsi_auth_group(self.client, **kwargs)
+
+ @verbose
+ def get_iscsi_auth_groups(self, **kwargs):
+ return rpc.iscsi.get_iscsi_auth_groups(self.client, **kwargs)
+
+ @verbose
+ def add_iscsi_auth_group(self, **kwargs):
+ rpc.iscsi.add_iscsi_auth_group(self.client, **kwargs)
+
+ @verbose
+ def delete_iscsi_auth_group(self, **kwargs):
+ rpc.iscsi.delete_iscsi_auth_group(self.client, **kwargs)
+
+ @verbose
+ def set_iscsi_target_node_auth(self, **kwargs):
+ rpc.iscsi.set_iscsi_target_node_auth(self.client, **kwargs)
+
+ @verbose
+ def target_node_add_lun(self, **kwargs):
+ rpc.iscsi.target_node_add_lun(self.client, **kwargs)
+
+ @verbose
+ def set_iscsi_discovery_auth(self, **kwargs):
+ rpc.iscsi.set_iscsi_discovery_auth(self.client, **kwargs)
+
+ @verbose
+ def get_iscsi_global_params(self, **kwargs):
+ return rpc.iscsi.get_iscsi_global_params(self.client, **kwargs)
+
+
+class Bdev(object):
+ def __init__(self, bdev_info):
+ """
+ All class attributes are set based on what information is received
+ from get_bdevs RPC call.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in list(bdev_info.keys()):
+ setattr(self, i, bdev_info[i])
+
+
+class LvolStore(object):
+ def __init__(self, lvs_info):
+ """
+ All class attributes are set based on what information is received
+ from get_bdevs RPC call.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in list(lvs_info.keys()):
+ setattr(self, i, lvs_info[i])
+
+
+class VhostCtrlr(object):
+ def __init__(self, ctrlr_info):
+ """
+ All class attributes are set based on what information is received
+ from get_vhost_controllers RPC call.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in list(ctrlr_info.keys()):
+ setattr(self, i, ctrlr_info[i])
+
+
+class NvmfSubsystem(object):
+ def __init__(self, subsystem_info):
+ """
+ All class attributes are set based on what information is received
+ from get_nvmf_subsystem RPC call.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in subsystem_info.keys():
+ setattr(self, i, subsystem_info[i])
+
+
+class ScsiObj(object):
+ def __init__(self, device_info):
+ """
+ All class attributes are set based on what information is received
+ from iscsi related RPC calls.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in device_info.keys():
+ setattr(self, i, device_info[i])
diff --git a/src/spdk/scripts/vagrant/README.md b/src/spdk/scripts/vagrant/README.md
new file mode 100644
index 00000000..b44597ba
--- /dev/null
+++ b/src/spdk/scripts/vagrant/README.md
@@ -0,0 +1,219 @@
+# SPDK Vagrant and VirtualBox
+
+The following guide explains how to use the scripts in the `spdk/scripts/vagrant`. Mac, Windows, and Linux platforms are supported.
+
+1. Install and configure [Git](https://git-scm.com/) on your platform.
+2. Install [VirtualBox 5.1](https://www.virtualbox.org/wiki/Downloads) or newer
+3. Install [VirtualBox Extension Pack](https://www.virtualbox.org/wiki/Downloads)
+4. Install and configure [Vagrant 1.9.4](https://www.vagrantup.com) or newer
+
+## Mac OSX Setup (High Sierra)
+
+OSX platforms already have Git installed, however, installing the [Apple xCode](https://developer.apple.com/xcode/) developer kit and [xCode Command Line tools](https://developer.apple.com/xcode/features/) will provide UNIX command line tools such as make, awk, sed, ssh, tar, and zip. xCode can be installed through the App Store on your Mac.
+
+Quick start instructions for OSX:
+
+1. Install Homebrew
+2. Install Virtual Box Cask
+3. Install Virtual Box Extensions
+4. Install Vagrant Cask
+
+```
+ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+ brew doctor
+ brew update
+ brew cask install virtualbox
+ brew cask install virtualbox-extension-pack
+ brew cask install vagrant
+```
+
+## Windows 10 Setup
+
+1. Windows platforms should install [Git](https://git-scm.com/download/win) from git-scm.com.
+ - This provides everything needed to use git on Windows, including a `git-bash` command line environment.
+2. Install [VirtualBox 5.1](https://www.virtualbox.org/wiki/Downloads) or newer
+3. Install [VirtualBox Extension Pack](https://www.virtualbox.org/wiki/Downloads)
+4. Install and configure [Vagrant 1.9.4](https://www.vagrantup.com) or newer
+
+- Note: VirtualBox requires virtualization to be enabled in the BIOS.
+- Note: You should disable Hyper-V in Windows RS 3 laptop. Search `windows features` uncheck Hyper-V, restart laptop
+
+## Linux Setup
+
+Following the generic instructions should be sufficient for most Linux distributions. For more thorough instructions on installing VirtualBox on your distribution of choice, please see the following [guide](https://www.virtualbox.org/wiki/Linux_Downloads).
+
+## Configure Vagrant
+
+If you are behind a corporate firewall, configure the following proxy settings.
+
+1. Set the http_proxy and https_proxy
+2. Install the proxyconf plugin
+
+```
+ $ export http_proxy=....
+ $ export https_proxy=....
+ $ vagrant plugin install vagrant-proxyconf
+```
+
+## Download SPDK from GitHub
+
+Use git to clone a new spdk repository. GerritHub can also be used. See the instructions at [spdk.io](http://www.spdk.io/development/#gerrithub) to setup your GerritHub account. Note that this spdk repository will be rsync'd into your VM, so you can use this repository to continue development within the VM.
+
+## Create a Virtual Box
+
+Use the `spdk/scripts/vagrant/create_vbox.sh` script to create a VM of your choice. Supported VM platforms are:
+
+- centos7
+- ubuntu16
+- ubuntu18
+- fedora26
+- fedora27
+- fedora28
+- freebsd11
+
+```
+$ spdk/scripts/vagrant/create_vbox.sh -h
+ Usage: create_vbox.sh [-n <num-cpus>] [-s <ram-size>] [-x <http-proxy>] [-hvrld] <distro>
+
+ distro = <centos7 | ubuntu16 | ubuntu18 | fedora26 | fedora27 | fedora28 | freebsd11>
+
+ -s <ram-size> in kb default: 4096
+ -n <num-cpus> 1 to 4 default: 4
+ -x <http-proxy> default: ""
+ -p <provider> libvirt or virtualbox
+ --vhost-host-dir=<path> directory path with vhost test dependencies
+ (test VM qcow image, fio binary, ssh keys)
+ --vhost-vm-dir=<path> directory where to put vhost dependencies in VM
+ -r dry-run
+ -l use a local copy of spdk, don't try to rsync from the host.
+ -d deploy a test vm by provisioning all prerequisites for spdk autotest
+ -h help
+ -v verbose
+
+ Examples:
+
+ ./scripts/vagrant/create_vbox.sh -x http://user:password@host:port fedora27
+ ./scripts/vagrant/create_vbox.sh -s 2048 -n 2 ubuntu16
+ ./scripts/vagrant/create_vbox.sh -rv freebsd
+ ./scripts/vagrant/create_vbox.sh fedora26
+```
+
+It is recommended that you call the `create_vbox.sh` script from outside of the spdk repository. Call this script from a parent directory. This will allow the creation of multiple VMs in separate <distro> directories, all using the same spdk repository. For example:
+
+```
+ $ spdk/scripts/vagrant/create_vbox.sh -s 2048 -n 2 fedora26
+```
+
+This script will:
+
+1. create a subdirectory named <distro> in your $PWD
+2. copy the needed files from `spdk/scripts/vagrant/` into the <distro> directory
+3. create a working virtual box in the <distro> directory
+4. rsync the `~/.gitconfig` file to `/home/vagrant/` in the newly provisioned virtual box
+5. rsync a copy of the source `spdk` repository to `/home/vagrant/spdk_repo/spdk` (optional)
+6. rsync a copy of the `~/vagrant_tools` directory to `/home/vagrant/tools` (optional)
+7. execute vm_setup.sh on the guest to install all spdk dependencies (optional)
+
+This arrangement allows the provisioning of multiple, different VMs within that same directory hierarchy using the same spdk repository. Following the creation of the vm you'll need to ssh into your virtual box and finish the VM initialization.
+
+```
+ $ cd <distro>
+ $ vagrant ssh
+```
+
+## Finish VM Initialization
+
+A copy of the `spdk` repository you cloned will exist in the `spdk_repo` directory of the `/home/vagrant` user account. After using `vagrant ssh` to enter your VM you must complete the initialization of your VM by running the `scripts/vagrant/update.sh` script. For example:
+
+```
+ $ script -c 'sudo spdk_repo/spdk/scripts/vagrant/update.sh' update.log
+```
+
+The `update.sh` script completes initialization of the VM by automating the following steps.
+
+1. Runs yum/apt-get update (Linux)
+2. Runs the scripts/pkgdep.sh script
+3. Installs the FreeBSD source in /usr/src (FreeBSD only)
+
+This only needs to be done once. This is also not necessary for Fedora VMs provisioned with the -d flag. The `vm_setup` script performs these operations instead.
+
+## Post VM Initialization
+
+Following VM initialization you must:
+
+1. Verify you have an emulated NVMe device
+2. Compile your spdk source tree
+3. Run the hello_world example to validate the environment is set up correctly
+
+### Verify you have an emulated NVMe device
+
+```
+ $ lspci | grep "Non-Volatile"
+ 00:0e.0 Non-Volatile memory controller: InnoTek Systemberatung GmbH Device 4e56
+```
+
+### Compile SPDK
+
+```
+ $ cd spdk_repo/spdk
+ $ git submodule update --init
+ $ ./configure --enable-debug
+ $ make
+```
+
+### Run the hello_world example script
+
+```
+ $ sudo scripts/setup.sh
+ $ cd examples/bdev/hello_world
+ $ sudo ./hello_bdev
+```
+
+### Running autorun.sh with vagrant
+
+After running vm_setup.sh the `run-autorun.sh` can be used to run `spdk/autorun.sh` on a Fedora vagrant machine. Note that the `spdk/scripts/vagrant/autorun-spdk.conf` should be copied to `~/autorun-spdk.conf` before starting your tests.
+
+```
+ $ cp spdk/scripts/vagrant/autorun-spdk.conf ~/
+ $ spdk/scripts/vagrant/run-autorun.sh -h
+ Usage: scripts/vagrant/run-autorun.sh -d <path_to_spdk_tree> [-h] | [-q] | [-n]
+ -d : Specify a path to an SPDK source tree
+ -q : No output to screen
+ -n : Noop - dry-run
+ -h : This help
+
+ Examples:
+ run-spdk-autotest.sh -d . -q
+ run-spdk-autotest.sh -d /home/vagrant/spdk_repo/spdk
+```
+
+## FreeBSD Appendix
+
+---
+**NOTE:** As of this writing the FreeBSD Virtualbox instance does not correctly support the vagrant-proxyconf feature.
+---
+
+The following steps are done by the `update.sh` script. It is recommended that you capture the output of `update.sh` with a typescript. E.g.:
+
+```
+ $ script update.log sudo spdk_repo/spdk/scripts/vagrant/update.sh
+```
+
+1. Updates the pkg catalog
+2. Installs the needed FreeBSD packages on the system by calling pkgdep.sh
+3. Installs the FreeBSD source in /usr/src
+
+```
+ $ sudo pkg upgrade -f
+ $ sudo spdk_repo/spdk/scripts/pkgdep.sh
+ $ sudo git clone --depth 10 -b releases/11.1.0 https://github.com/freebsd/freebsd.git /usr/src
+```
+
+To build spdk on FreeBSD use `gmake MAKE=gmake`. E.g.:
+
+```
+ $ cd spdk_repo/spdk
+ $ git submodule update --init
+ $ ./configure --enable-debug
+ $ gmake MAKE=gmake
+```
diff --git a/src/spdk/scripts/vagrant/Vagrantfile b/src/spdk/scripts/vagrant/Vagrantfile
new file mode 100644
index 00000000..40e012b2
--- /dev/null
+++ b/src/spdk/scripts/vagrant/Vagrantfile
@@ -0,0 +1,158 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+
+ # Pick the right distro and bootstrap, default is fedora26
+ distro = ( ENV['SPDK_VAGRANT_DISTRO'] || "fedora26")
+ case distro
+ when "centos7"
+ config.vm.box = "puppetlabs/centos-7.2-64-nocm"
+ config.ssh.insert_key = false
+ # Puppetlabs does not provide libvirt Box so we will use official one
+ config.vm.provider :libvirt do |libvirt|
+ config.vm.box = "centos/7"
+ end if Vagrant.has_plugin?('vagrant-libvirt')
+ when "ubuntu16"
+ # See: https://app.vagrantup.com/puppetlabs/boxes/ubuntu-16.04-64-nocm
+ config.vm.box = "puppetlabs/ubuntu-16.04-64-nocm"
+ config.vm.box_version = "1.0.0"
+ when "ubuntu18"
+ # See: https://app.vagrantup.com/bento/boxes/ubuntu-18.04
+ config.vm.box = "bento/ubuntu-18.04"
+ config.vm.box_version = "201803.24.0"
+ when "fedora26"
+ #See: https://app.vagrantup.com/generic/boxes/fedora26
+ config.vm.box = "generic/fedora26"
+ when "fedora27"
+ #See: https://app.vagrantup.com/generic/boxes/fedora27
+ config.vm.box = "generic/fedora27"
+ when "fedora28"
+ #See: https://app.vagrantup.com/generic/boxes/fedora28
+ config.vm.box = "generic/fedora28"
+ when "freebsd11"
+ #See: https://app.vagrantup.com/generic/boxes/freebsd11
+ config.vm.box = "generic/freebsd11"
+ if File.file?(File.expand_path("~/vagrant_pkg.conf"))
+ config.vm.provision "file", source: "~/vagrant_pkg.conf", destination: "pkg.conf"
+ config.vm.provision "shell", inline: "sudo mv pkg.conf /usr/local/etc/pkg.conf"
+ config.vm.provision "shell", inline: "sudo chown root:wheel /usr/local/etc/pkg.conf"
+ config.vm.provision "shell", inline: "sudo chmod 644 /usr/local/etc/pkg.conf"
+ end
+ else
+ "Invalid argument #{distro}"
+ abort("Invalid argument!")
+ end
+ config.vm.box_check_update = false
+
+ # Copy in the .gitconfig if it exists
+ if File.file?(File.expand_path("~/.gitconfig"))
+ config.vm.provision "file", source: "~/.gitconfig", destination: ".gitconfig"
+ end
+
+ # Copy the tsocks configuration file for use when installing some spdk test pool dependencies
+ if File.file?("/etc/tsocks.conf")
+ config.vm.provision "file", source: "/etc/tsocks.conf", destination: "tsocks.conf"
+ config.vm.provision "shell", inline: "sudo mv tsocks.conf /etc/tsocks.conf"
+ config.vm.provision "shell", inline: "sudo chown root:wheel /etc/tsocks.conf"
+ config.vm.provision "shell", inline: "sudo chmod 644 /etc/tsocks.conf"
+ end
+
+ # vagrant-cachier caches apt/yum etc to speed subsequent
+ # vagrant up
+ # to enable, run
+ # vagrant plugin install vagrant-cachier
+ #
+ if Vagrant.has_plugin?("vagrant-cachier")
+ config.cache.scope = :box
+ end
+
+ config.vm.network "private_network", type: "dhcp"
+
+ # use http proxy if available
+ if ENV['http_proxy'] && Vagrant.has_plugin?("vagrant-proxyconf")
+ config.proxy.http = ENV['http_proxy']
+ config.proxy.https = ENV['https_proxy']
+ config.proxy.no_proxy = "localhost,127.0.0.1"
+ end
+
+ vmcpu=(ENV['SPDK_VAGRANT_VMCPU'] || 2)
+ vmram=(ENV['SPDK_VAGRANT_VMRAM'] || 4096)
+ spdk_dir=(ENV['SPDK_DIR'] || "none")
+ vhost_host_dir=(ENV['VHOST_HOST_DIR'] || "none")
+ vhost_vm_dir=(ENV['VHOST_VM_DIR'] || "none")
+
+ config.ssh.forward_agent = true
+ config.ssh.forward_x11 = true
+
+ config.vm.provider "virtualbox" do |vb|
+ vb.customize ["modifyvm", :id, "--ioapic", "on"]
+ vb.memory = "#{vmram}"
+ vb.cpus = "#{vmcpu}"
+
+ nvme_disk = 'nvme.vdi'
+ unless File.exist? (nvme_disk)
+ vb.customize ["createhd", "--filename", nvme_disk, "--variant", "Fixed", "--size", "1024"]
+ vb.customize ["storagectl", :id, "--name", "nvme", "--add", "pcie", "--controller", "NVMe", "--portcount", "1", "--bootable", "off"]
+ vb.customize ["storageattach", :id, "--storagectl", "nvme", "--type", "hdd", "--medium", nvme_disk, "--port", "0"]
+ end
+
+ #support for the SSE4.x instruction is required in some versions of VB.
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.1", "1"]
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.2", "1"]
+ end
+
+ # This setup was Tested on Fedora 27
+ # libvirt configuration need modern Qemu(tested on 2.10) & vagrant-libvirt in version 0.0.39+
+ # There are a few limitations for SELinux - a file added outside libvirt must have a proper SE ACL policy, or use setenforce 0
+ config.vm.provider "libvirt" do |libvirt, override|
+ libvirt.random_hostname = "1"
+ libvirt.disk_bus = "virtio"
+
+ # we put nvme_disk inside default pool to eliminate libvirt/SELinux Permissions Problems
+ # and to be able to run vagrant from user $HOME directory
+ nvme_disk = '/var/lib/libvirt/images/nvme_disk.img'
+ unless File.exist? (nvme_disk)
+ puts "If run with libvirt provider please execute create_nvme_img.sh"
+ end
+
+ libvirt.qemuargs :value => "-drive"
+ libvirt.qemuargs :value => "file=#{nvme_disk},if=none,id=D22"
+ libvirt.qemuargs :value => "-device"
+ libvirt.qemuargs :value => "nvme,drive=D22,serial=1234"
+ libvirt.driver = "kvm"
+ libvirt.graphics_type = "spice"
+ libvirt.memory = "#{vmram}"
+ libvirt.cpus = "#{vmcpu}"
+ libvirt.video_type = "qxl"
+
+ # Optional field if we want use other storage pools than default
+ # libvirt.storage_pool_name = "vm"
+ end
+
+ # rsync the spdk directory if provision hasn't happened yet
+ if ENV['COPY_SPDK_DIR'] == "1" && spdk_dir != "none"
+ config.vm.synced_folder "#{spdk_dir}", "/home/vagrant/spdk_repo/spdk", type: "rsync", rsync__auto: false
+ end
+
+ # provision the vm with all of the necessary spdk dependencies for running the autorun.sh tests
+ if ENV['DEPLOY_TEST_VM'] == "1" && spdk_dir != "none"
+ config.vm.provision "shell" do |setup|
+ setup.path = "#{spdk_dir}/test/common/config/vm_setup.sh"
+ setup.privileged = false
+ setup.args = ["-u", "-i"]
+ end
+ end
+
+ if vhost_host_dir != "none"
+ config.vm.synced_folder "#{vhost_host_dir}", "#{vhost_vm_dir}", type: "rsync", rsync__auto: false
+ config.vm.provision "shell", inline: "sudo mkdir -p /root/.ssh"
+ config.vm.provision "shell", inline: "sudo cp #{vhost_vm_dir}/spdk_vhost_id_rsa* /root/.ssh"
+ config.vm.provision "shell", inline: "sudo chmod 600 /root/.ssh/spdk_vhost_id_rsa"
+ end
+
+ # Copy in the user's tools if they exist
+ if File.directory?(File.expand_path("~/vagrant_tools"))
+ config.vm.synced_folder "~/vagrant_tools", "/home/vagrant/tools", type: "rsync", rsync__auto: false
+ end
+end
diff --git a/src/spdk/scripts/vagrant/Vagrantfile_vhost_vm b/src/spdk/scripts/vagrant/Vagrantfile_vhost_vm
new file mode 100644
index 00000000..2fd35540
--- /dev/null
+++ b/src/spdk/scripts/vagrant/Vagrantfile_vhost_vm
@@ -0,0 +1,111 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+
+ # Pick the right distro and bootstrap, default is ubuntu16
+ distro = ( ENV['SPDK_VAGRANT_DISTRO'] || "ubuntu16")
+ case distro
+ when "ubuntu16"
+ # See: https://app.vagrantup.com/puppetlabs/boxes/ubuntu-16.04-64-nocm
+ config.vm.box = "puppetlabs/ubuntu-16.04-64-nocm"
+ config.vm.box_version = "1.0.0"
+ when "ubuntu18"
+ # See: https://app.vagrantup.com/bento/boxes/ubuntu-18.04
+ config.vm.box = "bento/ubuntu-18.04"
+ config.vm.box_version = "201808.24.0"
+ else
+ "Invalid argument #{distro}"
+ abort("Invalid argument!")
+ end
+ config.vm.box_check_update = false
+
+ # vagrant-cachier caches apt/yum etc to speed subsequent
+ # vagrant up
+ # to enable, run
+ # vagrant plugin install vagrant-cachier
+ #
+ if Vagrant.has_plugin?("vagrant-cachier")
+ config.cache.scope = :box
+ end
+
+ # use http proxy if available
+ if ENV['http_proxy'] && Vagrant.has_plugin?("vagrant-proxyconf")
+ config.proxy.http = ENV['http_proxy']
+ config.proxy.https = ENV['https_proxy']
+ config.proxy.no_proxy = "localhost,127.0.0.1"
+ end
+
+ vmcpu=(ENV['SPDK_VAGRANT_VMCPU'] || 2)
+ vmram=(ENV['SPDK_VAGRANT_VMRAM'] || 4096)
+ ssh_key_dir=(ENV['SPDK_VAGRANT_SSH_KEY'])
+ spdk_dir=(ENV['SPDK_DIR'] || "none")
+ install_deps=(ENV['INSTALL_DEPS'] || "false")
+
+ config.ssh.forward_agent = true
+ config.ssh.forward_x11 = true
+
+ # Change root passwd and allow root SSH
+ config.vm.provision "shell", inline: 'echo -e "root\nroot" | sudo passwd root'
+ config.vm.provision "shell", inline: 'sudo sh -c "echo \"PermitRootLogin yes\" >> /etc/ssh/sshd_config"'
+
+ # Use previously generated SSH keys for setting up a key pair
+ $ssh_key_gen_script = <<-SCRIPT
+ sudo mkdir -p /root/.ssh
+ cat /vagrant/ssh_keys/spdk_vhost_id_rsa.pub > /root/.ssh/authorized_keys
+ sudo chmod 644 /root/.ssh/authorized_keys
+ SCRIPT
+ config.vm.provision "shell", inline: $ssh_key_gen_script
+
+ # Install needed deps
+ $apt_script = <<-SCRIPT
+ sudo apt -y update
+ sudo DEBIAN_FRONTEND=noninteractive apt -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade
+ sudo apt -y install -y fio sg3-utils bc
+ SCRIPT
+ config.vm.provision "shell", inline: $apt_script
+
+ # Modify GRUB options
+ # console=ttyS0 earlyprintk=ttyS0 - reroute output to serial dev, so that QEMU can write output to file
+ # scsi_mod.use_blk_mq=1 - for multiqueue use
+ # net.ifnames=0 biosdevname=0 - do not rename NICs on boot. That way we ensure that the added NIC is always eth0.
+ # Reason for these options is that NIC can have different udev name during provisioning with Vagrant
+ # and then some other name while running SPDK tests which use Qemu without any hypervisor like vbox or libvirt
+ # so no corresponding configuration for this NIC name will be present in /etc.
+ config.vm.provision "shell", inline: 'sudo sed -ir s#GRUB_CMDLINE_LINUX=\"\"#GRUB_CMDLINE_LINUX=\"console=ttyS0\ earlyprintk=ttyS0\ scsi_mod.use_blk_mq=1\ net.ifnames=0\ biosdevname=0\"#g /etc/default/grub'
+ config.vm.provision "shell", inline: 'sudo update-grub'
+
+ # TODO: Next 2 lines break any future ssh communication via "vagrant ssh"
+ # It'd be good to check NIC names in ifconfig and then sed them in /etc/network/interfaces to eth0, eth1, and so on
+ config.vm.provision "shell", inline: 'sudo sh -c "echo \"auto eth0\" >> /etc/network/interfaces"'
+ config.vm.provision "shell", inline: 'sudo sh -c "echo \"iface eth0 inet dhcp\" >> /etc/network/interfaces"'
+
+ if distro.include? "ubuntu18"
+ # This is to avoid annoying "Start job is running for wait for network to be configured" 2 minute timeout
+ # in case of not-so-perfect NIC and virtual network configuration for the VM
+ config.vm.provision "shell", inline: 'systemctl disable systemd-networkd-wait-online.service'
+ config.vm.provision "shell", inline: 'systemctl mask systemd-networkd-wait-online.service'
+ end
+
+ config.vm.provider "virtualbox" do |vb|
+ vb.customize ["modifyvm", :id, "--ioapic", "on"]
+ vb.memory = "#{vmram}"
+ vb.cpus = "#{vmcpu}"
+
+ #support for the SSE4.x instruction is required in some versions of VB.
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.1", "1"]
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.2", "1"]
+ end
+
+ if spdk_dir != "none"
+ config.vm.synced_folder "#{spdk_dir}", "/home/vagrant/spdk_repo/spdk", type: "rsync", rsync__auto: false
+ if install_deps.include? "true"
+ config.vm.provision "shell", inline: 'sudo /home/vagrant/spdk_repo/spdk/scripts/pkgdep.sh'
+ end
+ end
+
+ # Copy in the user's tools if they exist
+ if File.directory?(File.expand_path("~/vagrant_tools"))
+ config.vm.synced_folder "~/vagrant_tools", "/home/vagrant/tools", type: "rsync", rsync__auto: false
+ end
+end
diff --git a/src/spdk/scripts/vagrant/autorun-spdk.conf b/src/spdk/scripts/vagrant/autorun-spdk.conf
new file mode 100644
index 00000000..4a815a51
--- /dev/null
+++ b/src/spdk/scripts/vagrant/autorun-spdk.conf
@@ -0,0 +1,28 @@
+# assign a value of 1 to all of the pertinent tests
+SPDK_BUILD_DOC=1
+SPDK_RUN_CHECK_FORMAT=1
+SPDK_RUN_SCANBUILD=1
+SPDK_RUN_VALGRIND=1
+SPDK_TEST_UNITTEST=1
+SPDK_TEST_ISCSI=0
+SPDK_TEST_ISCSI_INITIATOR=0
+SPDK_TEST_NVME=0
+SPDK_TEST_NVME_CLI=0
+SPDK_TEST_NVMF=1
+SPDK_TEST_RBD=0
+# requires some extra configuration. see TEST_ENV_SETUP_README
+SPDK_TEST_VHOST=0
+SPDK_TEST_VHOST_INIT=0
+SPDK_TEST_BLOCKDEV=1
+# doesn't work on vm
+SPDK_TEST_IOAT=0
+SPDK_TEST_EVENT=1
+SPDK_TEST_BLOBFS=0
+SPDK_TEST_PMDK=0
+SPDK_TEST_LVOL=0
+SPDK_RUN_ASAN=1
+SPDK_RUN_UBSAN=1
+# Reduce the size of the hugepages
+HUGEMEM=1024
+# Set up the DEPENDENCY_DIR
+DEPENDENCY_DIR=/home/vagrant
diff --git a/src/spdk/scripts/vagrant/create_nvme_img.sh b/src/spdk/scripts/vagrant/create_nvme_img.sh
new file mode 100755
index 00000000..0fec5770
--- /dev/null
+++ b/src/spdk/scripts/vagrant/create_nvme_img.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+SYSTEM=`uname -s`
+size="1024M"
+
+# NVMe img size example format: 2048M
+if [ -n $1 ]; then
+ size=$1
+fi
+
+if [ ! "${SYSTEM}" = "FreeBSD" ]; then
+ WHICH_OS=`lsb_release -i | awk '{print $3}'`
+ nvme_disk='/var/lib/libvirt/images/nvme_disk.img'
+
+ qemu-img create -f raw $nvme_disk ${size}
+ #Change SE Policy on Fedora
+ if [ $WHICH_OS == "Fedora" ]; then
+ sudo chcon -t svirt_image_t $nvme_disk
+ fi
+
+ chmod 777 $nvme_disk
+ chown qemu:qemu $nvme_disk
+fi
diff --git a/src/spdk/scripts/vagrant/create_vbox.sh b/src/spdk/scripts/vagrant/create_vbox.sh
new file mode 100755
index 00000000..5fc6cb21
--- /dev/null
+++ b/src/spdk/scripts/vagrant/create_vbox.sh
@@ -0,0 +1,220 @@
+#!/usr/bin/env bash
+
+# create_vbox.sh
+#
+# Creates a virtual box with vagrant in the $PWD.
+#
+# This script creates a subdirectory called $PWD/<distro> and copies the Vagrantfile
+# into that directory before running 'vagrant up'
+
+VAGRANT_TARGET="$PWD"
+
+DIR="$( cd "$( dirname $0 )" && pwd )"
+SPDK_DIR="$( cd "${DIR}/../../" && pwd )"
+
+# The command line help
+display_help() {
+ echo
+ echo " Usage: ${0##*/} [-n <num-cpus>] [-s <ram-size>] [-x <http-proxy>] [-hvrld] <distro>"
+ echo
+ echo " distro = <centos7 | ubuntu16 | ubuntu18 | fedora26 | fedora27 | freebsd11> "
+ echo
+ echo " -s <ram-size> in kb default: ${SPDK_VAGRANT_VMRAM}"
+ echo " -n <num-cpus> 1 to 4 default: ${SPDK_VAGRANT_VMCPU}"
+ echo " -x <http-proxy> default: \"${SPDK_VAGRANT_HTTP_PROXY}\""
+ echo " -p <provider> libvirt or virtualbox"
+ echo " --vhost-host-dir=<path> directory path with vhost test dependencies"
+ echo " (test VM qcow image, fio binary, ssh keys)"
+ echo " --vhost-vm-dir=<path> directory where to put vhost dependencies in VM"
+ echo " -r dry-run"
+ echo " -l use a local copy of spdk, don't try to rsync from the host."
+ echo " -d deploy a test vm by provisioning all prerequisites for spdk autotest"
+ echo " -h help"
+ echo " -v verbose"
+ echo
+ echo " Examples:"
+ echo
+ echo " $0 -x http://user:password@host:port fedora27"
+ echo " $0 -s 2048 -n 2 ubuntu16"
+ echo " $0 -rv freebsd"
+ echo " $0 fedora26 "
+ echo
+}
+
+# Set up vagrant proxy. Assumes git-bash on Windows
+# https://stackoverflow.com/questions/19872591/how-to-use-vagrant-in-a-proxy-environment
+SPDK_VAGRANT_HTTP_PROXY=""
+
+VERBOSE=0
+HELP=0
+COPY_SPDK_DIR=1
+DRY_RUN=0
+DEPLOY_TEST_VM=0
+SPDK_VAGRANT_DISTRO="distro"
+SPDK_VAGRANT_VMCPU=4
+SPDK_VAGRANT_VMRAM=4096
+OPTIND=1
+
+while getopts ":n:s:x:p:vrldh-:" opt; do
+ case "${opt}" in
+ -)
+ case "${OPTARG}" in
+ vhost-host-dir=*) VHOST_HOST_DIR="${OPTARG#*=}" ;;
+ vhost-vm-dir=*) VHOST_VM_DIR="${OPTARG#*=}" ;;
+ *) echo "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ x)
+ http_proxy=$OPTARG
+ https_proxy=$http_proxy
+ SPDK_VAGRANT_HTTP_PROXY="${http_proxy}"
+ ;;
+ n)
+ SPDK_VAGRANT_VMCPU=$OPTARG
+ ;;
+ s)
+ SPDK_VAGRANT_VMRAM=$OPTARG
+ ;;
+ p)
+ PROVIDER=$OPTARG
+ ;;
+ v)
+ VERBOSE=1
+ ;;
+ r)
+ DRY_RUN=1
+ ;;
+ h)
+ display_help >&2
+ exit 0
+ ;;
+ l)
+ COPY_SPDK_DIR=0
+ ;;
+ d)
+ DEPLOY_TEST_VM=1
+ ;;
+ *)
+ echo " Invalid argument: -$OPTARG" >&2
+ echo " Try: \"$0 -h\"" >&2
+ exit 1
+ ;;
+ esac
+done
+
+shift "$((OPTIND-1))" # Discard the options and sentinel --
+
+SPDK_VAGRANT_DISTRO="$@"
+
+case "$SPDK_VAGRANT_DISTRO" in
+ centos7)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ ubuntu16)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ ubuntu18)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ fedora26)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ fedora27)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ fedora28)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ freebsd11)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ *)
+ echo " Invalid argument \"${SPDK_VAGRANT_DISTRO}\""
+ echo " Try: \"$0 -h\"" >&2
+ exit 1
+ ;;
+esac
+
+if ! echo "$SPDK_VAGRANT_DISTRO" | grep -q fedora && [ $DEPLOY_TEST_VM -eq 1 ]; then
+ echo "Warning: Test machine deployment is only available on fedora distros. Disabling it for this build"
+ DEPLOY_TEST_VM=0
+fi
+
+if [ ${VERBOSE} = 1 ]; then
+ echo
+ echo DIR=${DIR}
+ echo SPDK_DIR=${SPDK_DIR}
+ echo VAGRANT_TARGET=${VAGRANT_TARGET}
+ echo HELP=$HELP
+ echo DRY_RUN=$DRY_RUN
+ echo SPDK_VAGRANT_DISTRO=$SPDK_VAGRANT_DISTRO
+ echo SPDK_VAGRANT_VMCPU=$SPDK_VAGRANT_VMCPU
+ echo SPDK_VAGRANT_VMRAM=$SPDK_VAGRANT_VMRAM
+ echo SPDK_VAGRANT_HTTP_PROXY=$SPDK_VAGRANT_HTTP_PROXY
+ echo VHOST_HOST_DIR=$VHOST_HOST_DIR
+ echo VHOST_VM_DIR=$VHOST_VM_DIR
+ echo
+fi
+
+export SPDK_VAGRANT_HTTP_PROXY
+export SPDK_VAGRANT_VMCPU
+export SPDK_VAGRANT_VMRAM
+export SPDK_DIR
+export COPY_SPDK_DIR
+export DEPLOY_TEST_VM
+
+if [ -n "$PROVIDER" ]; then
+ provider="--provider=${PROVIDER}"
+fi
+
+if [ -n "$VHOST_HOST_DIR" ]; then
+ export VHOST_HOST_DIR
+fi
+
+if [ -n "$VHOST_VM_DIR" ]; then
+ export VHOST_VM_DIR
+fi
+
+if [ ${DRY_RUN} = 1 ]; then
+ echo "Environemnt Variables"
+ printenv SPDK_VAGRANT_DISTRO
+ printenv SPDK_VAGRANT_VMRAM
+ printenv SPDK_VAGRANT_VMCPU
+ printenv SPDK_VAGRANT_HTTP_PROXY
+ printenv SPDK_DIR
+fi
+
+if [ -d "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}" ]; then
+ echo "Error: ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO} already exists!"
+ exit 1
+fi
+
+if [ ${DRY_RUN} != 1 ]; then
+ mkdir -vp "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}"
+ cp ${DIR}/Vagrantfile ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}
+ pushd "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}"
+ if [ ! -z "${http_proxy}" ]; then
+ export http_proxy
+ export https_proxy
+ if vagrant plugin list | grep -q vagrant-proxyconf; then
+ echo "vagrant-proxyconf already installed... skipping"
+ else
+ vagrant plugin install vagrant-proxyconf
+ fi
+ if echo "$SPDK_VAGRANT_DISTRO" | grep -q freebsd; then
+ cat >~/vagrant_pkg.conf <<EOF
+pkg_env: {
+http_proxy: ${http_proxy}
+}
+EOF
+ fi
+ fi
+ vagrant up $provider
+ echo ""
+ echo " SUCCESS!"
+ echo ""
+ echo " cd to ${SPDK_VAGRANT_DISTRO} and type \"vagrant ssh\" to use."
+ echo " Use vagrant \"suspend\" and vagrant \"resume\" to stop and start."
+ echo " Use vagrant \"destroy\" followed by \"rm -rf ${SPDK_VAGRANT_DISTRO}\" to destroy all trace of vm."
+ echo ""
+fi
diff --git a/src/spdk/scripts/vagrant/create_vhost_vm.sh b/src/spdk/scripts/vagrant/create_vhost_vm.sh
new file mode 100755
index 00000000..e87faa6c
--- /dev/null
+++ b/src/spdk/scripts/vagrant/create_vhost_vm.sh
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+
+# create_vhost_vm.sh
+#
+# Creates a virtual machine image used as a dependency for running vhost tests
+
+set -e
+
+VAGRANT_TARGET="$PWD"
+
+DIR="$( cd "$( dirname $0 )" && pwd )"
+SPDK_DIR="$( cd "${DIR}/../../" && pwd )"
+USE_SSH_DIR=""
+MOVE_TO_DEFAULT_DIR=false
+INSTALL_DEPS=false
+
+# The command line help
+display_help() {
+ echo
+ echo " Usage: ${0##*/} <distro>"
+ echo
+ echo " distro = <ubuntu16 | ubuntu18> "
+ echo
+ echo " --use-ssh-dir=<dir path> Use existing spdk_vhost_id_rsa keys from specified directory"
+ echo " for setting up SSH key pair on VM"
+ echo " --install-deps Install SPDK build dependencies on VM. Needed by some of the"
+ echo " vhost and vhost initiator tests. Default: false."
+ echo " --move-to-default-dir Move generated files to default directories used by vhost test scripts."
+ echo " Default: false."
+ echo " --http-proxy Default: \"${SPDK_VAGRANT_HTTP_PROXY}\""
+ echo " -h help"
+ echo
+ echo " Examples:"
+ echo
+}
+
+while getopts ":h-:" opt; do
+ case "${opt}" in
+ -)
+ case "${OPTARG}" in
+ use-ssh-dir=*) USE_SSH_DIR="${OPTARG#*=}" ;;
+ move-to-default-dir) MOVE_TO_DEFAULT_DIR=true ;;
+ install-deps) INSTALL_DEPS=true ;;
+ http-proxy=*)
+ http_proxy=$OPTARG
+ https_proxy=$http_proxy
+ SPDK_VAGRANT_HTTP_PROXY="${http_proxy}"
+ ;;
+ *)
+ echo " Invalid argument -$OPTARG" >&2
+ echo " Try \"$0 -h\"" >&2
+ exit 1
+ ;;
+ esac
+ ;;
+ h)
+ display_help >&2
+ exit 0
+ ;;
+ *)
+ echo " Invalid argument: -$OPTARG" >&2
+ echo " Try: \"$0 -h\"" >&2
+ exit 1
+ ;;
+ esac
+done
+export SPDK_DIR
+export SPDK_VAGRANT_HTTP_PROXY
+export INSTALL_DEPS
+
+
+shift "$((OPTIND-1))" # Discard the options and sentinel --
+SPDK_VAGRANT_DISTRO="$@"
+
+case "$SPDK_VAGRANT_DISTRO" in
+ ubuntu16)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ ubuntu18)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ *)
+ echo " Invalid argument \"${SPDK_VAGRANT_DISTRO}\""
+ echo " Try: \"$0 -h\"" >&2
+ exit 1
+ ;;
+esac
+
+mkdir -vp "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}"
+cp ${DIR}/Vagrantfile_vhost_vm ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/Vagrantfile
+
+# Copy or generate SSH keys to the VM
+mkdir -vp "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/ssh_keys"
+
+if [[ -n $USE_SSH_DIR ]]; then
+ cp ${USE_SSH_DIR}/spdk_vhost_id_rsa* "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/ssh_keys"
+else
+ ssh-keygen -f "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/ssh_keys/spdk_vhost_id_rsa" -N "" -q
+fi
+export SPDK_VAGRANT_SSH_KEY="${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/ssh_keys/spdk_vhost_id_rsa"
+
+pushd "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}"
+if [ ! -z "${http_proxy}" ]; then
+ export http_proxy
+ export https_proxy
+ if vagrant plugin list | grep -q vagrant-proxyconf; then
+ echo "vagrant-proxyconf already installed... skipping"
+ else
+ vagrant plugin install vagrant-proxyconf
+ fi
+fi
+VBoxManage setproperty machinefolder "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}"
+vagrant up
+vagrant halt
+VBoxManage setproperty machinefolder default
+
+# Convert VBox .vmdk image to qcow2
+vmdk_img=$(find ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO} -name "*.vmdk")
+qemu-img convert -f vmdk -O qcow2 ${vmdk_img} ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/vhost_vm_image.qcow2
+
+if $MOVE_TO_DEFAULT_DIR; then
+ sudo mkdir -p /home/sys_sgsw
+ sudo mv -f ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/vhost_vm_image.qcow2 /home/sys_sgsw/vhost_vm_image.qcow2
+ sudo mv -f ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/ssh_keys/spdk_vhost_id_rsa* ~/.ssh/
+fi
+
+echo ""
+echo " SUCCESS!"
+echo ""
diff --git a/src/spdk/scripts/vagrant/run-autorun.sh b/src/spdk/scripts/vagrant/run-autorun.sh
new file mode 100755
index 00000000..178bdbcc
--- /dev/null
+++ b/src/spdk/scripts/vagrant/run-autorun.sh
@@ -0,0 +1,235 @@
+#!/bin/bash
+
+#
+# BSD LICENSE
+#
+# Copyright (c) 2018 by NetApp, Inc.
+# All Rights Reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+if [ -z "${MAKE}" ]; then
+ export MAKE=make
+fi
+
+if [ -z "${GIT}" ]; then
+ export GIT=git
+fi
+
+if [ -z "${READLINK}" ]; then
+ export READLINK=readlink
+fi
+
+AUTOTEST_DRIVER_PATH=$($READLINK -f ${BASH_SOURCE%/*})
+SPDK_AUTOTEST_LOCAL_PATH=$PWD
+TIMESTAMP=`date +"%Y%m%d%H%M%S"`
+BUILD_NAME="build-${TIMESTAMP}"
+
+# The command line help
+display_help() {
+ echo
+ echo "Usage: $0 -d <path_to_spdk_tree> [-h] | [-q] | [-n]"
+ echo " -d : Specify a path to an SPDK source tree"
+ echo " -q : No output to screen"
+ echo " -n : Noop - dry-run"
+ echo " -h : This help"
+ echo
+ echo "Examples:"
+ echo " run-spdk-autotest.sh -d . -q"
+ echo " run-spdk-autotest.sh -d /home/vagrant/spdk_repo/spdk"
+ echo
+}
+
+set -e
+
+NOOP=0
+METHOD=0
+V=1
+OPTIND=1 # Reset in case getopts has been used previously in the shell.
+while getopts "d:qhn" opt; do
+ case "$opt" in
+ d) SPDK_SOURCE_PATH=$($READLINK -f $OPTARG)
+ echo Using SPDK source at ${SPDK_SOURCE_PATH}
+ METHOD=1
+ ;;
+ q) V=0
+ ;;
+ n) NOOP=1
+ ;;
+ h) display_help >&2
+ exit 0
+ ;;
+ esac
+done
+
+if [ -z "${SPDK_SOURCE_PATH}" ]; then
+ echo "Error: Must specify a source path "
+ display_help
+ exit 1
+fi
+
+# The following code verifies the input parameters and sets up the following variables:
+#
+# SPDK_AUTOTEST_LOCAL_PATH
+# GIT_REPO_PATH
+# GIT_BRANCH
+#
+
+case "$METHOD" in
+ 1)
+ if [ ! -d "${SPDK_SOURCE_PATH}" ]; then
+ echo "${SPDK_SOURCE_PATH} does not exist!"
+ exit 1
+ fi
+ if [ ! -d "${SPDK_SOURCE_PATH}/.git" ]; then
+ echo "${SPDK_SOURCE_PATH} is not a git repository"
+ exit 1
+ fi
+
+ GIT_REPO_SRC_DIR=$($READLINK -f "${SPDK_SOURCE_PATH}" | tr -t '/' ' ' | awk '{print $NF}')
+
+ if [ ! "${GIT_REPO_SRC_DIR}" = "spdk" ]; then
+ echo "The ${SPDK_SOURCE_PATH} git repository is not named \"spdk\""
+ exit 1
+ fi
+
+ pushd "${SPDK_SOURCE_PATH}"
+ GIT_REPO_SRC=$(git rev-parse --show-toplevel)
+ GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
+ popd
+
+ if [ "${SPDK_AUTOTEST_LOCAL_PATH}" = "${SPDK_SOURCE_PATH}" ]; then
+ SPDK_AUTOTEST_LOCAL_PATH=$($READLINK -f ${SPDK_AUTOTEST_LOCAL_PATH}/..)
+ echo "Set SPDK_AUTOTEST_LOCAL_PATH to ${SPDK_AUTOTEST_LOCAL_PATH}"
+ fi
+
+ if [ -d "${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}" ]; then
+ if [ -d "${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}/.git" ]; then
+ echo "${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH} is a git repository!"
+ exit 1
+ fi
+ fi
+
+ GIT_REPO_PATH="${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}/${BUILD_NAME}"
+ ;;
+ *)
+ echo "Internal Error: Must specify a source path or branch name"
+ display_help
+ exit 1
+ ;;
+esac
+
+AUTOTEST_RESULTS="${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}/${BUILD_NAME}"
+AUTOTEST_OUTPUT_PATH="${GIT_REPO_PATH}/output"
+rootdir="${GIT_REPO_PATH}/spdk"
+BUILD_LOG_FILE="${AUTOTEST_OUTPUT_PATH}/build.log"
+
+if [[ ${NOOP} -eq 1 ]]; then
+ echo "AUTOTEST_DRIVER_PATH $AUTOTEST_DRIVER_PATH"
+ #echo "SPDK_AUTOTEST_LOCAL_PATH $SPDK_AUTOTEST_LOCAL_PATH"
+ echo "AUTOTEST_OUTPUT_PATH $AUTOTEST_OUTPUT_PATH"
+ #echo "rootdir $rootdir"
+ echo "BUILD_LOG_FILE $BUILD_LOG_FILE"
+ #echo "GIT_BRANCH $GIT_BRANCH"
+ #echo "BUILD_NAME $BUILD_NAME"
+ echo "GIT_REPO_PATH $GIT_REPO_PATH"
+ echo "AUTOTEST_RESULTS $AUTOTEST_RESULTS"
+fi
+
+#
+# I'd like to keep these files under source control
+#
+if [[ -e "${AUTOTEST_DRIVER_PATH}/autorun-spdk.conf" ]]; then
+ conf="${AUTOTEST_DRIVER_PATH}/autorun-spdk.conf"
+fi
+if [[ -e ~/autorun-spdk.conf ]]; then
+ conf=~/autorun-spdk.conf
+fi
+
+if [[ -z $conf ]]; then
+ echo Conf file not found.
+ exit 1
+fi
+
+mkdir -pv --mode=775 "${AUTOTEST_OUTPUT_PATH}"
+rm -f latest
+ln -sv ${GIT_REPO_PATH} latest
+
+if [[ ${NOOP} -eq 0 ]]; then
+ echo V=$V
+ if [[ $V -eq 0 ]]; then
+ echo Quieting output
+ exec 3>&1 4>&2 > "${BUILD_LOG_FILE}" 2>&1
+ else
+ echo Teeing to ${BUILD_LOG_FILE}
+ exec > >(tee -a "${BUILD_LOG_FILE}") 2>&1
+ fi
+
+ case "$METHOD" in
+ 1)
+ echo "rsync git repository from ${GIT_REPO_SRC} to ${GIT_REPO_PATH}"
+ rsync -av "${GIT_REPO_SRC}" "${GIT_REPO_PATH}"
+ pushd "${GIT_REPO_PATH}/spdk"
+ sudo "${MAKE}" clean -j $(nproc)
+ sudo "${GIT}" clean -d -f
+ popd
+ ;;
+ *)
+ echo "Internal Error: Must specify a source path or branch name"
+ display_help
+ exit 1
+ ;;
+ esac
+
+ trap "echo ERROR; exit" INT TERM EXIT
+
+ pushd "${AUTOTEST_OUTPUT_PATH}"
+ export output_dir="${AUTOTEST_OUTPUT_PATH}"
+
+ # Runs agent scripts
+ "${rootdir}/autobuild.sh" "$conf"
+ sudo -E "${rootdir}/autotest.sh" "$conf"
+ "${rootdir}/autopackage.sh" "$conf"
+ sudo -E "${rootdir}/autorun_post.py" -d "${AUTOTEST_OUTPUT_PATH}" -r "${rootdir}"
+
+ echo "All Tests Passed" > ${GIT_REPO_PATH}/passed
+
+ # Redirect back to screen
+ if [[ $V -eq 0 ]]; then
+ echo Redirect to screen
+ exec 1>&3 2>&4 > >(tee -a "${BUILD_LOG_FILE}") 2>&1
+ fi
+
+ popd
+
+fi
+
+echo "all tests passed"
+
+echo Output directory: ${GIT_REPO_PATH}
+echo Build log: "${BUILD_LOG_FILE}"
diff --git a/src/spdk/scripts/vagrant/update.sh b/src/spdk/scripts/vagrant/update.sh
new file mode 100755
index 00000000..681fd9dc
--- /dev/null
+++ b/src/spdk/scripts/vagrant/update.sh
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+
+# Post-provision updater for SPDK Vagrant VMs: fixes synced-folder ownership,
+# seeds autorun-spdk.conf, reserves hugepages, and updates the guest OS.
+if [ "$(id -u)" -ne 0 ]; then
+	printf '\nError: must be run as root!\n\n' >&2
+	exit 1
+fi
+
+set -eu
+
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+SPDK_DIR="$(cd "${DIR}/../../" && pwd)"
+echo "SPDK_DIR = $SPDK_DIR"
+
+# Vagrant's rsync provisioner can leave the synced repo owned by root; hand it back.
+if [ -d /home/vagrant/spdk_repo ]; then
+	echo "Fixing permissions on /home/vagrant/spdk_repo"
+	chown vagrant /home/vagrant/spdk_repo
+	chgrp vagrant /home/vagrant/spdk_repo
+fi
+
+# Seed the config consumed by run-autorun.sh; never clobber an existing one.
+if [ ! -f /home/vagrant/autorun-spdk.conf ]; then
+	echo "Copying scripts/vagrant/autorun-spdk.conf to /home/vagrant"
+	cp "${SPDK_DIR}/scripts/vagrant/autorun-spdk.conf" /home/vagrant
+	chown vagrant /home/vagrant/autorun-spdk.conf
+	chgrp vagrant /home/vagrant/autorun-spdk.conf
+fi
+
+SYSTEM=$(uname -s)
+
+if [ "$SYSTEM" = "FreeBSD" ]; then
+	# Do initial setup for the system
+	pkg upgrade -f
+	"${SPDK_DIR}/scripts/pkgdep.sh"
+	if [ -d /usr/src/.git ]; then
+		echo
+		echo "/usr/src/ is a git repository"
+		echo "consider \"cd /usr/src/; git pull\" to update"
+		echo
+	else
+		git clone --depth 10 -b release/11.1.0 https://github.com/freebsd/freebsd.git /usr/src
+	fi
+else
+
+	# Make sure that we get the hugepages we need on provision boot
+	# Note: The package install should take care of this at the end
+	#       But sometimes after all the work of provisioning, we can't
+	#       get the requested number of hugepages without rebooting.
+	#       So do it here just in case
+	sysctl -w vm.nr_hugepages=1024
+	HUGEPAGES=$(sysctl -n vm.nr_hugepages)
+	if [ "$HUGEPAGES" != "1024" ]; then
+		echo "Warning: Unable to get 1024 hugepages, only got $HUGEPAGES"
+		echo "Warning: Adjusting HUGEMEM in /home/vagrant/autorun-spdk.conf"
+		sed "s/HUGEMEM=.*$/HUGEMEM=${HUGEPAGES}/g" /home/vagrant/autorun-spdk.conf > /home/vagrant/foo.conf
+		mv -f /home/vagrant/foo.conf /home/vagrant/autorun-spdk.conf
+	fi
+
+	# Figure out what system we are running on
+	if [ -f /etc/lsb-release ]; then
+		. /etc/lsb-release
+	elif [ -f /etc/redhat-release ]; then
+		yum update -y
+		yum install -y redhat-lsb
+		DISTRIB_ID=$(lsb_release -si)
+		DISTRIB_RELEASE=$(lsb_release -sr)
+		DISTRIB_CODENAME=$(lsb_release -sc)
+		DISTRIB_DESCRIPTION=$(lsb_release -sd)
+	fi
+
+	# Do initial setup for the system
+	if [ "${DISTRIB_ID:-}" = "Ubuntu" ]; then
+		set -xv
+		export DEBIAN_PRIORITY=critical
+		export DEBIAN_FRONTEND=noninteractive
+		export DEBCONF_NONINTERACTIVE_SEEN=true
+		# Standard update + upgrade dance
+		apt-get update --assume-yes --no-install-suggests --no-install-recommends -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
+		apt-get upgrade --assume-yes --no-install-suggests --no-install-recommends -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
+		"${SPDK_DIR}/scripts/pkgdep.sh"
+	elif [ "${DISTRIB_ID:-}" = "CentOS" ]; then
+		# check-update exits 100 when updates exist; don't let set -e abort here
+		yum check-update || true
+		yum update -y
+		"${SPDK_DIR}/scripts/pkgdep.sh"
+	elif [ "${DISTRIB_ID:-}" = "Fedora" ]; then
+		if [ "${DISTRIB_RELEASE:-}" = "26" ]; then
+			echo
+			echo "  Run \"${SPDK_DIR}/test/common/config/vm_setup.sh\" to complete setup of Fedora 26"
+			echo
+		else
+			yum check-update || true
+			yum update -y
+			"${SPDK_DIR}/scripts/pkgdep.sh"
+		fi
+	fi
+fi